/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.frontend.icache

import chisel3._
import chisel3.util._
import difftest._
import freechips.rocketchip.tilelink._
import huancun.PreferCacheKey
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan.SoftIfetchPrefetchBundle
import xiangshan.XSCoreParamsKey
import xiangshan.backend.fu.PMPReqBundle
import xiangshan.backend.fu.PMPRespBundle
import xiangshan.cache.mmu._
import xiangshan.frontend._

abstract class IPrefetchBundle(implicit p: Parameters) extends ICacheBundle
abstract class IPrefetchModule(implicit p: Parameters) extends ICacheModule

class IPrefetchReq(implicit p: Parameters) extends IPrefetchBundle {
  val startAddr: UInt = UInt(VAddrBits.W)
  val nextlineStart: UInt = UInt(VAddrBits.W)
  val ftqIdx: FtqPtr = new FtqPtr
  val isSoftPrefetch: Bool = Bool()
  val backendException: UInt = UInt(ExceptionType.width.W)
  def crossCacheline: Bool = startAddr(blockOffBits - 1) === 1.U

  def fromFtqICacheInfo(info: FtqICacheInfo): IPrefetchReq = {
    this.startAddr := info.startAddr
    this.nextlineStart := info.nextlineStart
    this.ftqIdx := info.ftqIdx
    this.isSoftPrefetch := false.B
    this
  }

  def fromSoftPrefetch(req: SoftIfetchPrefetchBundle): IPrefetchReq = {
    this.startAddr := req.vaddr
    this.nextlineStart := req.vaddr + (1 << blockOffBits).U
    this.ftqIdx := DontCare
    this.isSoftPrefetch := true.B
    this
  }
}

class IPrefetchIO(implicit p: Parameters) extends IPrefetchBundle {
  // control
  val csr_pf_enable = Input(Bool())
  val csr_parity_enable = Input(Bool())
  val flush = Input(Bool())

  val req = Flipped(Decoupled(new IPrefetchReq))
  val flushFromBpu = Flipped(new BpuFlushInfo)
  val itlb = Vec(PortNumber, new TlbRequestIO)
  val pmp = Vec(PortNumber, new ICachePMPBundle)
  val metaRead = new ICacheMetaReqBundle
  val MSHRReq = DecoupledIO(new ICacheMissReq)
  val MSHRResp = Flipped(ValidIO(new ICacheMissResp))
  val wayLookupWrite = DecoupledIO(new WayLookupInfo)
}

class IPrefetchPipe(implicit p: Parameters) extends IPrefetchModule {
  val io: IPrefetchIO = IO(new IPrefetchIO)

  val (toITLB, fromITLB) = (io.itlb.map(_.req), io.itlb.map(_.resp))
  val (toPMP, fromPMP) = (io.pmp.map(_.req), io.pmp.map(_.resp))
  val (toMeta, fromMeta) = (io.metaRead.toIMeta, io.metaRead.fromIMeta)
  val (toMSHR, fromMSHR) = (io.MSHRReq, io.MSHRResp)
  val toWayLookup = io.wayLookupWrite

  val s0_fire, s1_fire, s2_fire = WireInit(false.B)
  val s0_discard, s2_discard = WireInit(false.B)
  val s0_ready, s1_ready, s2_ready = WireInit(false.B)
  val s0_flush, s1_flush, s2_flush = WireInit(false.B)
  val from_bpu_s0_flush, from_bpu_s1_flush = WireInit(false.B)
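
  /* Pipeline control wires: s{0,1,2}_fire / ready / flush are declared false here and driven
   * in the corresponding stage below. Flushes propagate backwards (s0_flush includes s1_flush),
   * so a request is not accepted in s0 while s1 is being flushed.
   */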

  /**
   ******************************************************************************
   * IPrefetch Stage 0
   * - 1. receive ftq req
   * - 2. send req to ITLB
   * - 3. send req to Meta SRAM
   ******************************************************************************
   */
  val s0_valid = io.req.valid

  /**
   ******************************************************************************
   * receive ftq req
   ******************************************************************************
   */
  val s0_req_vaddr = VecInit(Seq(io.req.bits.startAddr, io.req.bits.nextlineStart))
  val s0_req_ftqIdx = io.req.bits.ftqIdx
  val s0_isSoftPrefetch = io.req.bits.isSoftPrefetch
  val s0_doubleline = io.req.bits.crossCacheline
  val s0_req_vSetIdx = s0_req_vaddr.map(get_idx)
  val s0_backendException = VecInit(Seq.fill(PortNumber)(io.req.bits.backendException))

  from_bpu_s0_flush := !s0_isSoftPrefetch && (io.flushFromBpu.shouldFlushByStage2(s0_req_ftqIdx) ||
    io.flushFromBpu.shouldFlushByStage3(s0_req_ftqIdx))
  s0_flush := io.flush || from_bpu_s0_flush || s1_flush

  val s0_can_go = s1_ready && toITLB(0).ready && toITLB(1).ready && toMeta.ready
  io.req.ready := s0_can_go

  s0_fire := s0_valid && s0_can_go && !s0_flush

  /**
   ******************************************************************************
   * IPrefetch Stage 1
   * - 1. Receive resp from ITLB
   * - 2. Receive resp from IMeta and check
   * - 3. Monitor the requests from missUnit to write to SRAM.
   * - 4. Write wayLookup
   ******************************************************************************
   */
  val s1_valid = generatePipeControl(lastFire = s0_fire, thisFire = s1_fire, thisFlush = s1_flush, lastFlush = false.B)

  val s1_req_vaddr = RegEnable(s0_req_vaddr, 0.U.asTypeOf(s0_req_vaddr), s0_fire)
  val s1_isSoftPrefetch = RegEnable(s0_isSoftPrefetch, 0.U.asTypeOf(s0_isSoftPrefetch), s0_fire)
  val s1_doubleline = RegEnable(s0_doubleline, 0.U.asTypeOf(s0_doubleline), s0_fire)
  val s1_req_ftqIdx = RegEnable(s0_req_ftqIdx, 0.U.asTypeOf(s0_req_ftqIdx), s0_fire)
  val s1_req_vSetIdx = VecInit(s1_req_vaddr.map(get_idx))
  val s1_backendException = RegEnable(s0_backendException, 0.U.asTypeOf(s0_backendException), s0_fire)

  val m_idle :: m_itlbResend :: m_metaResend :: m_enqWay :: m_enterS2 :: Nil = Enum(5)
  val state = RegInit(m_idle)
  val next_state = WireDefault(state)
  val s0_fire_r = RegNext(s0_fire)
  dontTouch(state)
  dontTouch(next_state)
  state := next_state

  /**
   ******************************************************************************
   * resend itlb req if miss
   ******************************************************************************
   */
  val s1_wait_itlb = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach { i =>
    when(s1_flush) {
      s1_wait_itlb(i) := false.B
    }.elsewhen(RegNext(s0_fire) && fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := true.B
    }.elsewhen(s1_wait_itlb(i) && !fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := false.B
    }
  }
  val s1_need_itlb = VecInit(Seq(
    (RegNext(s0_fire) || s1_wait_itlb(0)) && fromITLB(0).bits.miss,
    (RegNext(s0_fire) || s1_wait_itlb(1)) && fromITLB(1).bits.miss && s1_doubleline
  ))
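
  // tlb_valid_pulse(i) is high for exactly one cycle, when port i's translation becomes available
  // (either right after s0, or when a resend in m_itlbResend completes); tlb_valid_latch(i) then
  // holds it until s1 fires or is flushed. itlb_finish means every port needed by this request
  // (port 1 only for doubleline requests) has a valid translation.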
  val tlb_valid_pulse = VecInit(Seq(
    (RegNext(s0_fire) || s1_wait_itlb(0)) && !fromITLB(0).bits.miss,
    (RegNext(s0_fire) || s1_wait_itlb(1)) && !fromITLB(1).bits.miss && s1_doubleline
  ))
  val tlb_valid_latch =
    VecInit((0 until PortNumber).map(i => ValidHoldBypass(tlb_valid_pulse(i), s1_fire, flush = s1_flush)))
  val itlb_finish = tlb_valid_latch(0) && (!s1_doubleline || tlb_valid_latch(1))

  for (i <- 0 until PortNumber) {
    toITLB(i).valid := s1_need_itlb(i) || (s0_valid && (if (i == 0) true.B else s0_doubleline))
    toITLB(i).bits := DontCare
    toITLB(i).bits.size := 3.U
    toITLB(i).bits.vaddr := Mux(s1_need_itlb(i), s1_req_vaddr(i), s0_req_vaddr(i))
    toITLB(i).bits.debug.pc := Mux(s1_need_itlb(i), s1_req_vaddr(i), s0_req_vaddr(i))
    toITLB(i).bits.cmd := TlbCmd.exec
    toITLB(i).bits.no_translate := false.B
  }
  fromITLB.foreach(_.ready := true.B)
  io.itlb.foreach(_.req_kill := false.B)

  /**
   ******************************************************************************
   * Receive resp from ITLB
   ******************************************************************************
   */
  val s1_req_paddr_wire = VecInit(fromITLB.map(_.bits.paddr(0)))
  val s1_req_paddr_reg = VecInit((0 until PortNumber).map(i =>
    RegEnable(s1_req_paddr_wire(i), 0.U(PAddrBits.W), tlb_valid_pulse(i))
  ))
  val s1_req_paddr = VecInit((0 until PortNumber).map(i =>
    Mux(tlb_valid_pulse(i), s1_req_paddr_wire(i), s1_req_paddr_reg(i))
  ))
  val s1_req_gpaddr_tmp = VecInit((0 until PortNumber).map(i =>
    ResultHoldBypass(
      valid = tlb_valid_pulse(i),
      init = 0.U.asTypeOf(fromITLB(i).bits.gpaddr(0)),
      data = fromITLB(i).bits.gpaddr(0)
    )
  ))
  val s1_req_isForVSnonLeafPTE_tmp = VecInit((0 until PortNumber).map(i =>
    ResultHoldBypass(
      valid = tlb_valid_pulse(i),
      init = 0.U.asTypeOf(fromITLB(i).bits.isForVSnonLeafPTE),
      data = fromITLB(i).bits.isForVSnonLeafPTE
    )
  ))
  val s1_itlb_exception = VecInit((0 until PortNumber).map(i =>
    ResultHoldBypass(
      valid = tlb_valid_pulse(i),
      init = 0.U(ExceptionType.width.W),
      data = ExceptionType.fromTlbResp(fromITLB(i).bits)
    )
  ))
  val s1_itlb_pbmt = VecInit((0 until PortNumber).map(i =>
    ResultHoldBypass(
      valid = tlb_valid_pulse(i),
      init = 0.U.asTypeOf(fromITLB(i).bits.pbmt(0)),
      data = fromITLB(i).bits.pbmt(0)
    )
  ))
  val s1_itlb_exception_gpf = VecInit(s1_itlb_exception.map(_ === ExceptionType.gpf))

  /* Select gpaddr with the first gpf
   * Note: the backend wants the base guest physical address of a fetch block
   *   for port(i), its base gpaddr is actually (gpaddr - i * blocksize)
   * see GPAMem: https://github.com/OpenXiangShan/XiangShan/blob/344cf5d55568dd40cd658a9ee66047a505eeb504/src/main/scala/xiangshan/backend/GPAMem.scala#L33-L34
   * see also: https://github.com/OpenXiangShan/XiangShan/blob/344cf5d55568dd40cd658a9ee66047a505eeb504/src/main/scala/xiangshan/frontend/IFU.scala#L374-L375
   */
  val s1_req_gpaddr = PriorityMuxDefault(
    s1_itlb_exception_gpf zip (0 until PortNumber).map(i => s1_req_gpaddr_tmp(i) - (i << blockOffBits).U),
    0.U.asTypeOf(s1_req_gpaddr_tmp(0))
  )

  val s1_req_isForVSnonLeafPTE = PriorityMuxDefault(
    s1_itlb_exception_gpf zip s1_req_isForVSnonLeafPTE_tmp,
    0.U.asTypeOf(s1_req_isForVSnonLeafPTE_tmp(0))
  )
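
  /* The meta array was already read in s0, but the waymask is computed in the cycle right after a
   * meta read (see s1_SRAM_valid below), so the tag comparison needs a valid translation by then.
   * If the ITLB missed, the s0 read result goes unused and the meta array is read again once the
   * translation is available (s1_need_meta), or in m_metaResend if the meta port was not ready.
   */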

  /**
   ******************************************************************************
   * resend metaArray read req when itlb miss finishes
   ******************************************************************************
   */
  val s1_need_meta = ((state === m_itlbResend) && itlb_finish) || (state === m_metaResend)
  toMeta.valid := s1_need_meta || s0_valid
  toMeta.bits := DontCare
  toMeta.bits.isDoubleLine := Mux(s1_need_meta, s1_doubleline, s0_doubleline)

  for (i <- 0 until PortNumber) {
    toMeta.bits.vSetIdx(i) := Mux(s1_need_meta, s1_req_vSetIdx(i), s0_req_vSetIdx(i))
  }

  /**
   ******************************************************************************
   * Receive resp from IMeta and check
   ******************************************************************************
   */
  val s1_req_ptags = VecInit(s1_req_paddr.map(get_phy_tag))

  val s1_meta_ptags = fromMeta.tags
  val s1_meta_valids = fromMeta.entryValid

  def get_waymask(paddrs: Vec[UInt]): Vec[UInt] = {
    val ptags = paddrs.map(get_phy_tag)
    val tag_eq_vec =
      VecInit((0 until PortNumber).map(p => VecInit((0 until nWays).map(w => s1_meta_ptags(p)(w) === ptags(p)))))
    val tag_match_vec = VecInit((0 until PortNumber).map(k =>
      VecInit(tag_eq_vec(k).zipWithIndex.map { case (way_tag_eq, w) => way_tag_eq && s1_meta_valids(k)(w) })
    ))
    val waymasks = VecInit(tag_match_vec.map(_.asUInt))
    waymasks
  }

  val s1_SRAM_waymasks = VecInit((0 until PortNumber).map { port =>
    Mux(tlb_valid_pulse(port), get_waymask(s1_req_paddr_wire)(port), get_waymask(s1_req_paddr_reg)(port))
  })

  // select ecc code
  /* NOTE:
   * When the ECC check fails, s1_waymasks may be corrupted, so the selected meta_codes may be wrong.
   * However, we can guarantee that the request sent to the L2 cache and the response to the IFU are both correct:
   * since the probability of an abnormal bit flip is very small, consider the case of at most 1 wrong bit:
   * 1. miss -> fake hit: a wrong bit in s1_waymasks is set to true.B, thus selecting the wrong meta_codes,
   *    but we can detect this by checking whether `encodeMetaECC(req_ptags) === meta_codes`.
   * 2. hit -> fake multi-hit: in a normal situation multi-hit never happens, so a multi-hit indicates ECC failure;
   *    we can detect this by checking whether `PopCount(waymasks) <= 1.U`,
   *    and meta_codes is not important in this situation.
   * 3. hit -> fake miss: we can't detect this, but we can (pre)fetch the correct data from the L2 cache, so it's not a problem.
   * 4. hit -> hit / miss -> miss: the ECC failure happens in an irrelevant way, so we don't care about it this time.
   */
  val s1_SRAM_meta_codes = VecInit((0 until PortNumber).map { port =>
    Mux1H(s1_SRAM_waymasks(port), fromMeta.codes(port))
  })
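
  /* A request may sit in s1 for several cycles (waiting for the ITLB, the meta port, WayLookup or s2),
   * during which the missUnit can refill a line into the very set this request maps to. update_meta_info
   * patches the waymask (and meta code) accordingly: a refill with the same ptag turns a miss into a hit
   * on the refilled way, while a refill that overwrites the way we were about to hit clears the mask.
   */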

  /**
   ******************************************************************************
   * update waymasks and meta_codes according to MSHR update data
   ******************************************************************************
   */
  def update_meta_info(mask: UInt, vSetIdx: UInt, ptag: UInt, code: UInt): Tuple2[UInt, UInt] = {
    require(mask.getWidth == nWays)
    val new_mask = WireInit(mask)
    val new_code = WireInit(code)
    val valid = fromMSHR.valid && !fromMSHR.bits.corrupt
    val vset_same = fromMSHR.bits.vSetIdx === vSetIdx
    val ptag_same = getPhyTagFromBlk(fromMSHR.bits.blkPaddr) === ptag
    val way_same = fromMSHR.bits.waymask === mask
    when(valid && vset_same) {
      when(ptag_same) {
        new_mask := fromMSHR.bits.waymask
        // also update meta_codes
        // we have getPhyTagFromBlk(fromMSHR.bits.blkPaddr) === ptag, so we can use ptag directly for better timing
        new_code := encodeMetaECC(ptag)
      }.elsewhen(way_same) {
        new_mask := 0.U
        // we don't care about new_code, since it's not used for a missed request
      }
    }
    (new_mask, new_code)
  }

  val s1_SRAM_valid = s0_fire_r || RegNext(s1_need_meta && toMeta.ready)
  val s1_MSHR_valid = fromMSHR.valid && !fromMSHR.bits.corrupt
  val s1_waymasks = WireInit(VecInit(Seq.fill(PortNumber)(0.U(nWays.W))))
  val s1_waymasks_r = RegEnable(s1_waymasks, 0.U.asTypeOf(s1_waymasks), s1_SRAM_valid || s1_MSHR_valid)
  val s1_meta_codes = WireInit(VecInit(Seq.fill(PortNumber)(0.U(ICacheMetaCodeBits.W))))
  val s1_meta_codes_r = RegEnable(s1_meta_codes, 0.U.asTypeOf(s1_meta_codes), s1_SRAM_valid || s1_MSHR_valid)

  // update waymasks and meta_codes
  (0 until PortNumber).foreach { i =>
    val old_waymask = Mux(s1_SRAM_valid, s1_SRAM_waymasks(i), s1_waymasks_r(i))
    val old_meta_codes = Mux(s1_SRAM_valid, s1_SRAM_meta_codes(i), s1_meta_codes_r(i))
    val new_info = update_meta_info(old_waymask, s1_req_vSetIdx(i), s1_req_ptags(i), old_meta_codes)
    s1_waymasks(i) := new_info._1
    s1_meta_codes(i) := new_info._2
  }

  /**
   ******************************************************************************
   * send enqueue req to WayLookup
   ******************************************************************************
   */
  // Disallow enqueuing wayLookup when SRAM write occurs.
  toWayLookup.valid := ((state === m_enqWay) || ((state === m_idle) && itlb_finish)) &&
    !s1_flush && !fromMSHR.valid && !s1_isSoftPrefetch // do not enqueue soft prefetch
  toWayLookup.bits.vSetIdx := s1_req_vSetIdx
  toWayLookup.bits.waymask := s1_waymasks
  toWayLookup.bits.ptag := s1_req_ptags
  toWayLookup.bits.gpaddr := s1_req_gpaddr
  toWayLookup.bits.isForVSnonLeafPTE := s1_req_isForVSnonLeafPTE
  toWayLookup.bits.meta_codes := s1_meta_codes
  (0 until PortNumber).foreach { i =>
    // exception in the first line is always valid; in the second line it is valid iff this is a doubleline request
    val excpValid = if (i == 0) true.B else s1_doubleline
    // Send s1_itlb_exception to WayLookup (instead of s1_exception_out) for better timing. Will check PMP again in mainPipe.
    toWayLookup.bits.itlb_exception(i) := Mux(excpValid, s1_itlb_exception(i), ExceptionType.none)
    toWayLookup.bits.itlb_pbmt(i) := Mux(excpValid, s1_itlb_pbmt(i), Pbmt.pma)
  }
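
  // A WayLookup entry is expected to have at most one way set per port; a multi-hit can only come from
  // corrupted metadata (see the ECC note above), so it is asserted here on enqueue.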
  val s1_waymasks_vec = s1_waymasks.map(_.asTypeOf(Vec(nWays, Bool())))
  when(toWayLookup.fire) {
    assert(
      PopCount(s1_waymasks_vec(0)) <= 1.U && (PopCount(s1_waymasks_vec(1)) <= 1.U || !s1_doubleline),
      "Multiple hit in main pipe, port0:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x port1:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x ",
      PopCount(s1_waymasks_vec(0)) > 1.U,
      s1_req_ptags(0),
      get_idx(s1_req_vaddr(0)),
      s1_req_vaddr(0),
      PopCount(s1_waymasks_vec(1)) > 1.U && s1_doubleline,
      s1_req_ptags(1),
      get_idx(s1_req_vaddr(1)),
      s1_req_vaddr(1)
    )
  }

  /**
   ******************************************************************************
   * PMP check
   ******************************************************************************
   */
  toPMP.zipWithIndex.foreach { case (p, i) =>
    // if itlb has exception, paddr can be invalid, therefore pmp check can be skipped
    p.valid := s1_valid // && s1_itlb_exception === ExceptionType.none
    p.bits.addr := s1_req_paddr(i)
    p.bits.size := 3.U // TODO
    p.bits.cmd := TlbCmd.exec
  }
  val s1_pmp_exception = VecInit(fromPMP.map(ExceptionType.fromPMPResp))
  val s1_pmp_mmio = VecInit(fromPMP.map(_.mmio))

  // merge s1 itlb/pmp exceptions, itlb has the highest priority, pmp next
  // for timing consideration, meta_corrupt is not merged, and it will NOT cancel prefetch
  val s1_exception_out = ExceptionType.merge(
    s1_backendException,
    s1_itlb_exception,
    s1_pmp_exception
  )

  // merge pmp mmio and itlb pbmt
  val s1_mmio = VecInit((s1_pmp_mmio zip s1_itlb_pbmt).map { case (mmio, pbmt) =>
    mmio || Pbmt.isUncache(pbmt)
  })

  /**
   ******************************************************************************
   * state machine
   ******************************************************************************
   */

  switch(state) {
    is(m_idle) {
      when(s1_valid) {
        when(!itlb_finish) {
          next_state := m_itlbResend
        }.elsewhen(!toWayLookup.fire) { // itlb_finish
          next_state := m_enqWay
        }.elsewhen(!s2_ready) { // itlb_finish && toWayLookup.fire
          next_state := m_enterS2
        } // .otherwise { next_state := m_idle }
      } // .otherwise { next_state := m_idle } // !s1_valid
    }
    is(m_itlbResend) {
      when(itlb_finish) {
        when(!toMeta.ready) {
          next_state := m_metaResend
        }.otherwise { // toMeta.ready
          next_state := m_enqWay
        }
      } // .otherwise { next_state := m_itlbResend } // !itlb_finish
    }
    is(m_metaResend) {
      when(toMeta.ready) {
        next_state := m_enqWay
      } // .otherwise { next_state := m_metaResend } // !toMeta.ready
    }
    is(m_enqWay) {
      when(toWayLookup.fire || s1_isSoftPrefetch) {
        when(!s2_ready) {
          next_state := m_enterS2
        }.otherwise { // s2_ready
          next_state := m_idle
        }
      } // .otherwise { next_state := m_enqWay }
    }
    is(m_enterS2) {
      when(s2_ready) {
        next_state := m_idle
      }
    }
  }

  when(s1_flush) {
    next_state := m_idle
  }

  /** Stage 1 control */
  from_bpu_s1_flush := s1_valid && !s1_isSoftPrefetch && io.flushFromBpu.shouldFlushByStage3(s1_req_ftqIdx)
  s1_flush := io.flush || from_bpu_s1_flush

  s1_ready := next_state === m_idle
  s1_fire := (next_state === m_idle) && s1_valid && !s1_flush // used to clear s1_valid & tlb_valid_latch
  val s1_real_fire = s1_fire && io.csr_pf_enable // the real "s1 fire": only then does the request enter s2
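
  // Note that s1_fire (not s1_real_fire) clears s1_valid and tlb_valid_latch, so when prefetching is
  // disabled by CSR the request still drains from s1; it just never enters s2.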

  /**
   ******************************************************************************
   * IPrefetch Stage 2
   * - 1. Monitor the requests from missUnit to write to SRAM.
   * - 2. send req to missUnit
   ******************************************************************************
   */
  val s2_valid =
    generatePipeControl(lastFire = s1_real_fire, thisFire = s2_fire, thisFlush = s2_flush, lastFlush = false.B)

  val s2_req_vaddr = RegEnable(s1_req_vaddr, 0.U.asTypeOf(s1_req_vaddr), s1_real_fire)
  val s2_isSoftPrefetch = RegEnable(s1_isSoftPrefetch, 0.U.asTypeOf(s1_isSoftPrefetch), s1_real_fire)
  val s2_doubleline = RegEnable(s1_doubleline, 0.U.asTypeOf(s1_doubleline), s1_real_fire)
  val s2_req_paddr = RegEnable(s1_req_paddr, 0.U.asTypeOf(s1_req_paddr), s1_real_fire)
  val s2_exception =
    RegEnable(s1_exception_out, 0.U.asTypeOf(s1_exception_out), s1_real_fire) // includes itlb/pmp exception
//  val s2_exception_in = RegEnable(s1_exception_out, 0.U.asTypeOf(s1_exception_out), s1_real_fire) // disabled for timing consideration
  val s2_mmio = RegEnable(s1_mmio, 0.U.asTypeOf(s1_mmio), s1_real_fire)
  val s2_waymasks = RegEnable(s1_waymasks, 0.U.asTypeOf(s1_waymasks), s1_real_fire)
//  val s2_meta_codes = RegEnable(s1_meta_codes, 0.U.asTypeOf(s1_meta_codes), s1_real_fire) // disabled for timing consideration

  val s2_req_vSetIdx = s2_req_vaddr.map(get_idx)
  val s2_req_ptags = s2_req_paddr.map(get_phy_tag)

  // disabled for timing consideration
//  // do metaArray ECC check
//  val s2_meta_corrupt = VecInit((s2_req_ptags zip s2_meta_codes zip s2_waymasks).map{ case ((meta, code), waymask) =>
//    val hit_num = PopCount(waymask)
//    // NOTE: if not hit, encodeMetaECC(meta) =/= code can also be true, but we don't care about it
//    (encodeMetaECC(meta) =/= code && hit_num === 1.U) || // hit one way, but parity code does not match, ECC failure
//      hit_num > 1.U // hit multi way, must be an ECC failure
//  })
//
//  // generate exception
//  val s2_meta_exception = VecInit(s2_meta_corrupt.map(ExceptionType.fromECC(io.csr_parity_enable, _)))
//
//  // merge meta exception and itlb/pmp exception
//  val s2_exception = ExceptionType.merge(s2_exception_in, s2_meta_exception)

  /**
   ******************************************************************************
   * Monitor the requests from missUnit to write to SRAM
   ******************************************************************************
   */

  /* NOTE: If fromMSHR.bits.corrupt, we should set s2_MSHR_hits to false.B, and send prefetch requests again.
   * This is the opposite of how mainPipe handles fromMSHR.bits.corrupt,
   * in which we should set s2_MSHR_hits to true.B, and send an error to the IFU.
   */
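  // fromMSHR is a single-cycle pulse, while s2 may wait here for several cycles for the miss arbiter,
  // so a matching refill is latched (ValidHoldBypass) until s2 fires or is flushed.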
  val s2_MSHR_match = VecInit((0 until PortNumber).map(i =>
    (s2_req_vSetIdx(i) === fromMSHR.bits.vSetIdx) &&
      (s2_req_ptags(i) === getPhyTagFromBlk(fromMSHR.bits.blkPaddr)) &&
      s2_valid && fromMSHR.valid && !fromMSHR.bits.corrupt
  ))
  val s2_MSHR_hits = (0 until PortNumber).map(i => ValidHoldBypass(s2_MSHR_match(i), s2_fire || s2_flush))

  val s2_SRAM_hits = s2_waymasks.map(_.orR)
  val s2_hits = VecInit((0 until PortNumber).map(i => s2_MSHR_hits(i) || s2_SRAM_hits(i)))

  /* s2_exception includes backend/itlb (pf/gpf/af) and pmp (af) exceptions, none of which should be prefetched
   * (meta corruption is not merged here for timing, see the note in stage 1, and does not cancel the prefetch);
   * mmio should not be prefetched either;
   * also, if a previous port has an exception, the later ports should not be prefetched
   */
  val s2_miss = VecInit((0 until PortNumber).map { i =>
    !s2_hits(i) && (if (i == 0) true.B else s2_doubleline) &&
    s2_exception.take(i + 1).map(_ === ExceptionType.none).reduce(_ && _) &&
    s2_mmio.take(i + 1).map(!_).reduce(_ && _)
  })

  /**
   ******************************************************************************
   * send req to missUnit
   ******************************************************************************
   */
  val toMSHRArbiter = Module(new Arbiter(new ICacheMissReq, PortNumber))
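
  // s2 can stay valid for several cycles while its ports win the arbiter one at a time, so has_send
  // remembers which ports have already issued their miss request.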
  // To avoid sending duplicate requests.
  val has_send = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach { i =>
    when(s1_real_fire) {
      has_send(i) := false.B
    }.elsewhen(toMSHRArbiter.io.in(i).fire) {
      has_send(i) := true.B
    }
  }

  (0 until PortNumber).foreach { i =>
    toMSHRArbiter.io.in(i).valid := s2_valid && s2_miss(i) && !has_send(i)
    toMSHRArbiter.io.in(i).bits.blkPaddr := getBlkAddr(s2_req_paddr(i))
    toMSHRArbiter.io.in(i).bits.vSetIdx := s2_req_vSetIdx(i)
  }

  toMSHR <> toMSHRArbiter.io.out

  s2_flush := io.flush

  // toMSHRArbiter.io.in(i).fire is not used here for timing consideration
  // val s2_finish = (0 until PortNumber).map(i => has_send(i) || !s2_miss(i) || toMSHRArbiter.io.in(i).fire).reduce(_&&_)
  val s2_finish = (0 until PortNumber).map(i => has_send(i) || !s2_miss(i)).reduce(_ && _)
  s2_ready := s2_finish || !s2_valid
  s2_fire := s2_valid && s2_finish && !s2_flush

  /** PerfAccumulate */
  // the number of bpu flush
  XSPerfAccumulate("bpu_s0_flush", from_bpu_s0_flush)
  XSPerfAccumulate("bpu_s1_flush", from_bpu_s1_flush)
  // the number of prefetch requests received from ftq or backend (software prefetch)
//  XSPerfAccumulate("prefetch_req_receive", io.req.fire)
  XSPerfAccumulate("prefetch_req_receive_hw", io.req.fire && !io.req.bits.isSoftPrefetch)
  XSPerfAccumulate("prefetch_req_receive_sw", io.req.fire && io.req.bits.isSoftPrefetch)
  // the number of prefetch requests sent to missUnit
//  XSPerfAccumulate("prefetch_req_send", toMSHR.fire)
  XSPerfAccumulate("prefetch_req_send_hw", toMSHR.fire && !s2_isSoftPrefetch)
  XSPerfAccumulate("prefetch_req_send_sw", toMSHR.fire && s2_isSoftPrefetch)
  XSPerfAccumulate("to_missUnit_stall", toMSHR.valid && !toMSHR.ready)

  /**
   * Count the number of requests that are filtered for various reasons.
   * The number of prefetch discards in the performance counters may be a little larger than
   * the number actually discarded, because a cancelled request can have multiple reasons at the same time.
   */
  // discard prefetch request by flush
  // XSPerfAccumulate("fdip_prefetch_discard_by_tlb_except", p1_discard && p1_tlb_except)
  // // discard prefetch request by hit icache SRAM
  // XSPerfAccumulate("fdip_prefetch_discard_by_hit_cache", p2_discard && p1_meta_hit)
  // // discard prefetch request by hit write SRAM
  // XSPerfAccumulate("fdip_prefetch_discard_by_p1_monoitor", p1_discard && p1_monitor_hit)
  // // discard prefetch request by pmp except or mmio
  // XSPerfAccumulate("fdip_prefetch_discard_by_pmp", p2_discard && p2_pmp_except)
  // // discard prefetch request by hit mainPipe info
  // // XSPerfAccumulate("fdip_prefetch_discard_by_mainPipe", p2_discard && p2_mainPipe_hit)
}