package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqLsqIO
import xiangshan.backend.fu.HasExceptionNO


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb  -> SignExt(rdata(7, 0), XLEN),
      LSUOpType.lh  -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw  -> Mux(fpWen, rdata, SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld  -> Mux(fpWen, rdata, SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu -> ZeroExt(rdata(7, 0), XLEN),
      LSUOpType.lhu -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu -> ZeroExt(rdata(31, 0), XLEN)
    ))
  }

  def fpRdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lw -> recode(rdata(31, 0), S),
      LSUOpType.ld -> recode(rdata(63, 0), D)
    ))
  }
}

class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasExceptionNO
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val flush = Input(Bool())
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val loadDataForwarded = Vec(LoadPipelineWidth, Input(Bool()))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val roq = Flipped(new RoqLsqIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill))
    val uncache = new DCacheWordIO
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LoadQueueData(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new AsyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 1, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
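  // NOTE (illustrative summary, not from the original source): each lq entry walks
  // through the status flags below roughly as follows:
  //   dispatch:        allocated := 1, everything else := 0
  //   hit writeback:   datavalid := 1, writebacked := 1
  //   miss writeback:  miss := 1; a matching refill then sets datavalid := 1, miss := 0
  //   mmio writeback:  pending := 1; the uncache response later sets datavalid := 1
  //   commit/flush:    allocated := 0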
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been writebacked to CDB
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst
  val debug_paddr = Reg(Vec(LoadQueueSize, UInt(PAddrBits.W))) // mmio: inst is an mmio inst

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  val commitCount = RegNext(io.roq.lcommit)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when the number of empty entries
    * is greater than RenameWidth (EnqWidth)
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !(io.brqRedirect.valid || io.flush)) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
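  // NOTE (worked example, not from the original source): with RenameWidth = 4 and
  // needAlloc = (1, 1, 0, 1), the offsets computed above are 0, 1, 2, 2, so slots
  // 0, 1 and 3 allocate at enqPtrExt(0), enqPtrExt(1) and enqPtrExt(2) respectively,
  // while slot 2 (no allocation) leaves enqPtrExt(2) for the next requester.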
  /**
    * Writeback load from load units
    *
    * Most load instructions writeback to regfile at the same time.
    * However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    *       The mmio instruction will be sent to lower level when it reaches ROB's head.
    *       After uncache response, it will write back through arbiter with loadUnit.
    *   (3) For cache misses, it is marked miss and sent to dcache later.
    *       After cache refills, it will write back through arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    vaddrModule.io.wen(i) := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.loadDataForwarded(i)) && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      vaddrModule.io.waddr(i) := loadWbIndex
      vaddrModule.io.wdata(i) := io.loadIn(i).bits.vaddr
      vaddrModule.io.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
      debug_paddr(loadWbIndex) := io.loadIn(i).bits.paddr

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadDataForwarded(i)
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      uop(loadWbIndex).debugInfo.issueTime := io.loadIn(i).bits.uop.debugInfo.issueTime
    }
  }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bits in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
    }
  })

  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads (data refilled) and write them back to cdb.
  // The 2 refilled loads are selected from even/odd entries, separately.

  // Stage 0
  // Generate writeback indexes

  def getEvenBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i)})).asUInt
  }
  def getOddBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i+1)})).asUInt
  }
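  // NOTE (worked example, not from the original source): assuming LoadQueueSize = 8,
  // getEvenBits collects input bits (0, 2, 4, 6) and getOddBits bits (1, 3, 5, 7).
  // Writeback port 0 therefore only ever selects even lq entries and port 1 only odd
  // entries, so the two select trees are half-width and can never pick the same
  // entry; Cat(halfIndex, 0) / Cat(halfIndex, 1) below reconstruct the full index.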
  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && datavalid(i)
  })).asUInt() // use UInt instead of Vec to reduce verilog lines
  val evenDeqMask = getEvenBits(deqMask)
  val oddDeqMask = getOddBits(deqMask)
  // generate lastCycleSelect mask
  val evenSelectMask = Mux(io.ldout(0).fire(), getEvenBits(UIntToOH(loadWbSel(0))), 0.U)
  val oddSelectMask = Mux(io.ldout(1).fire(), getOddBits(UIntToOH(loadWbSel(1))), 0.U)
  // generate real select vec
  val loadEvenSelVec = getEvenBits(loadWbSelVec) & ~evenSelectMask
  val loadOddSelVec = getOddBits(loadWbSelVec) & ~oddSelectMask

  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }

  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(getFirstOne(toVec(loadEvenSelVec), evenDeqMask), 0.U(1.W))
  loadWbSelVGen(0) := loadEvenSelVec.asUInt.orR
  loadWbSelGen(1) := Cat(getFirstOne(toVec(loadOddSelVec), oddDeqMask), 1.U(1.W))
  loadWbSelVGen(1) := loadOddSelVec.asUInt.orR

  (0 until LoadPipelineWidth).map(i => {
    loadWbSel(i) := RegNext(loadWbSelGen(i))
    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
    when(io.ldout(i).fire()){
      // Mark them as writebacked, so they will not be selected in the next cycle
      writebacked(loadWbSel(i)) := true.B
    }
  })

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to cdb
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.debug.paddr := debug_paddr(loadWbSel(i))
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb roqidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }

  })
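  // NOTE (worked example, not from the original source): for a halfword load (lh)
  // whose paddr ends in b010, rdataSel above selects rdata(63, 16), which places
  // the accessed halfword at bits (15, 0); rdataHelper then sign-extends
  // rdataSel(15, 0) to XLEN bits before the result is written back.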
  /**
    * Load commits
    *
    * When a load is committed, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(commitCount > i.U){
      allocated(deqPtr+i.U) := false.B
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
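  // NOTE (worked example, not from the original source): getFirstOne treats startMask
  // as "entries below the start index". With mask = b0101 and
  // startMask = UIntToMask(2, 4) = b0011, highBitsUint = b0100, so entry 2 (the first
  // candidate at or above the start index) wins; with mask = b0001 instead,
  // highBitsUint is zero and the search wraps around to pick entry 0.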
  /**
    * Memory violation detection
    *
    * When store writes back, it searches LoadQueue for younger load instructions
    * with the same load physical address. They loaded wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations. Choose the oldest load.
    *   Prepare redirect request according to the detected violation.
    * Cycle 2: Redirect Fire
    *   Fire redirect request (if valid)
    */
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

    // check if a load already in lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when load/store writeback to roq together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
      isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
      io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
      (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for load in l1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
      isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
      io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
      (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }

  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)
  val lastCycleFlush = RegNext(io.flush)
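  // NOTE (illustrative summary, not from the original source): getAfterMask gives
  // mask(i)(j) = "candidate i is younger than candidate j, or candidate i is invalid".
  // The rollbackUop Mux in detectRollback uses it to pick the oldest valid candidate
  // among the lq/wb/l1 violations, and rollbackSel then reduces the per-store-port
  // results to the single oldest rollback request.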
  // S2: select rollback and generate rollback request
  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's roqIdx equals to this cycle's roqIdx, it still triggers the redirect.
  val rollbackGen = Wire(Valid(new Redirect))
  val rollbackReg = Reg(Valid(new Redirect))
  rollbackGen.valid := rollbackSelected.valid &&
    !rollbackSelected.bits.roqIdx.needFlush(lastCycleRedirect, lastCycleFlush)

  rollbackGen.bits.roqIdx := rollbackSelected.bits.roqIdx
  rollbackGen.bits.ftqIdx := rollbackSelected.bits.cf.ftqPtr
  rollbackGen.bits.ftqOffset := rollbackSelected.bits.cf.ftqOffset
  rollbackGen.bits.level := RedirectLevel.flush
  rollbackGen.bits.interrupt := DontCare
  rollbackGen.bits.cfiUpdate := DontCare
  rollbackGen.bits.cfiUpdate.target := rollbackSelected.bits.cf.pc

  rollbackReg := rollbackGen

  // S3: fire rollback request
  io.rollback := rollbackReg
  io.rollback.valid := rollbackReg.valid &&
    !rollbackReg.bits.roqIdx.needFlush(lastCycleRedirect, lastCycleFlush)

  when(io.rollback.valid) {
    // XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.cfi, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    *
    * States:
    *   (1) writeback from store units: mark as pending
    *   (2) when they reach ROB's head, they can be sent to uncache channel
    *   (3) response from uncache channel: mark as datavalid
    *   (4) writeback to ROB (and other units): mark as writebacked
    *   (5) ROB commits the instruction: same as normal instructions
    */
  // (2) when they reach ROB's head, they can be sent to uncache channel
  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(io.roq.pendingld && pending(deqPtr) && allocated(deqPtr)) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when(io.uncache.req.fire()) {
        uncacheState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire()) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      when(io.roq.commit) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
  io.uncache.req.valid := uncacheState === s_req

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.id := DontCare

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  // (3) response from uncache channel: mark as datavalid
  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }
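  // NOTE (illustrative summary, not from the original source): the four-state FSM
  // above serializes MMIO loads. Only the instruction at the queue head, and only
  // once the ROB reports it as the oldest pending load (io.roq.pendingld), is sent
  // to the uncache channel; the entry then waits in s_wait until the ROB commits it,
  // so at most one MMIO load is in flight at any time.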
  // Read vaddr for mem exception
  vaddrModule.io.raddr(0) := deqPtr + commitCount
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect, io.flush) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !(io.brqRedirect.valid || io.flush), PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid || lastCycleFlush) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)

  allowEnqueue := validCount + enqNumber <= (LoadQueueSize - RenameWidth).U

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.debug(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && miss(i), "m")
    // PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}
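// NOTE (worked example, not from the original source): pointer recovery after a
// redirect works by counting rather than rewinding. If 3 allocated entries are
// flushed in one cycle (PopCount(needCancel) = 3), every enqPtrExt lane steps back
// by lastCycleCancelCount = 3 in the following cycle, while allowEnqueue keeps
// RenameWidth slots free via validCount + enqNumber <= LoadQueueSize - RenameWidth.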