package xiangshan.mem

import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheWordIO, DCacheLineIO, TlbRequestIO, MemoryOpConstants}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr
import xiangshan.backend.fu.fpu.boxF32ToF64


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}


// Load Queue
class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val dp1Req = Vec(RenameWidth, Flipped(DecoupledIO(new MicroOp)))
    val lqReady = Output(Vec(RenameWidth, Bool()))
    val sqReady = Input(Vec(RenameWidth, Bool()))
    val lqIdxs = Output(Vec(RenameWidth, new LqPtr)) // LSIdx will be assembled in LSQWrapper
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // FIXME: Valid() only
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback load
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(Vec(CommitWidth, Valid(new RoqCommit)))
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = new DCacheLineIO
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
    // val refill = Flipped(Valid(new DCacheLineReq ))
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LSQueueData(LoadQueueSize, LoadPipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val valid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been writebacked to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq

  val ringBufferHeadExtended = RegInit(0.U.asTypeOf(new LqPtr))
  val ringBufferTailExtended = RegInit(0.U.asTypeOf(new LqPtr))
  val ringBufferHead = ringBufferHeadExtended.value
  val ringBufferTail = ringBufferTailExtended.value
  val ringBufferSameFlag = ringBufferHeadExtended.flag === ringBufferTailExtended.flag
  val ringBufferEmpty = ringBufferHead === ringBufferTail && ringBufferSameFlag
  val ringBufferFull = ringBufferHead === ringBufferTail && !ringBufferSameFlag
  val ringBufferAllowin = !ringBufferFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits(i).valid && !io.commits(i).bits.isWalk && io.commits(i).bits.uop.ctrl.commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits(i).bits.uop.lqIdx.value)

  val tailMask = (((1.U((LoadQueueSize + 1).W)) << ringBufferTail).asUInt - 1.U)(LoadQueueSize - 1, 0)
  val headMask = (((1.U((LoadQueueSize + 1).W)) << ringBufferHead).asUInt - 1.U)(LoadQueueSize - 1, 0)
  val enqDeqMask1 = tailMask ^ headMask
  val enqDeqMask = Mux(ringBufferSameFlag, enqDeqMask1, ~enqDeqMask1)
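
  // A minimal worked example of the mask computation above (hypothetical
  // values, assuming LoadQueueSize = 8):
  //   ringBufferTail = 2  =>  tailMask = 0b00000011 (entries below tail)
  //   ringBufferHead = 5  =>  headMask = 0b00011111 (entries below head)
  //   enqDeqMask1 = tailMask ^ headMask = 0b00011100 (entries 2..4)
  // When head and tail carry the same flag (no wrap-around), enqDeqMask1
  // directly marks the in-use entries; after a wrap-around the flags differ
  // and the complement ~enqDeqMask1 marks the in-use entries instead.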
  // Enqueue at dispatch
  val emptyEntries = LoadQueueSize.U - distanceBetween(ringBufferHeadExtended, ringBufferTailExtended)
  XSDebug("(ready, valid): ")
  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount((0 until i).map(io.dp1Req(_).valid))
    val lqIdx = ringBufferHeadExtended + offset
    val index = lqIdx.value
    when(io.dp1Req(i).fire()) {
      uop(index) := io.dp1Req(i).bits
      allocated(index) := true.B
      valid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      listening(index) := false.B
      pending(index) := false.B
    }
    val numTryEnqueue = offset +& io.dp1Req(i).valid
    io.lqReady(i) := numTryEnqueue <= emptyEntries
    io.dp1Req(i).ready := io.lqReady(i) && io.sqReady(i)
    io.lqIdxs(i) := lqIdx
    XSDebug(false, true.B, "(%d, %d) ", io.dp1Req(i).ready, io.dp1Req(i).valid)
  }
  XSDebug(false, true.B, "\n")

  // dp1Req has RenameWidth entries, so iterate over RenameWidth here
  val firedDispatch = VecInit((0 until RenameWidth).map(io.dp1Req(_).fire())).asUInt
  when(firedDispatch.orR) {
    ringBufferHeadExtended := ringBufferHeadExtended + PopCount(firedDispatch)
    XSInfo("dispatched %d insts to lq\n", PopCount(firedDispatch))
  }

  // writeback load
  (0 until LoadPipelineWidth).map(i => {
    dataModule.io.wb(i).wen := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cbd lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      valid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      allocated(loadWbIndex) := !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR

      val loadWbData = Wire(new LsqEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.vaddr := io.loadIn(i).bits.vaddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // for mmio / misc / debug
      loadWbData.mmio := io.loadIn(i).bits.mmio
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      loadWbData.fwdData := io.loadIn(i).bits.forwardData
      loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb(i).wen := true.B

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed
      listening(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
    }
  })
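
  // A note on the miss-handling handshake below (a summary of the existing
  // logic, not new behavior): a missed load sets miss(i); once its refill
  // request is accepted by the dcache, miss(i) is cleared and listening(i)
  // is set; when the matching refill data comes back, listening(i) is
  // cleared and valid(i) is set, making the entry eligible for writeback.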
  // cache miss request
  val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

  val missRefillSelVec = VecInit(
    (0 until LoadQueueSize).map{ i =>
      val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
      allocated(i) && miss(i) && !inflight
    })

  val missRefillSel = getFirstOne(missRefillSelVec, tailMask)
  val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
  io.dcache.req.valid := missRefillSelVec.asUInt.orR
  io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.dcache.req.bits.addr := missRefillBlockAddr
  io.dcache.req.bits.data := DontCare
  io.dcache.req.bits.mask := DontCare

  io.dcache.req.bits.meta.id := DontCare
  io.dcache.req.bits.meta.vaddr := DontCare // dataModule.io.rdata(missRefillSel).vaddr
  io.dcache.req.bits.meta.paddr := missRefillBlockAddr
  io.dcache.req.bits.meta.uop := uop(missRefillSel)
  io.dcache.req.bits.meta.mmio := false.B // dataModule.io.rdata(missRefillSel).mmio
  io.dcache.req.bits.meta.tlb_miss := false.B
  io.dcache.req.bits.meta.mask := DontCare
  io.dcache.req.bits.meta.replay := false.B

  io.dcache.resp.ready := true.B

  assert(!(dataModule.io.rdata(missRefillSel).mmio && io.dcache.req.valid))

  when(io.dcache.req.fire()) {
    miss(missRefillSel) := false.B
    listening(missRefillSel) := true.B

    // mark this block as inflight
    inflightReqs(reqBlockIndex).valid := true.B
    inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
    assert(!inflightReqs(reqBlockIndex).valid) // reads the pre-update value: the victim entry must be free
  }

  when(io.dcache.resp.fire()) {
    val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
    assert(inflight)
    for (i <- 0 until cfg.nLoadMissEntries) {
      when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
        inflightReqs(i).valid := false.B
      }
    }
  }


  when(io.dcache.req.fire()){
    XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
      io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
      io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
    )
  }

  when(io.dcache.resp.fire()){
    XSDebug("miss resp: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x data %x\n",
      io.dcache.resp.bits.meta.uop.cf.pc, io.dcache.resp.bits.meta.uop.roqIdx.asUInt, io.dcache.resp.bits.meta.uop.lqIdx.asUInt,
      io.dcache.resp.bits.meta.paddr, io.dcache.resp.bits.data
    )
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.dcache := io.dcache.resp.bits

  (0 until LoadQueueSize).map(i => {
    val blockMatch = get_block_addr(dataModule.io.rdata(i).paddr) === io.dcache.resp.bits.meta.paddr
    dataModule.io.refill.wen(i) := false.B
    when(allocated(i) && listening(i) && blockMatch && io.dcache.resp.fire()) {
      dataModule.io.refill.wen(i) := true.B
      valid(i) := true.B
      listening(i) := false.B
    }
  })
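
  // Worked example of the two-way writeback select below (hypothetical
  // bit pattern, assuming a 4-entry queue):
  //   loadWbSelVec = 0b0110 means entries 1 and 2 hold refilled data that
  //   has not yet been written back.
  //   lselvec0 = PriorityEncoderOH(0b0110)           = 0b0010 -> loadWbSel(0) = 1
  //   lselvec1 = PriorityEncoderOH(0b0110 & ~0b0010) = 0b0100 -> loadWbSel(1) = 2
  // i.e. the two lowest-indexed eligible entries win the two writeback ports.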
  // writeback up to 2 missed load insts to CDB
  // pick the 2 lowest-indexed missed loads (data refilled) and write them back to cdb
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && valid(i) && !writebacked(i)
  })).asUInt() // use UInt instead of Vec to reduce verilog lines
  val loadWbSel = Wire(Vec(StorePipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = Wire(Vec(StorePipelineWidth, Bool()))
  val lselvec0 = PriorityEncoderOH(loadWbSelVec)
  val lselvec1 = PriorityEncoderOH(loadWbSelVec & (~lselvec0).asUInt)
  loadWbSel(0) := OHToUInt(lselvec0)
  loadWbSelV(0) := lselvec0.orR
  loadWbSel(1) := OHToUInt(lselvec1)
  loadWbSelV(1) := lselvec1.orR
  (0 until StorePipelineWidth).map(i => {
    // data select
    val rdata = dataModule.io.rdata(loadWbSel(i)).data
    val func = uop(loadWbSel(i)).ctrl.fuOpType
    val raddr = dataModule.io.rdata(loadWbSel(i)).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = LookupTree(func, List(
      LSUOpType.lb  -> SignExt(rdataSel(7, 0) , XLEN),
      LSUOpType.lh  -> SignExt(rdataSel(15, 0), XLEN),
      LSUOpType.lw  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.ld  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.lbu -> ZeroExt(rdataSel(7, 0) , XLEN),
      LSUOpType.lhu -> ZeroExt(rdataSel(15, 0), XLEN),
      LSUOpType.lwu -> ZeroExt(rdataSel(31, 0), XLEN),
      LSUOpType.flw -> boxF32ToF64(rdataSel(31, 0))
    ))
    io.ldout(i).bits.uop := uop(loadWbSel(i))
    io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(loadWbSel(i)).exception.asBools
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := dataModule.io.rdata(loadWbSel(i)).mmio
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelVec(loadWbSel(i)) && loadWbSelV(i)
    when(io.ldout(i).fire()) {
      writebacked(loadWbSel(i)) := true.B
      XSInfo("load miss write to cbd roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        dataModule.io.rdata(loadWbSel(i)).paddr,
        dataModule.io.rdata(loadWbSel(i)).data,
        dataModule.io.rdata(loadWbSel(i)).mmio
      )
    }
  })

  // move tailPtr
  // allocatedMask: dequeuePtr can go to the next 1-bit
  val allocatedMask = VecInit((0 until LoadQueueSize).map(i => allocated(i) || !enqDeqMask(i)))
  // find the first one from deqPtr (ringBufferTail)
  val nextTail1 = getFirstOneWithFlag(allocatedMask, tailMask, ringBufferTailExtended.flag)
  val nextTail = Mux(Cat(allocatedMask).orR, nextTail1, ringBufferHeadExtended)
  ringBufferTailExtended := nextTail

  // When a load is committed, mark it as !allocated; this entry will be recycled later
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })
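
  // Background for the rollback logic below (a summary of the existing
  // behavior): when a store address arrives, every younger load that has
  // already obtained data (from the queue, from a concurrent load writeback,
  // or still in the load pipeline) and overlaps the store's 8-byte-aligned
  // address and byte mask may have read stale data, so execution must be
  // replayed starting from that load.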
  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new Redirect)))

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getFirstOneWithFlag(mask: Vec[Bool], startMask: UInt, startFlag: Bool) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    val changeDirection = !highBitsUint.orR()
    val index = PriorityEncoder(Mux(!changeDirection, highBitsUint, mask.asUInt))
    LqPtr(startFlag ^ changeDirection, index)
  }

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  def rangeMask(start: LqPtr, end: LqPtr): UInt = {
    val startMask = (1.U((LoadQueueSize + 1).W) << start.value).asUInt - 1.U
    val endMask = (1.U((LoadQueueSize + 1).W) << end.value).asUInt - 1.U
    val xorMask = startMask(LoadQueueSize - 1, 0) ^ endMask(LoadQueueSize - 1, 0)
    Mux(start.flag === end.flag, xorMask, ~xorMask)
  }

  // ignore data forward
  (0 until LoadPipelineWidth).foreach(i => {
    io.forward(i).forwardMask := DontCare
    io.forward(i).forwardData := DontCare
  })
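
  // Worked example for getFirstOne above (hypothetical 4-entry queue):
  //   mask      = 0b1001 (entries 0 and 3 are candidates)
  //   startMask = 0b0011 (entries below the start pointer, here start = 2)
  //   highBits  = mask & ~startMask = 0b1000, which is non-zero, so entry 3
  //   (the first candidate at or above the start pointer) is chosen; had
  //   highBits been zero, the search would have wrapped and picked entry 0.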
  // store backward query and rollback
  // val needCheck = Seq.fill(8)(WireInit(true.B))
  (0 until StorePipelineWidth).foreach(i => {
    rollback(i) := DontCare

    when(io.storeIn(i).valid) {
      val startIndex = io.storeIn(i).bits.uop.lqIdx.value
      val lqIdxMask = ((1.U((LoadQueueSize + 1).W) << startIndex).asUInt - 1.U)(LoadQueueSize - 1, 0)
      val xorMask = lqIdxMask ^ headMask
      val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === ringBufferHeadExtended.flag
      val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

      // check if a load already in lq needs to be rolled back
      val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
        val addrMatch = allocated(j) &&
          io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === dataModule.io.rdata(j).paddr(PAddrBits - 1, 3)
        val entryNeedCheck = toEnqPtrMask(j) && addrMatch && (valid(j) || listening(j) || miss(j))
        // TODO: update refilled data
        val violationVec = (0 until 8).map(k => dataModule.io.rdata(j).mask(k) && io.storeIn(i).bits.mask(k))
        Cat(violationVec).orR() && entryNeedCheck
      }))
      val lqViolation = lqViolationVec.asUInt().orR()
      val lqViolationIndex = getFirstOne(lqViolationVec, lqIdxMask)
      val lqViolationUop = uop(lqViolationIndex)
      XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

      // when l/s writeback to roq together, check if rollback is needed
      val wbViolationVec = VecInit((0 until LoadPipelineWidth).map(j => {
        io.loadIn(j).valid &&
          isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
          io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
          (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
      }))
      val wbViolation = wbViolationVec.asUInt().orR()
      val wbViolationUop = getOldestInTwo(wbViolationVec, io.loadIn.map(_.bits.uop))
      XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

      // check if rollback is needed for a load still in l1
      val l1ViolationVec = VecInit((0 until LoadPipelineWidth).map(j => {
        io.forward(j).valid && // L4 valid
          isAfter(io.forward(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
          io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.forward(j).paddr(PAddrBits - 1, 3) &&
          (io.storeIn(i).bits.mask & io.forward(j).mask).orR
      }))
      val l1Violation = l1ViolationVec.asUInt().orR()
      val l1ViolationUop = getOldestInTwo(l1ViolationVec, io.forward.map(_.uop))
      XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

      val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
      val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)
      rollback(i).valid := Cat(rollbackValidVec).orR
      val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
      val oneAfterZero = mask(1)(0)
      val rollbackUop = Mux(oneAfterZero && mask(2)(0),
        rollbackUopVec(0),
        Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))
      rollback(i).bits.roqIdx := rollbackUop.roqIdx - 1.U

      rollback(i).bits.isReplay := true.B
      rollback(i).bits.isMisPred := false.B
      rollback(i).bits.isException := false.B
      rollback(i).bits.isFlushPipe := false.B

      XSDebug(
        l1Violation,
        "need rollback (l4 load) pc %x roqidx %d target %x\n",
        io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
      )
      XSDebug(
        lqViolation,
        "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
        io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
      )
      XSDebug(
        wbViolation,
        "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
        io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
      )
    }.otherwise {
      rollback(i).valid := false.B
    }
  })

  def rollbackSel(a: Valid[Redirect], b: Valid[Redirect]): ValidIO[Redirect] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }

  io.rollback := ParallelOperation(rollback, rollbackSel)
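
  // Background for the uncache path below (a summary of the existing
  // behavior): an mmio load is held in the queue with pending set and is
  // only issued to the uncache channel once it reaches the head of the roq
  // (roqDeqPtr matches its roqIdx and the commit stream shows a non-walk
  // LOAD), because mmio accesses may have side effects and must not be
  // executed speculatively.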
  // Memory mapped IO / other uncached operations

  // setup misc mem access req
  // mask / paddr / data can be fetched from lq.data
  val commitType = io.commits(0).bits.uop.ctrl.commitType
  io.uncache.req.valid := pending(ringBufferTail) && allocated(ringBufferTail) &&
    commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(ringBufferTail).roqIdx &&
    !io.commits(0).bits.isWalk

  io.uncache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.rdata(ringBufferTail).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(ringBufferTail).data
  io.uncache.req.bits.mask := dataModule.io.rdata(ringBufferTail).mask

  io.uncache.req.bits.meta.id := DontCare // TODO: // FIXME
  io.uncache.req.bits.meta.vaddr := DontCare
  io.uncache.req.bits.meta.paddr := dataModule.io.rdata(ringBufferTail).paddr
  io.uncache.req.bits.meta.uop := uop(ringBufferTail)
  io.uncache.req.bits.meta.mmio := true.B // dataModule.io.rdata(ringBufferTail).mmio
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask := dataModule.io.rdata(ringBufferTail).mask
  io.uncache.req.bits.meta.replay := false.B

  io.uncache.resp.ready := true.B

  when(io.uncache.req.fire()){
    pending(ringBufferTail) := false.B
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    valid(ringBufferTail) := true.B
    dataModule.io.uncacheWrite(ringBufferTail, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B
    // TODO: write back exception info
  }

  when(io.uncache.req.fire()){
    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(ringBufferTail).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  when(io.uncache.resp.fire()){
    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.lqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when(needCancel(i)) {
      when(io.brqRedirect.bits.isReplay){
        valid(i) := false.B
        writebacked(i) := false.B
        listening(i) := false.B
        miss(i) := false.B
        pending(i) := false.B
      }.otherwise{
        allocated(i) := false.B
      }
    }
  }
  when (io.brqRedirect.valid && io.brqRedirect.bits.isMisPred) {
    ringBufferHeadExtended := ringBufferHeadExtended - PopCount(needCancel)
  }

  // assert(!io.rollback.valid)
  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  // debug info
  XSDebug("head %d:%d tail %d:%d\n", ringBufferHeadExtended.flag, ringBufferHead, ringBufferTailExtended.flag, ringBufferTail)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && valid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}