package xiangshan.mem

import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.backend.LSUOpType
import xiangshan.backend.roq.RoqPtr


class SqPtr extends CircularQueuePtr(SqPtr.StoreQueueSize) { }

object SqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): SqPtr = {
    val ptr = Wire(new SqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

// Store Queue
class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val enq = new Bundle() {
      val canAccept = Output(Bool())
      val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
      val resp = Vec(RenameWidth, Output(new SqPtr))
    }
    val brqRedirect = Input(Valid(new Redirect))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val sbuffer = Vec(StorePipelineWidth, Decoupled(new DCacheWordReq))
    val stout = Vec(2, DecoupledIO(new ExuOutput)) // writeback store
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(Vec(CommitWidth, Valid(new RoqCommit)))
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    // val refill = Flipped(Valid(new DCacheLineReq ))
    val oldestStore = Output(Valid(new RoqPtr))
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(StoreQueueSize, new MicroOp))
  // val data = Reg(Vec(StoreQueueSize, new LsqEntry))
  val dataModule = Module(new LSQueueData(StoreQueueSize, StorePipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // sq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio data is valid
  val writebacked = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // inst has been written back to the CDB
  val commited = Reg(Vec(StoreQueueSize, Bool())) // inst has been committed by the ROQ
  val pending = Reg(Vec(StoreQueueSize, Bool())) // mmio pending: the inst is an MMIO inst and will not be executed until it reaches the dequeue end of the ROQ

  val ringBufferHeadExtended = RegInit(0.U.asTypeOf(new SqPtr))
  val ringBufferTailExtended = RegInit(0.U.asTypeOf(new SqPtr))
  val ringBufferHead = ringBufferHeadExtended.value
  val ringBufferTail = ringBufferTailExtended.value
  val ringBufferSameFlag = ringBufferHeadExtended.flag === ringBufferTailExtended.flag
  val ringBufferEmpty = ringBufferHead === ringBufferTail && ringBufferSameFlag
  val ringBufferFull = ringBufferHead === ringBufferTail && !ringBufferSameFlag
  val ringBufferAllowin = !ringBufferFull

  val storeCommit = (0 until CommitWidth).map(i => io.commits(i).valid && !io.commits(i).bits.isWalk && io.commits(i).bits.uop.ctrl.commitType === CommitType.STORE)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits(i).bits.uop.sqIdx.value)

  val tailMask = (((1.U((StoreQueueSize + 1).W)) << ringBufferTail).asUInt - 1.U)(StoreQueueSize - 1, 0)
  val headMask = (((1.U((StoreQueueSize + 1).W)) << ringBufferHead).asUInt - 1.U)(StoreQueueSize - 1, 0)
  val enqDeqMask1 = tailMask ^ headMask
  val enqDeqMask = Mux(ringBufferSameFlag, enqDeqMask1, ~enqDeqMask1)
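  // Worked example of the mask arithmetic above (illustrative values only,
  // assuming StoreQueueSize = 8): with tail = 2 and head = 5 under the same
  // flag,
  //   tailMask    = (1 << 2) - 1        = 0b00000011
  //   headMask    = (1 << 5) - 1        = 0b00011111
  //   enqDeqMask1 = tailMask ^ headMask = 0b00011100  -> entries 2..4 in use
  // When the flags differ (head has wrapped past the end), the in-use region
  // is the complement: with tail = 5 and head = 2, ~enqDeqMask1 = 0b11100011
  // covers entries 5..7 and 0..1.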
  // Enqueue at dispatch
  val validEntries = distanceBetween(ringBufferHeadExtended, ringBufferTailExtended)
  val firedDispatch = io.enq.req.map(_.valid)
  io.enq.canAccept := validEntries <= (StoreQueueSize - RenameWidth).U
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(firedDispatch))}\n")
  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount((0 until i).map(firedDispatch(_)))
    val sqIdx = ringBufferHeadExtended + offset
    val index = sqIdx.value
    when(io.enq.req(i).valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := sqIdx

    XSError(!io.enq.canAccept && io.enq.req(i).valid, "should not be valid when not ready\n")
  }

  when(Cat(firedDispatch).orR) {
    ringBufferHeadExtended := ringBufferHeadExtended + PopCount(firedDispatch)
    XSInfo("dispatched %d insts to sq\n", PopCount(firedDispatch))
  }

  // writeback store
  (0 until StorePipelineWidth).map(i => {
    dataModule.io.wb(i).wen := false.B
    when(io.storeIn(i).fire()) {
      val stWbIndex = io.storeIn(i).bits.uop.sqIdx.value
      val hasException = io.storeIn(i).bits.uop.cf.exceptionVec.asUInt.orR
      datavalid(stWbIndex) := !io.storeIn(i).bits.mmio || hasException
      pending(stWbIndex) := io.storeIn(i).bits.mmio && !hasException

      val storeWbData = Wire(new LsqEntry)
      storeWbData := DontCare
      storeWbData.paddr := io.storeIn(i).bits.paddr
      storeWbData.vaddr := io.storeIn(i).bits.vaddr
      storeWbData.mask := io.storeIn(i).bits.mask
      storeWbData.data := io.storeIn(i).bits.data
      storeWbData.mmio := io.storeIn(i).bits.mmio
      storeWbData.exception := io.storeIn(i).bits.uop.cf.exceptionVec.asUInt

      dataModule.io.wbWrite(i, stWbIndex, storeWbData)
      dataModule.io.wb(i).wen := true.B

      XSInfo("store write to sq idx %d pc 0x%x vaddr %x paddr %x data %x mmio %x roll %x exc %x\n",
        io.storeIn(i).bits.uop.sqIdx.value,
        io.storeIn(i).bits.uop.cf.pc,
        io.storeIn(i).bits.vaddr,
        io.storeIn(i).bits.paddr,
        io.storeIn(i).bits.data,
        io.storeIn(i).bits.mmio,
        io.storeIn(i).bits.rollback,
        io.storeIn(i).bits.uop.cf.exceptionVec.asUInt
      )
    }
  })

  // find the first set bit in mask, giving priority to positions at or
  // beyond the boundary encoded by startMask and wrapping around otherwise
  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  // same as getFirstOne, but also returns the flag bit, which is flipped
  // when the search wraps around
  def getFirstOneWithFlag(mask: Vec[Bool], startMask: UInt, startFlag: Bool) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    val changeDirection = !highBitsUint.orR()
    val index = PriorityEncoder(Mux(!changeDirection, highBitsUint, mask.asUInt))
    SqPtr(startFlag ^ changeDirection, index)
  }

  def selectFirstTwo(valid: Vec[Bool], startMask: UInt) = {
    val selVec = Wire(Vec(2, UInt(log2Up(StoreQueueSize).W)))
    val selValid = Wire(Vec(2, Bool()))
    selVec(0) := getFirstOne(valid, startMask)
    val firstSelMask = UIntToOH(selVec(0))
    val secondSelVec = VecInit((0 until valid.length).map(i => valid(i) && !firstSelMask(i)))
    selVec(1) := getFirstOne(secondSelVec, startMask)
    selValid(0) := Cat(valid).orR
    selValid(1) := Cat(secondSelVec).orR
    (selValid, selVec)
  }

  def selectFirstTwoRoughly(valid: Vec[Bool]) = {
    // TODO: do not select according to seq, just select 2 valid bits randomly
    val firstSelVec = valid
    // notFirstVec(i): a valid bit exists at an index strictly below i,
    // so entry i cannot be the first candidate
    val notFirstVec = Wire(Vec(valid.length, Bool()))
    (0 until valid.length).map(i =>
      notFirstVec(i) := (if (i != 0) { valid(i - 1) || notFirstVec(i - 1) } else { false.B })
    )
    // mask off the first candidate to find the second one
    val secondSelVec = VecInit((0 until valid.length).map(i => valid(i) && notFirstVec(i)))

    val selVec = Wire(Vec(2, UInt(log2Up(valid.length).W)))
    val selValid = Wire(Vec(2, Bool()))
    selVec(0) := PriorityEncoder(firstSelVec)
    selVec(1) := PriorityEncoder(secondSelVec)
    selValid(0) := Cat(firstSelVec).orR
    selValid(1) := Cat(secondSelVec).orR
    (selValid, selVec)
  }
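  // Illustrative trace of the selection helpers above (values assume
  // StoreQueueSize = 8): for getFirstOne with
  //   mask      = 0b01000010  (entries 1 and 6 are candidates)
  //   startMask = tailMask for tail = 3, i.e. 0b00000111
  // highBits = mask & ~startMask = 0b01000000, so entry 6 is chosen:
  // candidates at or beyond the tail take priority, and the encoder only
  // wraps around to entry 1 when no such candidate exists.
  // getFirstOneWithFlag additionally flips the flag bit on wrap-around so
  // the result remains a well-formed SqPtr.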
  // select the youngest store in the consecutive datavalid run that starts
  // at the tail: storeNotValid points to the first entry whose data is not
  // yet valid, so the entry just before it closes that run
  val validStoreVec = VecInit((0 until StoreQueueSize).map(i => !(allocated(i) && datavalid(i))))
  val storeNotValid = SqPtr(false.B, getFirstOne(validStoreVec, tailMask))
  val storeValidIndex = (storeNotValid - 1.U).value
  io.oldestStore.valid := allocated(ringBufferTailExtended.value) && datavalid(ringBufferTailExtended.value) && !commited(storeValidIndex)
  io.oldestStore.bits := uop(storeValidIndex).roqIdx

  // writeback up to 2 store insts to CDB
  // choose the first two valid store requests from deqPtr
  val storeWbSelVec = VecInit((0 until StoreQueueSize).map(i => allocated(i) && datavalid(i) && !writebacked(i)))
  val (storeWbValid, storeWbSel) = selectFirstTwo(storeWbSelVec, tailMask)

  (0 until StorePipelineWidth).map(i => {
    io.stout(i).bits.uop := uop(storeWbSel(i))
    io.stout(i).bits.uop.sqIdx := storeWbSel(i).asTypeOf(new SqPtr)
    io.stout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(storeWbSel(i)).exception.asBools
    io.stout(i).bits.data := dataModule.io.rdata(storeWbSel(i)).data
    io.stout(i).bits.redirectValid := false.B
    io.stout(i).bits.redirect := DontCare
    io.stout(i).bits.brUpdate := DontCare
    io.stout(i).bits.debug.isMMIO := dataModule.io.rdata(storeWbSel(i)).mmio
    io.stout(i).valid := storeWbSelVec(storeWbSel(i)) && storeWbValid(i)
    when(io.stout(i).fire()) {
      writebacked(storeWbSel(i)) := true.B
    }
    io.stout(i).bits.fflags := DontCare
  })

  // remove retired insts from sq, add retired stores to sbuffer

  // move tailPtr
  // TailPtr slow recovery: recycle bubbles in the store queue
  // allocatedMask: the dequeue pointer can advance to the next 1-bit
  val allocatedMask = VecInit((0 until StoreQueueSize).map(i => allocated(i) || !enqDeqMask(i)))
  // find the first one from deqPtr (ringBufferTail)
  val nextTail1 = getFirstOneWithFlag(allocatedMask, tailMask, ringBufferTailExtended.flag)
  val nextTail = Mux(Cat(allocatedMask).orR, nextTail1, ringBufferHeadExtended)
  ringBufferTailExtended := nextTail

  // TailPtr fast recovery
  val tailRecycle = VecInit(List(
    io.uncache.resp.fire() || io.sbuffer(0).fire(),
    io.sbuffer(1).fire()
  ))

  when(tailRecycle.asUInt.orR){
    ringBufferTailExtended := ringBufferTailExtended + PopCount(tailRecycle.asUInt)
  }

  // load forward query
  // check all sq entries and forward data from the first matched store
  (0 until LoadPipelineWidth).map(i => {
    io.forward(i).forwardMask := 0.U(8.W).asBools
    io.forward(i).forwardData := DontCare

    // Compare ringBufferTail (deqPtr) and forward.sqIdx; there are two cases:
    // (1) if they have the same flag, we need to check range(tail, sqIdx)
    // (2) if they have different flags, we need to check range(tail, StoreQueueSize) and range(0, sqIdx)
    // Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, StoreQueueSize))
    // Forward2: Mux(same_flag, 0.U,                range(0, sqIdx))
    // i.e. forward1 covers the entries that share the tail's flag and
    // forward2 covers the entries after the wrap-around

    val differentFlag = ringBufferTailExtended.flag =/= io.forward(i).sqIdx.flag
    val forwardMask = ((1.U((StoreQueueSize + 1).W)) << io.forward(i).sqIdx.value).asUInt - 1.U
    val storeWritebackedVec = WireInit(VecInit(Seq.fill(StoreQueueSize)(false.B)))
    for (j <- 0 until StoreQueueSize) {
      storeWritebackedVec(j) := datavalid(j) && allocated(j) // all datavalid terms need to be checked
    }
    val needForward1 = Mux(differentFlag, ~tailMask, tailMask ^ forwardMask) & storeWritebackedVec.asUInt
    val needForward2 = Mux(differentFlag, forwardMask, 0.U(StoreQueueSize.W)) & storeWritebackedVec.asUInt

    XSDebug("" + i + " f1 %b f2 %b sqIdx %d pa %x\n", needForward1, needForward2, io.forward(i).sqIdx.asUInt, io.forward(i).paddr)

    // do the real forward query
    dataModule.io.forwardQuery(
      channel = i,
      paddr = io.forward(i).paddr,
      needForward1 = needForward1,
      needForward2 = needForward2
    )

    io.forward(i).forwardMask := dataModule.io.forward(i).forwardMask
    io.forward(i).forwardData := dataModule.io.forward(i).forwardData
  })
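  // Worked example of the forwarding ranges above (illustrative, assuming
  // StoreQueueSize = 8): with tail = 6 (flag 0) and a load whose sqIdx is
  // 2 (flag 1), differentFlag is true and
  //   forwardMask  = (1 << 2) - 1 = 0b00000011
  //   needForward1 = ~tailMask   & storeWritebackedVec  -> entries 6..7
  //   needForward2 = forwardMask & storeWritebackedVec  -> entries 0..1
  // The two ranges together cover exactly the stores older than the load,
  // in allocation order across the wrap-around point.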
  // When a store is committed, mark it as commited (it will no longer be affected by redirect)
  (0 until CommitWidth).map(i => {
    when(storeCommit(i)) {
      commited(mcommitIdx(i)) := true.B
      XSDebug("store commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  (0 until 2).map(i => {
    val ptr = (ringBufferTailExtended + i.U).value
    val mmio = dataModule.io.rdata(ptr).mmio
    io.sbuffer(i).valid := allocated(ptr) && commited(ptr) && !mmio
    io.sbuffer(i).bits.cmd := MemoryOpConstants.M_XWR
    io.sbuffer(i).bits.addr := dataModule.io.rdata(ptr).paddr
    io.sbuffer(i).bits.data := dataModule.io.rdata(ptr).data
    io.sbuffer(i).bits.mask := dataModule.io.rdata(ptr).mask
    io.sbuffer(i).bits.meta := DontCare
    io.sbuffer(i).bits.meta.tlb_miss := false.B
    io.sbuffer(i).bits.meta.uop := DontCare
    io.sbuffer(i).bits.meta.mmio := mmio
    io.sbuffer(i).bits.meta.mask := dataModule.io.rdata(ptr).mask

    when(io.sbuffer(i).fire()) {
      allocated(ptr) := false.B
      XSDebug("sbuffer "+i+" fire: ptr %d\n", ptr)
    }
  })

  // Memory mapped IO / other uncached operations

  // set up the misc mem access req
  // mask / paddr / data can be read from sq.data
  val commitType = io.commits(0).bits.uop.ctrl.commitType
  io.uncache.req.valid := pending(ringBufferTail) && allocated(ringBufferTail) &&
    commitType === CommitType.STORE &&
    io.roqDeqPtr === uop(ringBufferTail).roqIdx &&
    !io.commits(0).bits.isWalk

  io.uncache.req.bits.cmd := MemoryOpConstants.M_XWR
  io.uncache.req.bits.addr := dataModule.io.rdata(ringBufferTail).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(ringBufferTail).data
  io.uncache.req.bits.mask := dataModule.io.rdata(ringBufferTail).mask

  io.uncache.req.bits.meta.id := DontCare // TODO // FIXME
  io.uncache.req.bits.meta.vaddr := DontCare
  io.uncache.req.bits.meta.paddr := dataModule.io.rdata(ringBufferTail).paddr
  io.uncache.req.bits.meta.uop := uop(ringBufferTail)
  io.uncache.req.bits.meta.mmio := true.B // dataModule.io.rdata(ringBufferTail).mmio
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask := dataModule.io.rdata(ringBufferTail).mask
  io.uncache.req.bits.meta.replay := false.B

  io.uncache.resp.ready := true.B
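  // How an MMIO store proceeds through this interface: the request above is
  // only presented once the store reaches the ROQ dequeue end (io.roqDeqPtr
  // equals its roqIdx); the req.fire() block below then clears pending so the
  // request is issued exactly once, and resp.fire() marks the data valid so
  // the entry can be written back through io.stout and the tail advanced by
  // the fast recovery logic.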
  when(io.uncache.req.fire()){
    pending(ringBufferTail) := false.B
  }

  when(io.uncache.resp.fire()){
    datavalid(ringBufferTail) := true.B // will be written back to the CDB in the next cycle
    // TODO: write back exception info
  }

  when(io.uncache.req.fire()){
    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(ringBufferTail).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.sqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate sq entries using roqIdx
  val needCancel = Wire(Vec(StoreQueueSize, Bool()))
  for (i <- 0 until StoreQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when(needCancel(i)) {
      when(io.brqRedirect.bits.isReplay){
        datavalid(i) := false.B
        writebacked(i) := false.B
        pending(i) := false.B
      }.otherwise{
        allocated(i) := false.B
      }
    }
  }
  when (io.brqRedirect.valid && io.brqRedirect.bits.isMisPred) {
    ringBufferHeadExtended := ringBufferHeadExtended - PopCount(needCancel)
  }

  // debug info
  XSDebug("head %d:%d tail %d:%d\n", ringBufferHeadExtended.flag, ringBufferHead, ringBufferTailExtended.flag, ringBufferTail)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until StoreQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == StoreQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}
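// Minimal elaboration sketch for inspecting this module standalone
// (hypothetical; it assumes the XiangShan build supplies its usual global
// parameters and a Chisel version that provides chisel3.stage.ChiselStage):
//
//   object StoreQueueMain extends App {
//     (new chisel3.stage.ChiselStage).emitVerilog(new StoreQueue)
//   }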