xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision c8b1e4db9cf506f40d3cbddfbd259cfecf0168b7)
package xiangshan.mem

import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.backend.LSUOpType
import xiangshan.backend.roq.RoqPtr
import xiangshan.backend.fu.fpu.boxF32ToF64


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}


// Load Queue
class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val enq = new Bundle() {
      val canAccept = Output(Bool())
      val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
      val resp = Vec(RenameWidth, Output(new LqPtr))
    }
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // FIXME: Valid() only
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback load
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = new DCacheLineIO
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
    // val refill = Flipped(Valid(new DCacheLineReq ))
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LSQueueData(LoadQueueSize, LoadPipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq

  val enqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val enqPtr = enqPtrExt.value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt.flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits.valid(i) && !io.commits.isWalk && io.commits.uop(i).ctrl.commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits.uop(i).lqIdx.value)

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)
  val enqDeqMask1 = deqMask ^ enqMask
  val enqDeqMask = Mux(sameFlag, enqDeqMask1, ~enqDeqMask1)

  // Enqueue at dispatch
  val validEntries = distanceBetween(enqPtrExt, deqPtrExt)
  val firedDispatch = io.enq.req.map(_.valid)
  io.enq.canAccept := validEntries <= (LoadQueueSize - RenameWidth).U
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(firedDispatch))}\n")
  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount((0 until i).map(firedDispatch(_)))
    val lqIdx = enqPtrExt + offset
    val index = lqIdx.value
    when(io.enq.req(i).valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx

    XSError(!io.enq.canAccept && io.enq.req(i).valid, "should not be valid when not ready\n")
  }

  when(Cat(firedDispatch).orR) {
    enqPtrExt := enqPtrExt + PopCount(firedDispatch)
    XSInfo("dispatched %d insts to lq\n", PopCount(firedDispatch))
  }
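
  // Dispatch example: with RenameWidth = 4 and firedDispatch = (1, 0, 1, 1), the
  // computed offsets are 0, 1, 1, 2, so each valid request claims the next free slot
  // after those taken by earlier valid requests in the same cycle, and enqPtrExt
  // finally advances by PopCount(firedDispatch) = 3.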

  // writeback load
  (0 until LoadPipelineWidth).map(i => {
    dataModule.io.wb(i).wen := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqIdx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }

      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      allocated(loadWbIndex) := !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR

      val loadWbData = Wire(new LsqEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.vaddr := io.loadIn(i).bits.vaddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // for mmio / misc / debug
      loadWbData.mmio := io.loadIn(i).bits.mmio
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      loadWbData.fwdData := io.loadIn(i).bits.forwardData
      loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb(i).wen := true.B

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed
      listening(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
    }
  })
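
  // Entry state after a load writes back from the pipeline:
  //   hit, non-mmio: datavalid and writebacked are set, the entry just waits for commit
  //   dcache miss:   miss and listening are set, the entry waits for the refill logic below
  //   mmio:          pending is set, the access is issued via io.uncache once it reaches the roq head
  //   exception:     allocated is cleared and the entry is dropped (the exception is handled by the roq)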

  // cache miss request
  val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

  val missRefillSelVec = VecInit(
    (0 until LoadQueueSize).map{ i =>
      val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
      allocated(i) && miss(i) && !inflight
    })

  val missRefillSel = getFirstOne(missRefillSelVec, deqMask)
  val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
  io.dcache.req.valid := missRefillSelVec.asUInt.orR
  io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.dcache.req.bits.addr := missRefillBlockAddr
  io.dcache.req.bits.data := DontCare
  io.dcache.req.bits.mask := DontCare

  io.dcache.req.bits.meta.id       := DontCare
  io.dcache.req.bits.meta.vaddr    := DontCare // dataModule.io.rdata(missRefillSel).vaddr
  io.dcache.req.bits.meta.paddr    := missRefillBlockAddr
  io.dcache.req.bits.meta.uop      := uop(missRefillSel)
  io.dcache.req.bits.meta.mmio     := false.B // dataModule.io.rdata(missRefillSel).mmio
  io.dcache.req.bits.meta.tlb_miss := false.B
  io.dcache.req.bits.meta.mask     := DontCare
  io.dcache.req.bits.meta.replay   := false.B

  io.dcache.resp.ready := true.B

  assert(!(dataModule.io.rdata(missRefillSel).mmio && io.dcache.req.valid))

  when(io.dcache.req.fire()) {
    miss(missRefillSel) := false.B
    listening(missRefillSel) := true.B

    // mark this block as inflight
    inflightReqs(reqBlockIndex).valid := true.B
    inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
    // the assert reads the pre-update register value: the selected slot must be free
    assert(!inflightReqs(reqBlockIndex).valid)
  }

  when(io.dcache.resp.fire()) {
    val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
    assert(inflight)
    for (i <- 0 until cfg.nLoadMissEntries) {
      when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
        inflightReqs(i).valid := false.B
      }
    }
  }
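
  // inflightReqs deduplicates miss requests per cache block: an entry whose block
  // address is already being fetched is masked out of missRefillSelVec and simply keeps
  // listening, so at most one dcache request per block is outstanding. The matching
  // inflight slots are freed when the refill response returns.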

  when(io.dcache.req.fire()){
    XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
      io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
      io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
    )
  }

  when(io.dcache.resp.fire()){
    XSDebug("miss resp: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x data %x\n",
      io.dcache.resp.bits.meta.uop.cf.pc, io.dcache.resp.bits.meta.uop.roqIdx.asUInt, io.dcache.resp.bits.meta.uop.lqIdx.asUInt,
      io.dcache.resp.bits.meta.paddr, io.dcache.resp.bits.data
    )
  }

  // Refill 64 bits in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.dcache := io.dcache.resp.bits

  (0 until LoadQueueSize).map(i => {
    val blockMatch = get_block_addr(dataModule.io.rdata(i).paddr) === io.dcache.resp.bits.meta.paddr
    dataModule.io.refill.wen(i) := false.B
    when(allocated(i) && listening(i) && blockMatch && io.dcache.resp.fire()) {
      dataModule.io.refill.wen(i) := true.B
      datavalid(i) := true.B
      listening(i) := false.B
    }
  })
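
  // A refill wakes every listening entry whose block address matches the response, not
  // just the requester. The refilled line is written through dataModule.io.refill; per
  // the fwdMask/fwdData recorded at writeback, bytes already obtained by store-to-load
  // forwarding are presumably kept in preference to refill data (see LSQueueData).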

  // writeback up to 2 missed load insts to CDB
  // pick (up to) 2 missed loads whose data has been refilled and write them back to the cdb
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && datavalid(i) && !writebacked(i)
  })).asUInt() // use UInt instead of Vec to reduce generated Verilog lines
  val loadWbSel = Wire(Vec(StorePipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = Wire(Vec(StorePipelineWidth, Bool()))
  val lselvec0 = PriorityEncoderOH(loadWbSelVec)
  val lselvec1 = PriorityEncoderOH(loadWbSelVec & (~lselvec0).asUInt)
  loadWbSel(0) := OHToUInt(lselvec0)
  loadWbSelV(0) := lselvec0.orR
  loadWbSel(1) := OHToUInt(lselvec1)
  loadWbSelV(1) := lselvec1.orR
  (0 until StorePipelineWidth).map(i => {
    // data select
    val rdata = dataModule.io.rdata(loadWbSel(i)).data
    val func = uop(loadWbSel(i)).ctrl.fuOpType
    val raddr = dataModule.io.rdata(loadWbSel(i)).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
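    // Example: an lh whose paddr ends in 0b010 selects rdata(63, 16); the LSUOpType.lh
    // case below then sign-extends bits (15, 0) of the shifted value, i.e. the halfword
    // at byte offset 2 of the 64-bit entry.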
    val rdataPartialLoad = LookupTree(func, List(
        LSUOpType.lb   -> SignExt(rdataSel(7, 0) , XLEN),
        LSUOpType.lh   -> SignExt(rdataSel(15, 0), XLEN),
        LSUOpType.lw   -> SignExt(rdataSel(31, 0), XLEN),
        LSUOpType.ld   -> SignExt(rdataSel(63, 0), XLEN),
        LSUOpType.lbu  -> ZeroExt(rdataSel(7, 0) , XLEN),
        LSUOpType.lhu  -> ZeroExt(rdataSel(15, 0), XLEN),
        LSUOpType.lwu  -> ZeroExt(rdataSel(31, 0), XLEN),
        LSUOpType.flw  -> boxF32ToF64(rdataSel(31, 0))
    ))
    io.ldout(i).bits.uop := uop(loadWbSel(i))
    io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(loadWbSel(i)).exception.asBools
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := dataModule.io.rdata(loadWbSel(i)).mmio
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelVec(loadWbSel(i)) && loadWbSelV(i)
    when(io.ldout(i).fire()) {
      writebacked(loadWbSel(i)) := true.B
      XSInfo("load miss write to cdb roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        dataModule.io.rdata(loadWbSel(i)).paddr,
        dataModule.io.rdata(loadWbSel(i)).data,
        dataModule.io.rdata(loadWbSel(i)).mmio
      )
    }
  })

  // move deqPtr (the tail pointer)
  // allocatedMask: deqPtr may advance to the next set bit
  val allocatedMask = VecInit((0 until LoadQueueSize).map(i => allocated(i) || !enqDeqMask(i)))
  // find the first allocated entry, searching from deqPtr
  val nextTail1 = getFirstOneWithFlag(allocatedMask, deqMask, deqPtrExt.flag)
  val nextTail = Mux(Cat(allocatedMask).orR, nextTail1, enqPtrExt)
  deqPtrExt := nextTail
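
  // Entries outside the valid window read as 1 in allocatedMask, so an emptied window
  // moves deqPtr straight to enqPtr; the all-zero fallback (a full queue whose entries
  // were all cancelled) jumps to enqPtrExt directly.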

  // When a load is committed, clear allocated: the entry will be recycled later
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getFirstOneWithFlag(mask: Vec[Bool], startMask: UInt, startFlag: Bool) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    val changeDirection = !highBitsUint.orR()
    val index = PriorityEncoder(Mux(!changeDirection, highBitsUint, mask.asUInt))
    LqPtr(startFlag ^ changeDirection, index)
  }
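
  // Both helpers search circularly: bits at or above the start position (those not
  // covered by startMask) win first; only if none is set does the search wrap to the low
  // bits. getFirstOneWithFlag additionally flips the queue flag on wrap-around so the
  // result is a well-formed LqPtr.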

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  def rangeMask(start: LqPtr, end: LqPtr): UInt = {
    val startMask = (1.U((LoadQueueSize + 1).W) << start.value).asUInt - 1.U
    val endMask = (1.U((LoadQueueSize + 1).W) << end.value).asUInt - 1.U
    val xorMask = startMask(LoadQueueSize - 1, 0) ^ endMask(LoadQueueSize - 1, 0)
    Mux(start.flag === end.flag, xorMask, ~xorMask)
  }
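
  // rangeMask example (LoadQueueSize = 8): start = 6 with flag 0 and end = 2 with flag 1
  // give startMask = 0b00111111, endMask = 0b00000011, xorMask = 0b00111100; the flags
  // differ, so the result is ~xorMask = 0b11000011, i.e. entries 6, 7 and the wrapped
  // entries 0, 1.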

  // lq does not forward data: io.forward's response fields are left DontCare; the query
  // side (uop/paddr/mask) is only snooped by the rollback check below
  (0 until LoadPipelineWidth).foreach(i => {
    io.forward(i).forwardMask := DontCare
    io.forward(i).forwardData := DontCare
  })

  // store backward query and rollback
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt.flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

    // check if a load already in lq needs to be rolled back
    val lqViolationVec = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      val addrMatch = allocated(j) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === dataModule.io.rdata(j).paddr(PAddrBits - 1, 3)
      val entryNeedCheck = toEnqPtrMask(j) && addrMatch && (datavalid(j) || listening(j) || miss(j))
      // TODO: update refilled data
      val violationVec = (0 until 8).map(k => dataModule.io.rdata(j).mask(k) && io.storeIn(i).bits.mask(k))
      Cat(violationVec).orR() && entryNeedCheck
    })))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when l/s write back to roq together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for a load still in l1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.forward(j).valid && // L1 valid
        isAfter(io.forward(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.forward(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.forward(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.forward.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }
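
  // Three violation sources are checked per store port:
  //   lqViolation: a younger load already in the queue has read data this store overwrites
  //   wbViolation: a younger overlapping load writes back in the same cycle as the store
  //   l1Violation: a younger overlapping load is still in the load pipeline (seen via io.forward)
  // getAfterMask compares the three candidate uops by roqIdx and the oldest violating
  // load is chosen, since replaying from it also re-executes everything younger.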

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a, b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }
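
  // ParallelOperation reduces the per-port candidates in a tree with rollbackSel, so at
  // most one rollback (the one with the oldest roqIdx) is reported per cycle.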

  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)

  io.rollback := DontCare
  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, here even if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.
  io.rollback.valid := rollbackSelected.valid && (!lastCycleRedirect.valid || !isAfter(rollbackSelected.bits.roqIdx, lastCycleRedirect.bits.roqIdx))

  io.rollback.bits.roqIdx := rollbackSelected.bits.roqIdx - 1.U
  io.rollback.bits.isReplay := true.B
  io.rollback.bits.isMisPred := false.B
  io.rollback.bits.isException := false.B
  io.rollback.bits.isFlushPipe := false.B
  io.rollback.bits.target := rollbackSelected.bits.cf.pc
  io.rollback.bits.brTag := rollbackSelected.bits.brTag

  // Memory mapped IO / other uncached operations

  // setup misc mem access req
  // mask / paddr / data can be read from lq.data
  val commitType = io.commits.uop(0).ctrl.commitType
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits.isWalk

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(deqPtr).data
  io.uncache.req.bits.mask := dataModule.io.rdata(deqPtr).mask

  io.uncache.req.bits.meta.id       := DontCare // TODO: // FIXME
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.meta.uop      := uop(deqPtr)
  io.uncache.req.bits.meta.mmio     := true.B // dataModule.io.rdata(deqPtr).mmio
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := dataModule.io.rdata(deqPtr).mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B
    // TODO: write back exception info
  }
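
  // Uncache sequencing: the request fires exactly once, when the mmio load reaches the
  // roq head (pending is cleared on fire so it does not refire); the response data is
  // written into dataModule and datavalid is set, after which the entry is written back
  // to the CDB through the normal io.ldout path above.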

  when(io.uncache.req.fire()){
    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  when(io.uncache.resp.fire()){
    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.lqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when(needCancel(i)) {
      // when(io.brqRedirect.bits.isReplay){
      //   valid(i) := false.B
      //   writebacked(i) := false.B
      //   listening(i) := false.B
      //   miss(i) := false.B
      //   pending(i) := false.B
      // }.otherwise{
        allocated(i) := false.B
      // }
    }
  }
  when (io.brqRedirect.valid && io.brqRedirect.bits.isMisPred) {
    enqPtrExt := enqPtrExt - PopCount(needCancel)
  }
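
  // On a mispredict redirect, enqPtrExt steps back by the number of cancelled entries in
  // one cycle; this relies on the flushed entries being the youngest ones, contiguous
  // below enqPtr. The entries themselves are freed by clearing allocated above.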

  // assert(!io.rollback.valid)
  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  // debug info
  XSDebug("head %d:%d tail %d:%d\n", enqPtrExt.flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}