xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 35ee668dfaa9d1512c3dfbcc845d4c6a4c4ec9be)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.mem._
import xiangshan.backend.roq.RoqLsqIO
import xiangshan.backend.fu.HasExceptionNO
import xiangshan.frontend.FtqPtr


class LqPtr(implicit p: Parameters) extends CircularQueuePtr[LqPtr](
  p => p(XSCoreParamsKey).LoadQueueSize
){
  override def cloneType = (new LqPtr).asInstanceOf[this.type]
}

object LqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
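
// Note (illustrative): LqPtr is a circular queue pointer: `value` indexes an
// entry and `flag` flips on every wrap, which distinguishes a full queue from
// an empty one when two pointers have equal values. For example, assuming the
// default LoadQueueSize = 64:
//   val deq = LqPtr(false.B, 5.U)
//   val enq = LqPtr(true.B, 5.U)
//   // distanceBetween(enq, deq) is 64 (full), not 0 (empty)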

trait HasFpLoadHelper { this: HasFPUParameters =>
  def fpRdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lw   -> recode(rdata(31, 0), S),
      LSUOpType.ld   -> recode(rdata(63, 0), D)
    ))
  }
}
trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw   -> Mux(fpWen, Cat(Fill(32, 1.U(1.W)), rdata(31, 0)), SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld   -> Mux(fpWen, rdata, SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN),
    ))
  }
}
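
// Note (illustrative): rdataHelper extends raw load data to XLEN bits per the
// RISC-V rules: lb/lh/lw sign-extend, lbu/lhu/lwu zero-extend. For a 32-bit
// floating-point load (lw with fpWen set), the upper 32 bits are filled with
// ones, i.e. the value is NaN-boxed as FLW requires; fpRdataHelper then
// recodes the IEEE bits into the FPU's internal (hardfloat) format.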

class LqEnqIO(implicit p: Parameters) extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasExceptionNO
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val flush = Input(Bool())
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val loadDataForwarded = Vec(LoadPipelineWidth, Input(Bool()))
    val needReplayFromRS = Vec(LoadPipelineWidth, Input(Bool()))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    val roq = Flipped(new RoqLsqIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill))
    val uncache = new DCacheWordIO
    val exceptionAddr = new ExceptionAddrIO
    val lqFull = Output(Bool())
  })

  println("LoadQueue: size:" + LoadQueueSize)

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LoadQueueData(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new SyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 1, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been writebacked to CDB
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst
  val debug_paddr = Reg(Vec(LoadQueueSize, UInt(PAddrBits.W))) // debug: paddr of the inst

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)
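
  // Note (illustrative): UIntToMask(ptr, len) yields a len-bit mask with the
  // bits below ptr set. E.g. with LoadQueueSize = 8 and deqPtr = 3, deqMask is
  // 0b00000111. These masks encode pointer positions for the circular range
  // checks below (see getFirstOne and detectRollback).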

  val commitCount = RegNext(io.roq.lcommit)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when at least RenameWidth (the enqueue width) entries remain empty after this cycle's enqueue
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !(io.brqRedirect.valid || io.flush)) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
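
  // Note (illustrative): dispatch slot i claims the offset-th enqueue pointer,
  // where offset counts the earlier slots that also need an lq entry. E.g. with
  // needAlloc = (1, 0, 1, 1), slots 0, 2 and 3 are assigned enqPtrExt(0),
  // enqPtrExt(1) and enqPtrExt(2) respectively.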
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the regfile immediately.
    * However,
    *   (1) an mmio instruction with exceptions writes back to the ROB immediately;
    *   (2) an mmio instruction without exceptions does not write back yet.
    *       It is sent to the lower level when it reaches the ROB's head, and
    *       after the uncache response it writes back through the arbiter shared
    *       with the load units;
    *   (3) a load that misses in the cache is marked as miss and sent to the
    *       dcache later. After the cache refill, it writes back through the
    *       arbiter shared with the load units.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to CDB lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }
      datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.loadDataForwarded(i)) &&
        !io.loadIn(i).bits.mmio && // mmio data is not valid until we finish the uncache access
        !io.needReplayFromRS(i) // do not writeback if that inst will be resent from rs
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
      debug_paddr(loadWbIndex) := io.loadIn(i).bits.paddr

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadDataForwarded(i) && !io.needReplayFromRS(i)
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      uop(loadWbIndex).debugInfo.issueTime := io.loadIn(i).bits.uop.debugInfo.issueTime
    }
    // vaddrModule write is delayed, as vaddrModule will not be read right after write
    vaddrModule.io.waddr(i) := RegNext(loadWbIndex)
    vaddrModule.io.wdata(i) := RegNext(io.loadIn(i).bits.vaddr)
    vaddrModule.io.wen(i) := RegNext(io.loadIn(i).fire())
  }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bits in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
    }
  })

  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads whose data has been refilled and write them back to the CDB.
  // The 2 refilled loads are selected from even/odd entries separately.

  // Stage 0
  // Generate writeback indexes

  def getEvenBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i)})).asUInt
  }
  def getOddBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i+1)})).asUInt
  }
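
  // Note (illustrative): getEvenBits/getOddBits split a LoadQueueSize-bit
  // vector into its even- and odd-indexed halves. E.g. for LoadQueueSize = 8
  // and input = 0b10110100 (bit 0 rightmost):
  //   getEvenBits -> bits 0,2,4,6 = 0b0110
  //   getOddBits  -> bits 1,3,5,7 = 0b1100
  // Writeback port 0 serves even entries and port 1 serves odd entries.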

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && datavalid(i)
  })).asUInt() // use UInt instead of Vec to reduce Verilog lines
  val evenDeqMask = getEvenBits(deqMask)
  val oddDeqMask = getOddBits(deqMask)
  // generate lastCycleSelect mask
  val evenSelectMask = Mux(io.ldout(0).fire(), getEvenBits(UIntToOH(loadWbSel(0))), 0.U)
  val oddSelectMask = Mux(io.ldout(1).fire(), getOddBits(UIntToOH(loadWbSel(1))), 0.U)
  // generate real select vec
  val loadEvenSelVec = getEvenBits(loadWbSelVec) & ~evenSelectMask
  val loadOddSelVec = getOddBits(loadWbSelVec) & ~oddSelectMask

  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }

  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(getFirstOne(toVec(loadEvenSelVec), evenDeqMask), 0.U(1.W))
  loadWbSelVGen(0) := loadEvenSelVec.asUInt.orR
  loadWbSelGen(1) := Cat(getFirstOne(toVec(loadOddSelVec), oddDeqMask), 1.U(1.W))
  loadWbSelVGen(1) := loadOddSelVec.asUInt.orR

  (0 until LoadPipelineWidth).map(i => {
    loadWbSel(i) := RegNext(loadWbSelGen(i))
    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
    when(io.ldout(i).fire()){
      // Mark them as writebacked, so they will not be selected in the next cycle
      writebacked(loadWbSel(i)) := true.B
    }
  })

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to cdb
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
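    // Note (illustrative): raddr(2, 0) is the byte offset of the access within
    // its 64-bit entry, so rdataSel shifts the data right by 8 * offset bits.
    // E.g. offset b010 selects rdata(63, 16), placing the accessed bytes at
    // bit 0 before rdataHelper sign-/zero-extends them per fuOpType.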
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.debug.paddr := debug_paddr(loadWbSel(i))
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to CDB roqidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }
  })

  /**
    * Load commits
    *
    * When a load commits, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(commitCount > i.U){
      allocated(deqPtr+i.U) := false.B
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }
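
  // Note (illustrative): getFirstOne returns the index of the first set bit at
  // or above the position encoded by startMask (whose bits below the start are
  // set), wrapping around to the overall first set bit if none exists. With
  // startMask = deqMask this walks entries in age order from deqPtr. E.g. for
  // mask = 0b01000010 and deqPtr = 4 (startMask = 0b00001111), the result is 6,
  // since entry 6 is older than the wrapped-around entry 1.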

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }
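
  // Note (illustrative): of the (up to) two valid uops, getOldestInTwo returns
  // the one with the older roqIdx; if only one is valid, that one is returned.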

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
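
  // Note (illustrative): getAfterMask builds a matrix where mask(i)(j) means
  // "candidate i is younger than candidate j (or i is invalid)". Stage 3 below
  // uses it to pick the oldest of three rollback candidates with parallel
  // comparisons instead of a serial reduction.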

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches the LoadQueue for younger load instructions
    * with the same physical address. Those loads got the wrong (stale) data and
    * need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations, giving up to 6 possible redirect requests.
    *   Choose the oldest load (part 1). (4 + 2) -> (1 + 2)
    * Cycle 2: Redirect Fire
    *   Choose the oldest load (part 2). (3 -> 1)
    *   Prepare redirect request according to the detected violation.
    *   Fire redirect request (if valid)
    */

  // stage 0:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |  (paddr match)
  // stage 1:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |
  //                 |  |------------|  |
  //                 |        |         |
  // stage 2:        lq      l1wb       lq
  //                 |        |         |
  //                 --------------------
  //                          |
  //                      rollback req
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
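
    // Note (illustrative): toEnqPtrMask marks the entries between the store's
    // lqIdx and enqPtr, i.e. the loads younger than this store. When both
    // pointers carry the same wrap flag, the XOR of their below-pointer masks
    // is exactly that range; when the flags differ, the range wraps and its
    // complement is taken. E.g. for LoadQueueSize = 8, startIndex = 2 and
    // enqPtr = 5 with equal flags: 0b00000011 ^ 0b00011111 = 0b00011100,
    // selecting entries 2, 3 and 4.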

    // check if a load already in lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when load and store write back to the roq in the same cycle, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for a load in L1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    ((lqViolation, lqViolationUop), (wbViolation, wbViolationUop), (l1Violation, l1ViolationUop))
  }

  def rollbackSel(a: Valid[MicroOpRbExt], b: Valid[MicroOpRbExt]): ValidIO[MicroOpRbExt] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.uop.roqIdx, b.bits.uop.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }
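
  // Note (illustrative): rollbackSel keeps the older of two candidate rollback
  // requests (an invalid candidate always loses). ParallelOperation below
  // applies it as a reduction tree, so with StorePipelineWidth = 2 the four
  // L1/writeback candidates collapse to one in two comparison levels.
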
  val lastCycleRedirect = RegNext(io.brqRedirect)
  val lastlastCycleRedirect = RegNext(lastCycleRedirect)
  val lastCycleFlush = RegNext(io.flush)
  val lastlastCycleFlush = RegNext(lastCycleFlush)

  // S2: select rollback (part1) and generate rollback request
  // rollback check
  // Wb/L1 rollback seq check is done in s2
  val rollbackWb = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1 = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1Wb = Wire(Vec(StorePipelineWidth*2, Valid(new MicroOpRbExt)))
  // Lq rollback seq check is done in s3 (next stage), as getting rollbackLq MicroOp is slow
  val rollbackLq = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  // store ftq index for store set update
  val stFtqIdxS2 = Wire(Vec(StorePipelineWidth, new FtqPtr))
  val stFtqOffsetS2 = Wire(Vec(StorePipelineWidth, UInt(log2Up(PredictWidth).W)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollbackLq(i).valid := detectedRollback._1._1 && RegNext(io.storeIn(i).valid)
    rollbackLq(i).bits.uop := detectedRollback._1._2
    rollbackLq(i).bits.flag := i.U
    rollbackWb(i).valid := detectedRollback._2._1 && RegNext(io.storeIn(i).valid)
    rollbackWb(i).bits.uop := detectedRollback._2._2
    rollbackWb(i).bits.flag := i.U
    rollbackL1(i).valid := detectedRollback._3._1 && RegNext(io.storeIn(i).valid)
    rollbackL1(i).bits.uop := detectedRollback._3._2
    rollbackL1(i).bits.flag := i.U
    rollbackL1Wb(2*i) := rollbackL1(i)
    rollbackL1Wb(2*i+1) := rollbackWb(i)
    stFtqIdxS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqPtr)
    stFtqOffsetS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqOffset)
  }

  val rollbackL1WbSelected = ParallelOperation(rollbackL1Wb, rollbackSel)
  val rollbackL1WbVReg = RegNext(rollbackL1WbSelected.valid)
  val rollbackL1WbReg = RegEnable(rollbackL1WbSelected.bits, rollbackL1WbSelected.valid)
  val rollbackLq0VReg = RegNext(rollbackLq(0).valid)
  val rollbackLq0Reg = RegEnable(rollbackLq(0).bits, rollbackLq(0).valid)
  val rollbackLq1VReg = RegNext(rollbackLq(1).valid)
  val rollbackLq1Reg = RegEnable(rollbackLq(1).bits, rollbackLq(1).valid)

  // S3: select rollback (part2), generate rollback request, then fire rollback request
  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.

  // FIXME: this is ugly
  val rollbackValidVec = Seq(rollbackL1WbVReg, rollbackLq0VReg, rollbackLq1VReg)
  val rollbackUopExtVec = Seq(rollbackL1WbReg, rollbackLq0Reg, rollbackLq1Reg)

  // select uop in parallel
  val mask = getAfterMask(rollbackValidVec, rollbackUopExtVec.map(i => i.uop))
  val oneAfterZero = mask(1)(0)
  val rollbackUopExt = Mux(oneAfterZero && mask(2)(0),
    rollbackUopExtVec(0),
    Mux(!oneAfterZero && mask(2)(1), rollbackUopExtVec(1), rollbackUopExtVec(2)))
  val stFtqIdxS3 = RegNext(stFtqIdxS2)
  val stFtqOffsetS3 = RegNext(stFtqOffsetS2)
  val rollbackUop = rollbackUopExt.uop
  val rollbackStFtqIdx = stFtqIdxS3(rollbackUopExt.flag)
  val rollbackStFtqOffset = stFtqOffsetS3(rollbackUopExt.flag)

  // check if rollback request is still valid in parallel
  val rollbackValidVecChecked = Wire(Vec(3, Bool()))
  for(((v, uop), idx) <- rollbackValidVec.zip(rollbackUopExtVec.map(i => i.uop)).zipWithIndex) {
    rollbackValidVecChecked(idx) := v &&
      (!lastCycleRedirect.valid || isBefore(uop.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
      (!lastlastCycleRedirect.valid || isBefore(uop.roqIdx, lastlastCycleRedirect.bits.roqIdx))
  }

  io.rollback.bits.roqIdx := rollbackUop.roqIdx
  io.rollback.bits.ftqIdx := rollbackUop.cf.ftqPtr
  io.rollback.bits.stFtqIdx := rollbackStFtqIdx
  io.rollback.bits.ftqOffset := rollbackUop.cf.ftqOffset
  io.rollback.bits.stFtqOffset := rollbackStFtqOffset
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.cfiUpdate := DontCare
  io.rollback.bits.cfiUpdate.target := rollbackUop.cf.pc
  // io.rollback.bits.pc := DontCare

  io.rollback.valid := rollbackValidVecChecked.asUInt.orR && !lastCycleFlush && !lastlastCycleFlush

  when(io.rollback.valid) {
    // XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.cfi, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    *
    * States:
    * (1) writeback from load units: mark as pending
    * (2) when they reach ROB's head, they can be sent to uncache channel
    * (3) response from uncache channel: mark as datavalid
    * (4) writeback to ROB (and other units): mark as writebacked
    * (5) ROB commits the instruction: same as normal instructions
    */
  // (2) when they reach ROB's head, they can be sent to uncache channel
  val lqTailMmioPending = WireInit(pending(deqPtr))
  val lqTailAllocated = WireInit(allocated(deqPtr))
  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(io.roq.pendingld && lqTailMmioPending && lqTailAllocated) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when(io.uncache.req.fire()) {
        uncacheState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire()) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      when(io.roq.commit) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
  io.uncache.req.valid := uncacheState === s_req

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.id   := DontCare

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  // (3) response from uncache channel: mark as datavalid
  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  // no inst will be committed 1 cycle before tval update
  vaddrModule.io.raddr(0) := (deqPtrExt + commitCount).value
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect, io.flush) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !(io.brqRedirect.valid || io.flush), PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid || lastCycleFlush) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)

  allowEnqueue := validCount + enqNumber <= (LoadQueueSize - RenameWidth).U
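
  // Note (illustrative): this keeps at least RenameWidth entries free after the
  // current cycle's enqueue, so a full RenameWidth-wide dispatch group can
  // always be accepted next cycle without a partial-accept path. E.g. assuming
  // the defaults LoadQueueSize = 64 and RenameWidth = 6, enqueue stalls once
  // validCount + enqNumber exceeds 58.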

  /**
    * misc
    */
  io.roq.storeDataRoqWb := DontCare // will be overwritten by store queue's result

  // perf counter
  QueuePerf(LoadQueueSize, validCount, !allowEnqueue)
  io.lqFull := !allowEnqueue
  XSPerfAccumulate("rollback", io.rollback.valid) // rollback redirect generated
  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req
  XSPerfAccumulate("mmioCnt", io.uncache.req.fire())
  XSPerfAccumulate("refill", io.dcache.valid)
  XSPerfAccumulate("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire()))))
  XSPerfAccumulate("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready))))
  XSPerfAccumulate("utilization_miss", PopCount((0 until LoadQueueSize).map(i => allocated(i) && miss(i))))

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.debug(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && miss(i), "m")
    // PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}