/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.backend.fu.fpu.FPU
import xiangshan.backend.rob.RobLsqIO
import xiangshan.cache._
import xiangshan.frontend.FtqPtr


class LqPtr(implicit p: Parameters) extends CircularQueuePtr[LqPtr](
  p => p(XSCoreParamsKey).LoadQueueSize
){
  override def cloneType = (new LqPtr).asInstanceOf[this.type]
}

object LqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

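// Helper for assembling load writeback data: sign/zero-extends (or NaN-boxes for FP loads)
// the raw loaded bytes to XLEN according to the load's fuOpType.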
trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      /*
          riscv-spec-20191213: 12.2 NaN Boxing of Narrower Values
          Any operation that writes a narrower result to an f register must write
          all 1s to the uppermost FLEN−n bits to yield a legal NaN-boxed value.
      */
      LSUOpType.lw   -> Mux(fpWen, FPU.box(rdata, FPU.S), SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld   -> Mux(fpWen, FPU.box(rdata, FPU.D), SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN),
    ))
  }
}

class LqEnqIO(implicit p: Parameters) extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(exuParameters.LsExuCnt, Input(Bool()))
  val req = Vec(exuParameters.LsExuCnt, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(exuParameters.LsExuCnt, Output(new LqPtr))
}

class LqTriggerIO(implicit p: Parameters) extends XSBundle {
  val hitLoadAddrTriggerHitVec = Input(Vec(3, Bool()))
  val lqLoadAddrTriggerHitVec = Output(Vec(3, Bool()))
}

// Load Queue
class LoadQueue(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasPerfEvents
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val loadDataForwarded = Vec(LoadPipelineWidth, Input(Bool()))
    val dcacheRequireReplay = Vec(LoadPipelineWidth, Input(Bool()))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO)) // TODO: to be renamed
    val loadViolationQuery = Vec(LoadPipelineWidth, Flipped(new LoadViolationQueryIO))
    val rob = Flipped(new RobLsqIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill)) // TODO: to be renamed
    val release = Flipped(ValidIO(new Release))
    val uncache = new DCacheWordIO
    val exceptionAddr = new ExceptionAddrIO
    val lqFull = Output(Bool())
    val lqCancelCnt = Output(UInt(log2Up(LoadQueueSize + 1).W))
    val trigger = Vec(LoadPipelineWidth, new LqTriggerIO)
  })

  println("LoadQueue: size:" + LoadQueueSize)

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRobEntry))
  val dataModule = Module(new LoadQueueDataWrapper(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new SyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 3, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val vaddrTriggerResultModule = Module(new SyncDataModuleTemplate(Vec(3, Bool()), LoadQueueSize, numRead = LoadPipelineWidth, numWrite = LoadPipelineWidth))
  vaddrTriggerResultModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been writebacked to CDB
  val released = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been released by dcache
  val error = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been corrupted
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of rob
  val refilling = WireInit(VecInit(List.fill(LoadQueueSize)(false.B))) // entry is being refilled by dcache this cycle, its data will become valid

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst
  val debug_paddr = Reg(Vec(LoadQueueSize, UInt(PAddrBits.W))) // debug only: physical address of the load

  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)
  val allowEnqueue = validCount <= (LoadQueueSize - 2).U

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  val commitCount = RegNext(io.rob.lcommit)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > EnqWidth
    */
  io.enq.canAccept := allowEnqueue

  val canEnqueue = io.enq.req.map(_.valid)
  val enqCancel = io.enq.req.map(_.bits.robIdx.needFlush(io.brqRedirect))
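  // Each accepted request claims entry enqPtrExt(offset), where offset counts how many
  // lower-indexed ports also allocate an LQ entry this cycle, so the allocated entries
  // are consecutive starting from enqPtrExt(0).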
  for (i <- 0 until io.enq.req.length) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = io.enq.req(i).bits.lqIdx.value
    when (canEnqueue(i) && !enqCancel(i)) {
      uop(index).robIdx := io.enq.req(i).bits.robIdx
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      released(index) := false.B
      miss(index) := false.B
      pending(index) := false.B
      error(index) := false.B
      XSError(!io.enq.canAccept || !io.enq.sqCanAccept, s"must accept $i\n")
      XSError(index =/= lqIdx.value, s"must be the same entry $i\n")
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions writeback to regfile at the same time.
    * However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    * The mmio instruction will be sent to lower level when it reaches ROB's head.
    * After uncache response, it will write back through arbiter with loadUnit.
    *   (3) For cache misses, it is marked miss and sent to dcache later.
    * After cache refills, it will write back through arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    vaddrTriggerResultModule.io.wen(i) := false.B
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value

    // most lq status need to be updated immediately after load writeback to lq
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
        io.loadIn(i).bits.uop.lqIdx.asUInt,
        io.loadIn(i).bits.uop.cf.pc,
        io.loadIn(i).bits.vaddr,
        io.loadIn(i).bits.paddr,
        io.loadIn(i).bits.data,
        io.loadIn(i).bits.mask,
        io.loadIn(i).bits.forwardData.asUInt,
        io.loadIn(i).bits.forwardMask.asUInt,
        io.loadIn(i).bits.mmio
      )}
      if(EnableFastForward){
        datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.loadDataForwarded(i)) &&
          !io.loadIn(i).bits.mmio && // mmio data is not valid until we finished uncache access
          !io.dcacheRequireReplay(i) // do not writeback if that inst will be resend from rs
      } else {
        datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.loadDataForwarded(i)) &&
          !io.loadIn(i).bits.mmio // mmio data is not valid until we finished uncache access
      }
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      vaddrTriggerResultModule.io.waddr(i) := loadWbIndex
      vaddrTriggerResultModule.io.wdata(i) := io.trigger(i).hitLoadAddrTriggerHitVec
      vaddrTriggerResultModule.io.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
      debug_paddr(loadWbIndex) := io.loadIn(i).bits.paddr

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      if(EnableFastForward){
        miss(loadWbIndex) := dcacheMissed && !io.loadDataForwarded(i) && !io.dcacheRequireReplay(i)
      } else {
        miss(loadWbIndex) := dcacheMissed && !io.loadDataForwarded(i)
      }
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      // dirty code for load instr
      uop(loadWbIndex).pdest := io.loadIn(i).bits.uop.pdest
      uop(loadWbIndex).cf := io.loadIn(i).bits.uop.cf
      uop(loadWbIndex).ctrl := io.loadIn(i).bits.uop.ctrl
      uop(loadWbIndex).debugInfo := io.loadIn(i).bits.uop.debugInfo
    }

    // vaddrModule write is delayed, as vaddrModule will not be read right after write
    vaddrModule.io.waddr(i) := RegNext(loadWbIndex)
    vaddrModule.io.wdata(i) := RegNext(io.loadIn(i).bits.vaddr)
    vaddrModule.io.wen(i) := RegNext(io.loadIn(i).fire())
  }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

  val dcacheRequireReplay = WireInit(VecInit((0 until LoadPipelineWidth).map(i =>{
    RegNext(io.loadIn(i).fire()) && RegNext(io.dcacheRequireReplay(i))
  })))
  dontTouch(dcacheRequireReplay)

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
      when(!dcacheRequireReplay.asUInt.orR){
        refilling(i) := true.B
      }
      when(io.dcache.bits.error) {
        error(i) := true.B
      }
    }
  })

  for (i <- 0 until LoadPipelineWidth) {
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
    if(!EnableFastForward){
      // dcacheRequireReplay will be used to update lq flag 1 cycle after for better timing
      //
      // io.dcacheRequireReplay comes from dcache miss req reject, which is quite slow to generate
      when(dcacheRequireReplay(i)) {
        // do not writeback if that inst will be resend from rs
        // rob writeback will not be triggered by a refill before inst replay
        miss(RegNext(loadWbIndex)) := false.B // disable refill listening
        datavalid(RegNext(loadWbIndex)) := false.B // disable refill listening
        assert(!datavalid(RegNext(loadWbIndex)))
      }
    }
  }

  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed load (data refilled), write them back to cdb
  // 2 refilled load will be selected from even/odd entry, separately

  // Stage 0
  // Generate writeback indexes

  def getEvenBits(input: UInt): UInt = {
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i)})).asUInt
  }
  def getOddBits(input: UInt): UInt = {
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i+1)})).asUInt
  }

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && (datavalid(i) || refilling(i))
  })).asUInt() // use UInt instead of Vec to reduce generated Verilog lines
  val evenDeqMask = getEvenBits(deqMask)
  val oddDeqMask = getOddBits(deqMask)
  // generate lastCycleSelect mask
  val evenFireMask = getEvenBits(UIntToOH(loadWbSel(0)))
  val oddFireMask = getOddBits(UIntToOH(loadWbSel(1)))
  // generate real select vec
  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }
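  // When ldout(i) fires this cycle, the entry it writes back (selected last cycle) is only
  // marked writebacked at the next clock edge, so remove it from this cycle's candidate
  // vector to avoid selecting the same entry twice.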
  val loadEvenSelVecFire = getEvenBits(loadWbSelVec) & ~evenFireMask
  val loadOddSelVecFire = getOddBits(loadWbSelVec) & ~oddFireMask
  val loadEvenSelVecNotFire = getEvenBits(loadWbSelVec)
  val loadOddSelVecNotFire = getOddBits(loadWbSelVec)
  val loadEvenSel = Mux(
    io.ldout(0).fire(),
    getFirstOne(toVec(loadEvenSelVecFire), evenDeqMask),
    getFirstOne(toVec(loadEvenSelVecNotFire), evenDeqMask)
  )
  val loadOddSel = Mux(
    io.ldout(1).fire(),
    getFirstOne(toVec(loadOddSelVecFire), oddDeqMask),
    getFirstOne(toVec(loadOddSelVecNotFire), oddDeqMask)
  )


  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(loadEvenSel, 0.U(1.W))
  loadWbSelVGen(0) := Mux(io.ldout(0).fire(), loadEvenSelVecFire.asUInt.orR, loadEvenSelVecNotFire.asUInt.orR)
  loadWbSelGen(1) := Cat(loadOddSel, 1.U(1.W))
  loadWbSelVGen(1) := Mux(io.ldout(1).fire(), loadOddSelVecFire.asUInt.orR, loadOddSelVecNotFire.asUInt.orR)

  (0 until LoadPipelineWidth).map(i => {
    loadWbSel(i) := RegNext(loadWbSelGen(i))
    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
    when(io.ldout(i).fire()){
      // Mark them as writebacked, so they will not be selected in the next cycle
      writebacked(loadWbSel(i)) := true.B
    }
  })

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to cdb
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
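    // Shift the 64-bit entry right by the byte offset paddr(2, 0) so that the addressed
    // byte sits at bit 0 before rdataHelper extends it to XLEN.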
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.debug.paddr := debug_paddr(loadWbSel(i))
    io.ldout(i).bits.debug.vaddr := vaddrModule.io.rdata(i+1)
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb robidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.robIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }

  })

  /**
    * Load commits
    *
    * When a load is committed, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(commitCount > i.U){
      allocated((deqPtrExt+i.U).value) := false.B
      XSError(!allocated((deqPtrExt+i.U).value), s"why commit invalid entry $i?\n")
    }
  })

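  // Returns the index of the first set bit in `mask` at or after the position where
  // `startMask` (a right-aligned mask of the positions below the start) ends; if no such
  // bit exists, it wraps around and returns the first set bit overall.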
  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).robIdx, uop(1).robIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

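  // Pairwise age matrix: mask(i)(j) is true when uop(i) is younger than uop(j) (invalid
  // entries are treated as youngest), used later to pick the oldest rollback candidate.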
  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).robIdx, uop(j).robIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  /**
    * Store-Load Memory violation detection
    *
    * When store writes back, it searches LoadQueue for younger load instructions
    * with the same load physical address. They loaded wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There're three possible types of violations, up to 6 possible redirect requests.
    *   Choose the oldest load (part 1). (4 + 2) -> (1 + 2)
    * Cycle 2: Redirect Fire
    *   Choose the oldest load (part 2). (3 -> 1)
    *   Prepare redirect request according to the detected violation.
    *   Fire redirect request (if valid)
    */

  // stage 0:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |  (paddr match)
  // stage 1:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |
  //                 |  |------------|  |
  //                 |        |         |
  // stage 2:        lq      l1wb       lq
  //                 |        |         |
  //                 --------------------
  //                          |
  //                      rollback req
  io.load_s1 := DontCare
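  // For store port i, returns three (violation, uop) pairs: violations against loads already
  // in the load queue, loads writing back in the same cycle, and loads still in load_s1.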
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

    // check if load already in lq needs to be rolledback
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when l/s writeback to rob together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for load in l1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, l1ViolationUop.robIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, lqViolationUop.robIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, wbViolationUop.robIdx.asUInt
    )

    ((lqViolation, lqViolationUop), (wbViolation, wbViolationUop), (l1Violation, l1ViolationUop))
  }

  def rollbackSel(a: Valid[MicroOpRbExt], b: Valid[MicroOpRbExt]): ValidIO[MicroOpRbExt] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.uop.robIdx, b.bits.uop.robIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }
  val lastCycleRedirect = RegNext(io.brqRedirect)
  val lastlastCycleRedirect = RegNext(lastCycleRedirect)

  // S2: select rollback (part1) and generate rollback request
  // rollback check
  // Wb/L1 rollback seq check is done in s2
  val rollbackWb = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1 = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1Wb = Wire(Vec(StorePipelineWidth*2, Valid(new MicroOpRbExt)))
  // Lq rollback seq check is done in s3 (next stage), as getting rollbackLq MicroOp is slow
  val rollbackLq = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  // store ftq index for store set update
  val stFtqIdxS2 = Wire(Vec(StorePipelineWidth, new FtqPtr))
  val stFtqOffsetS2 = Wire(Vec(StorePipelineWidth, UInt(log2Up(PredictWidth).W)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollbackLq(i).valid := detectedRollback._1._1 && RegNext(io.storeIn(i).valid)
    rollbackLq(i).bits.uop := detectedRollback._1._2
    rollbackLq(i).bits.flag := i.U
    rollbackWb(i).valid := detectedRollback._2._1 && RegNext(io.storeIn(i).valid)
    rollbackWb(i).bits.uop := detectedRollback._2._2
    rollbackWb(i).bits.flag := i.U
    rollbackL1(i).valid := detectedRollback._3._1 && RegNext(io.storeIn(i).valid)
    rollbackL1(i).bits.uop := detectedRollback._3._2
    rollbackL1(i).bits.flag := i.U
    rollbackL1Wb(2*i) := rollbackL1(i)
    rollbackL1Wb(2*i+1) := rollbackWb(i)
    stFtqIdxS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqPtr)
    stFtqOffsetS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqOffset)
  }

  val rollbackL1WbSelected = ParallelOperation(rollbackL1Wb, rollbackSel)
  val rollbackL1WbVReg = RegNext(rollbackL1WbSelected.valid)
  val rollbackL1WbReg = RegEnable(rollbackL1WbSelected.bits, rollbackL1WbSelected.valid)
  val rollbackLq0VReg = RegNext(rollbackLq(0).valid)
  val rollbackLq0Reg = RegEnable(rollbackLq(0).bits, rollbackLq(0).valid)
  val rollbackLq1VReg = RegNext(rollbackLq(1).valid)
  val rollbackLq1Reg = RegEnable(rollbackLq(1).bits, rollbackLq(1).valid)

  // S3: select rollback (part2), generate rollback request, then fire rollback request
  // Note that we use robIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's robIdx equals to this cycle's robIdx, it still triggers the redirect.

  // FIXME: this is ugly
  val rollbackValidVec = Seq(rollbackL1WbVReg, rollbackLq0VReg, rollbackLq1VReg)
  val rollbackUopExtVec = Seq(rollbackL1WbReg, rollbackLq0Reg, rollbackLq1Reg)

  // select uop in parallel
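  // Pick the oldest valid candidate: take candidate 0 if both 1 and 2 are younger than 0,
  // otherwise take candidate 1 if 2 is younger than 1, otherwise take candidate 2.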
  val mask = getAfterMask(rollbackValidVec, rollbackUopExtVec.map(i => i.uop))
  val oneAfterZero = mask(1)(0)
  val rollbackUopExt = Mux(oneAfterZero && mask(2)(0),
    rollbackUopExtVec(0),
    Mux(!oneAfterZero && mask(2)(1), rollbackUopExtVec(1), rollbackUopExtVec(2)))
  val stFtqIdxS3 = RegNext(stFtqIdxS2)
  val stFtqOffsetS3 = RegNext(stFtqOffsetS2)
  val rollbackUop = rollbackUopExt.uop
  val rollbackStFtqIdx = stFtqIdxS3(rollbackUopExt.flag)
  val rollbackStFtqOffset = stFtqOffsetS3(rollbackUopExt.flag)

  // check if rollback request is still valid in parallel
  val rollbackValidVecChecked = Wire(Vec(3, Bool()))
  for(((v, uop), idx) <- rollbackValidVec.zip(rollbackUopExtVec.map(i => i.uop)).zipWithIndex) {
    rollbackValidVecChecked(idx) := v &&
      (!lastCycleRedirect.valid || isBefore(uop.robIdx, lastCycleRedirect.bits.robIdx)) &&
      (!lastlastCycleRedirect.valid || isBefore(uop.robIdx, lastlastCycleRedirect.bits.robIdx))
  }

  io.rollback.bits.robIdx := rollbackUop.robIdx
  io.rollback.bits.ftqIdx := rollbackUop.cf.ftqPtr
  io.rollback.bits.stFtqIdx := rollbackStFtqIdx
  io.rollback.bits.ftqOffset := rollbackUop.cf.ftqOffset
  io.rollback.bits.stFtqOffset := rollbackStFtqOffset
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.cfiUpdate := DontCare
  io.rollback.bits.cfiUpdate.target := rollbackUop.cf.pc
  io.rollback.bits.debug_runahead_checkpoint_id := rollbackUop.debugInfo.runahead_checkpoint_id
  // io.rollback.bits.pc := DontCare

  io.rollback.valid := rollbackValidVecChecked.asUInt.orR

  when(io.rollback.valid) {
    // XSDebug("Mem rollback: pc %x robidx %d\n", io.rollback.bits.cfi, io.rollback.bits.robIdx.asUInt)
  }

  /**
  * Load-Load Memory violation detection
  *
  * When load arrives load_s1, it searches LoadQueue for younger load instructions
  * with the same load physical address. If the younger load has been released (or observed),
  * the younger load needs to be re-executed.
  *
  * For now, if re-exec is found to be needed in load_s1, we mark the older load as replayInst,
  * the two loads will be replayed if the older load becomes the head of rob.
  *
  * When dcache releases a line, mark all writebacked entries in load queue with
  * the same line paddr as released.
  */

  // Load-Load Memory violation query
  val deqRightMask = UIntToMask.rightmask(deqPtr, LoadQueueSize)
  (0 until LoadPipelineWidth).map(i => {
    dataModule.io.release_violation(i).paddr := io.loadViolationQuery(i).req.bits.paddr
    io.loadViolationQuery(i).req.ready := true.B
    io.loadViolationQuery(i).resp.valid := RegNext(io.loadViolationQuery(i).req.fire())
    // Generate real violation mask
    // Note that we use UIntToMask.rightmask here
    val startIndex = io.loadViolationQuery(i).req.bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask.rightmask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ deqRightMask
    val sameFlag = io.loadViolationQuery(i).req.bits.uop.lqIdx.flag === deqPtrExt.flag
    val toDeqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
    val ldld_violation_mask = WireInit(VecInit((0 until LoadQueueSize).map(j => {
      dataModule.io.release_violation(i).match_mask(j) && // addr match
      toDeqPtrMask(j) && // the load is younger than current load
      allocated(j) && // entry is valid
      released(j) && // cacheline is released
      (datavalid(j) || miss(j)) // paddr is valid
    })))
    dontTouch(ldld_violation_mask)
    ldld_violation_mask.suggestName("ldldViolationMask_" + i)
    io.loadViolationQuery(i).resp.bits.have_violation := RegNext(ldld_violation_mask.asUInt.orR)
  })

  // "released" flag update
  //
  // When io.release.valid, it uses the last ld-ld paddr cam port to
  // update release flag in 1 cycle
  when(io.release.valid){
    // Take over ld-ld paddr cam port
    dataModule.io.release_violation.takeRight(1)(0).paddr := io.release.bits.paddr
    io.loadViolationQuery.takeRight(1)(0).req.ready := false.B
    // If a load needs that cam port, replay it from rs
  }

  (0 until LoadQueueSize).map(i => {
    when(RegNext(dataModule.io.release_violation.takeRight(1)(0).match_mask(i) &&
      allocated(i) &&
      writebacked(i) &&
      io.release.valid
    )){
      // Note: if a load has missed in dcache and is waiting for refill in load queue,
      // its released flag still needs to be set as true if addr matches.
      released(i) := true.B
    }
  })

  /**
    * Memory mapped IO / other uncached operations
    *
    * States:
    * (1) writeback from load units: mark as pending
    * (2) when they reach ROB's head, they can be sent to uncache channel
    * (3) response from uncache channel: mark as datavalid
    * (4) writeback to ROB (and other units): mark as writebacked
    * (5) ROB commits the instruction: same as normal instructions
    */
  //(2) when they reach ROB's head, they can be sent to uncache channel
  val lqTailMmioPending = WireInit(pending(deqPtr))
  val lqTailAllocated = WireInit(allocated(deqPtr))
  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(RegNext(io.rob.pendingld && lqTailMmioPending && lqTailAllocated)) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when(io.uncache.req.fire()) {
        uncacheState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire()) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      when(RegNext(io.rob.commit)) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
  io.uncache.req.valid := uncacheState === s_req

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.id   := DontCare
  io.uncache.req.bits.instrtype := DontCare

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  // (3) response from uncache channel: mark as datavalid
  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  // no inst will be committed 1 cycle before tval update
  vaddrModule.io.raddr(0) := (deqPtrExt + commitCount).value
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // Read vaddr for debug
  (0 until LoadPipelineWidth).map(i => {
    vaddrModule.io.raddr(i+1) := loadWbSel(i)
  })

  (0 until LoadPipelineWidth).map(i => {
    vaddrTriggerResultModule.io.raddr(i) := loadWbSelGen(i)
    io.trigger(i).lqLoadAddrTriggerHitVec := vaddrTriggerResultModule.io.rdata(i)
  })

  // misprediction recovery / exception redirect
  // invalidate lq entries using robIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
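  // When recovering from last cycle's redirect, enqPtr steps back by the number of
  // allocated entries that were flushed plus the enqueues cancelled in the same cycle as
  // the redirect; otherwise it advances by this cycle's accepted enqueue count.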
  val lastEnqCancel = PopCount(RegNext(VecInit(canEnqueue.zip(enqCancel).map(x => x._1 && x._2))))
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - (lastCycleCancelCount + lastEnqCancel)))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  io.lqCancelCnt := RegNext(lastCycleCancelCount + lastEnqCancel)

  /**
    * misc
    */
  // perf counter
  QueuePerf(LoadQueueSize, validCount, !allowEnqueue)
  io.lqFull := !allowEnqueue
  XSPerfAccumulate("rollback", io.rollback.valid) // rollback redirect generated
  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req
  XSPerfAccumulate("mmioCnt", io.uncache.req.fire())
  XSPerfAccumulate("refill", io.dcache.valid)
  XSPerfAccumulate("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire()))))
  XSPerfAccumulate("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready))))
  XSPerfAccumulate("utilization_miss", PopCount((0 until LoadQueueSize).map(i => allocated(i) && miss(i))))

  val perfEvents = Seq(
    ("rollback         ", io.rollback.valid                                                               ),
    ("mmioCycle        ", uncacheState =/= s_idle                                                         ),
    ("mmio_Cnt         ", io.uncache.req.fire()                                                           ),
    ("refill           ", io.dcache.valid                                                                 ),
    ("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire())))                                  ),
    ("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready)))                       ),
    ("ltq_1_4_valid    ", (validCount < (LoadQueueSize.U/4.U))                                            ),
    ("ltq_2_4_valid    ", (validCount > (LoadQueueSize.U/4.U)) & (validCount <= (LoadQueueSize.U/2.U))    ),
    ("ltq_3_4_valid    ", (validCount > (LoadQueueSize.U/2.U)) & (validCount <= (LoadQueueSize.U*3.U/4.U))),
    ("ltq_4_4_valid    ", (validCount > (LoadQueueSize.U*3.U/4.U))                                        )
  )
  generatePerfEvent()

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    XSDebug(i + " pc %x pa %x ", uop(i).cf.pc, debug_paddr(i))
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, "\n")
  }

}