xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 0f0389247d954d0a33001fd5dfee0f268a4e1712)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.mem
18
19import chipsalliance.rocketchip.config.Parameters
20import chisel3._
21import chisel3.util._
22import utils._
23import utility._
24import xiangshan._
25import xiangshan.backend.fu.fpu.FPU
26import xiangshan.backend.rob.RobLsqIO
27import xiangshan.cache._
28import xiangshan.frontend.FtqPtr
29import xiangshan.ExceptionNO._
30import chisel3.ExcitingUtils
31import xiangshan.cache.dcache.ReplayCarry
32
33class LqPtr(implicit p: Parameters) extends CircularQueuePtr[LqPtr](
34  p => p(XSCoreParamsKey).LoadQueueSize
35){
36}
37
38object LqPtr {
39  def apply(f: Bool, v: UInt)(implicit p: Parameters): LqPtr = {
40    val ptr = Wire(new LqPtr)
41    ptr.flag := f
42    ptr.value := v
43    ptr
44  }
45}
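// Illustrative note (not part of the hardware): LqPtr is a circular-queue pointer made of a
// wrap flag plus an index value. A minimal pure-Scala sketch of the occupancy computation,
// assuming the usual flag convention (the real helper lives in CircularQueuePtr /
// distanceBetween from the utility package):
//
//   case class PtrModel(flag: Boolean, value: Int)
//   def distance(enq: PtrModel, deq: PtrModel, size: Int): Int =
//     if (enq.flag == deq.flag) enq.value - deq.value   // both pointers on the same lap
//     else size + enq.value - deq.value                 // enq has wrapped once past deq
//
//   // e.g. size = 80: distance(PtrModel(true, 3), PtrModel(false, 77), 80) == 6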
46
47trait HasLoadHelper { this: XSModule =>
48  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
49    val fpWen = uop.ctrl.fpWen
50    LookupTree(uop.ctrl.fuOpType, List(
51      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
52      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
53      /*
54          riscv-spec-20191213: 12.2 NaN Boxing of Narrower Values
55          Any operation that writes a narrower result to an f register must write
56          all 1s to the uppermost FLEN−n bits to yield a legal NaN-boxed value.
57      */
58      LSUOpType.lw   -> Mux(fpWen, FPU.box(rdata, FPU.S), SignExt(rdata(31, 0), XLEN)),
59      LSUOpType.ld   -> Mux(fpWen, FPU.box(rdata, FPU.D), SignExt(rdata(63, 0), XLEN)),
60      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
61      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
62      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN),
63    ))
64  }
65}
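// Illustrative sketch (plain Scala, not part of the hardware; helper names are made up):
// what rdataHelper computes for a few op types, modelled on 64-bit values. lb/lh/lw
// sign-extend, lbu/lhu/lwu zero-extend, and a floating-point lw result is NaN-boxed by
// forcing the upper 32 bits to all ones, as the spec excerpt above requires.
//
//   def signExtByte(x: Long): Long = (x & 0xFFL).toByte.toLong            // lb
//   def zeroExtByte(x: Long): Long = x & 0xFFL                            // lbu
//   def nanBoxS(x: Long): Long = (0xFFFFFFFFL << 32) | (x & 0xFFFFFFFFL)  // lw with fpWen
//
//   // nanBoxS(0x3F800000L) yields bit pattern 0xffffffff_3f800000: 1.0f as a legal NaN-boxed value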
66
67class LqEnqIO(implicit p: Parameters) extends XSBundle {
68  val canAccept = Output(Bool())
69  val sqCanAccept = Input(Bool())
70  val needAlloc = Vec(exuParameters.LsExuCnt, Input(Bool()))
71  val req = Vec(exuParameters.LsExuCnt, Flipped(ValidIO(new MicroOp)))
72  val resp = Vec(exuParameters.LsExuCnt, Output(new LqPtr))
73}
74
75class LqPaddrWriteBundle(implicit p: Parameters) extends XSBundle {
76  val paddr = Output(UInt(PAddrBits.W))
77  val lqIdx = Output(new LqPtr)
78}
79
80class LqVaddrWriteBundle(implicit p: Parameters) extends XSBundle {
81  val vaddr = Output(UInt(VAddrBits.W))
82  val lqIdx = Output(new LqPtr)
83}
84
85class LqTriggerIO(implicit p: Parameters) extends XSBundle {
86  val hitLoadAddrTriggerHitVec = Input(Vec(3, Bool()))
87  val lqLoadAddrTriggerHitVec = Output(Vec(3, Bool()))
88}
89
90class LoadQueueIOBundle(implicit p: Parameters) extends XSBundle {
91  val enq = new LqEnqIO
92  val brqRedirect = Flipped(ValidIO(new Redirect))
93  val loadOut = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle)) // select load from lq to load pipeline
94  val loadPaddrIn = Vec(LoadPipelineWidth, Flipped(Valid(new LqPaddrWriteBundle)))
95  val loadVaddrIn = Vec(LoadPipelineWidth, Flipped(Valid(new LqVaddrWriteBundle)))
96  val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LqWriteBundle)))
97  val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
98  val s2_load_data_forwarded = Vec(LoadPipelineWidth, Input(Bool()))
99  val s3_delayed_load_error = Vec(LoadPipelineWidth, Input(Bool()))
100  val s2_dcache_require_replay = Vec(LoadPipelineWidth, Input(Bool()))
101  val s3_replay_from_fetch = Vec(LoadPipelineWidth, Input(Bool()))
102  val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
103  val ldRawDataOut = Vec(2, Output(new LoadDataFromLQBundle))
104  val load_s1 = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO)) // TODO: to be renamed
105  val loadViolationQuery = Vec(LoadPipelineWidth, Flipped(new LoadViolationQueryIO))
106  val rob = Flipped(new RobLsqIO)
107  val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
108  val refill = Flipped(ValidIO(new Refill)) // TODO: to be renamed
109  val release = Flipped(ValidIO(new Release))
110  val uncache = new UncacheWordIO
111  val exceptionAddr = new ExceptionAddrIO
112  val lqFull = Output(Bool())
113  val lqCancelCnt = Output(UInt(log2Up(LoadQueueSize + 1).W))
114  val trigger = Vec(LoadPipelineWidth, new LqTriggerIO)
115
116  // for load replay (receive feedback from the load pipeline)
117  val replayFast = Vec(LoadPipelineWidth, Flipped(new LoadToLsqFastIO))
118  val replaySlow = Vec(LoadPipelineWidth, Flipped(new LoadToLsqSlowIO))
119
120  val storeDataValidVec = Vec(StoreQueueSize, Input(Bool()))
121
122  val tlbReplayDelayCycleCtrl = Vec(4, Input(UInt(ReSelectLen.W)))
123}
124
125// Load Queue
126class LoadQueue(implicit p: Parameters) extends XSModule
127  with HasDCacheParameters
128  with HasCircularQueuePtrHelper
129  with HasLoadHelper
130  with HasPerfEvents
131{
132  val io = IO(new LoadQueueIOBundle())
133
134  // dontTouch(io)
135
136  println("LoadQueue: size:" + LoadQueueSize)
137
138  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
139  val replayCarryReg = RegInit(VecInit(List.fill(LoadQueueSize)(ReplayCarry(0.U, false.B))))
140  // val data = Reg(Vec(LoadQueueSize, new LsRobEntry))
141  val dataModule = Module(new LoadQueueDataWrapper(LoadQueueSize, wbNumWrite = LoadPipelineWidth))
142  dataModule.io := DontCare
143  // vaddrModule's read port 0 for exception addr, port 1 for uncache vaddr read, port {2, 3} for load replay
144  val vaddrModule = Module(new SyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 1 + 1 + LoadPipelineWidth, numWrite = LoadPipelineWidth))
145  vaddrModule.io := DontCare
146  val vaddrTriggerResultModule = Module(new SyncDataModuleTemplate(Vec(3, Bool()), LoadQueueSize, numRead = LoadPipelineWidth, numWrite = LoadPipelineWidth))
147  vaddrTriggerResultModule.io := DontCare
148  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
149  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
150  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to the CDB
151  val released = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been released by dcache
152  val error = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been corrupted
153  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
154  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
155  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the head of the ROB
156  val refilling = WireInit(VecInit(List.fill(LoadQueueSize)(false.B))) // entry is being refilled this cycle
157
158  /**
159    * used for load replay control
160    */
161
162  val tlb_hited = RegInit(VecInit(List.fill(LoadQueueSize)(true.B)))
163  val ld_ld_check_ok = RegInit(VecInit(List.fill(LoadQueueSize)(true.B)))
164  val st_ld_check_ok = RegInit(VecInit(List.fill(LoadQueueSize)(true.B)))
165  val cache_bank_no_conflict = RegInit(VecInit(List.fill(LoadQueueSize)(true.B)))
166  val cache_no_replay = RegInit(VecInit(List.fill(LoadQueueSize)(true.B)))
167  val forward_data_valid = RegInit(VecInit(List.fill(LoadQueueSize)(true.B)))
168  val cache_hited = RegInit(VecInit(List.fill(LoadQueueSize)(true.B)))
169
170
171  /**
172    * used for re-select control
173    */
174
175  val credit = RegInit(VecInit(List.fill(LoadQueueSize)(0.U(ReSelectLen.W))))
176
177  // per-entry pointers selecting which block_cycles_* entry to use for the next block
178  val block_ptr_tlb = RegInit(VecInit(List.fill(LoadQueueSize)(0.U(2.W))))
179  val block_ptr_cache = RegInit(VecInit(List.fill(LoadQueueSize)(0.U(2.W))))
180  val block_ptr_others = RegInit(VecInit(List.fill(LoadQueueSize)(0.U(2.W))))
181
182  // specific cycles to block
183  val block_cycles_tlb = Reg(Vec(4, UInt(ReSelectLen.W)))
184  block_cycles_tlb := io.tlbReplayDelayCycleCtrl
185  val block_cycles_cache = RegInit(VecInit(Seq(11.U(ReSelectLen.W), 0.U(ReSelectLen.W), 31.U(ReSelectLen.W), 0.U(ReSelectLen.W))))
186  val block_cycles_others = RegInit(VecInit(Seq(0.U(ReSelectLen.W), 0.U(ReSelectLen.W), 0.U(ReSelectLen.W), 0.U(ReSelectLen.W))))
187
188  val sel_blocked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B)))
189
190  // data forward block
191  val block_sq_idx = RegInit(VecInit(List.fill(LoadQueueSize)(0.U((log2Ceil(StoreQueueSize).W)))))
192  val block_by_data_forward_fail = RegInit(VecInit(List.fill(LoadQueueSize)(false.B)))
193
194  // dcache miss block
195  val miss_mshr_id = RegInit(VecInit(List.fill(LoadQueueSize)(0.U((log2Up(cfg.nMissEntries).W)))))
196  val block_by_cache_miss = RegInit(VecInit(List.fill(LoadQueueSize)(false.B)))
197
198  val true_cache_miss_replay = WireInit(VecInit(List.fill(LoadQueueSize)(false.B)))
199  (0 until LoadQueueSize).map{i => {
200    true_cache_miss_replay(i) := tlb_hited(i) && ld_ld_check_ok(i) && st_ld_check_ok(i) && cache_bank_no_conflict(i) &&
201                                 cache_no_replay(i) && forward_data_valid(i) && !cache_hited(i)
202  }}
203
204  val creditUpdate = WireInit(VecInit(List.fill(LoadQueueSize)(0.U(ReSelectLen.W))))
205
206  credit := creditUpdate
207
208  (0 until LoadQueueSize).map(i => {
209    creditUpdate(i) := Mux(credit(i) > 0.U(ReSelectLen.W), credit(i) - 1.U(ReSelectLen.W), credit(i))
210    sel_blocked(i) := creditUpdate(i) =/= 0.U(ReSelectLen.W)
211  })
212
213  (0 until LoadQueueSize).map(i => {
214    block_by_data_forward_fail(i) := Mux(block_by_data_forward_fail(i) === true.B && io.storeDataValidVec(block_sq_idx(i)) === true.B , false.B, block_by_data_forward_fail(i))
215  })
216
217  (0 until LoadQueueSize).map(i => {
218    block_by_cache_miss(i) := Mux(block_by_cache_miss(i) === true.B && io.refill.valid && io.refill.bits.id === miss_mshr_id(i), false.B, block_by_cache_miss(i))
219    when(creditUpdate(i) === 0.U && block_by_cache_miss(i) === true.B) {
220      block_by_cache_miss(i) := false.B
221    }
222    when(block_by_cache_miss(i) === true.B && io.refill.valid && io.refill.bits.id === miss_mshr_id(i)) {
223      creditUpdate(i) := 0.U
224    }
225  })
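  // Illustrative software model (plain Scala with made-up names, not the RTL above): each
  // entry's credit counts down by one per cycle and the entry stays blocked from replay
  // selection while the remaining count is non-zero; block_ptr_* walks through the four
  // entries of block_cycles_*, so repeated failures of the same kind wait progressively longer.
  //
  //   final case class EntryModel(var credit: Int, var blockPtr: Int)
  //   def tick(e: EntryModel): Boolean = {              // returns "blocked this cycle"
  //     if (e.credit > 0) e.credit -= 1
  //     e.credit != 0
  //   }
  //   def onReplayFail(e: EntryModel, blockCycles: Seq[Int]): Unit = {
  //     e.credit   = blockCycles(e.blockPtr)            // reload the countdown
  //     e.blockPtr = math.min(e.blockPtr + 1, blockCycles.length - 1) // saturating, like the 2-bit ptrs
  //   }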
226
227  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst
228  val debug_paddr = Reg(Vec(LoadQueueSize, UInt(PAddrBits.W))) // debug: physical address of the load
229
230  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new LqPtr))))
231  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
232  val deqPtrExtNext = Wire(new LqPtr)
233
234  val enqPtr = enqPtrExt(0).value
235  val deqPtr = deqPtrExt.value
236
237  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)
238  val allowEnqueue = validCount <= (LoadQueueSize - LoadPipelineWidth).U
239
240  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
241  val enqMask = UIntToMask(enqPtr, LoadQueueSize)
242
243  val commitCount = RegNext(io.rob.lcommit)
244
245  val release1cycle = io.release
246  val release2cycle = RegNext(io.release)
247  val release2cycle_dup_lsu = RegNext(io.release)
248
249  /**
250    * Enqueue at dispatch
251    *
252    * Currently, LoadQueue only allows enqueue when #emptyEntries > EnqWidth
253    */
254  io.enq.canAccept := allowEnqueue
255
256  val canEnqueue = io.enq.req.map(_.valid)
257  val enqCancel = io.enq.req.map(_.bits.robIdx.needFlush(io.brqRedirect))
258  for (i <- 0 until io.enq.req.length) {
259    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
260    val lqIdx = enqPtrExt(offset)
261    val index = io.enq.req(i).bits.lqIdx.value
262    when (canEnqueue(i) && !enqCancel(i)) {
263      uop(index) := io.enq.req(i).bits
264      // NOTE: the index will be used when the load is replayed
265      uop(index).lqIdx := lqIdx
266      allocated(index) := true.B
267      datavalid(index) := false.B
268      writebacked(index) := false.B
269      released(index) := false.B
270      miss(index) := false.B
271      pending(index) := false.B
272      error(index) := false.B
273
274      /**
275        * used for load replay control
276        */
277      tlb_hited(index) := true.B
278      ld_ld_check_ok(index) := true.B
279      st_ld_check_ok(index) := true.B
280      cache_bank_no_conflict(index) := true.B
281      cache_no_replay(index) := true.B
282      forward_data_valid(index) := true.B
283      cache_hited(index) := true.B
284
285      /**
286        * used for delaying the load (block ptrs control how many cycles to block)
287        */
288      credit(index) := 0.U(ReSelectLen.W)
289      block_ptr_tlb(index) := 0.U(2.W)
290      block_ptr_cache(index) := 0.U(2.W)
291      block_ptr_others(index) := 0.U(2.W)
292
293      block_by_data_forward_fail(index) := false.B
294      block_by_cache_miss(index) := false.B
295
296      XSError(!io.enq.canAccept || !io.enq.sqCanAccept, s"must accept $i\n")
297      XSError(index =/= lqIdx.value, s"must be the same entry $i\n")
298    }
299    io.enq.resp(i) := lqIdx
300  }
301  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
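  // Illustrative example (plain Scala, not the RTL): each enqueue slot's offset is the
  // popcount of needAlloc over the earlier slots, so simultaneous enqueues get consecutive
  // entries. With enqPtrExt(0) at entry 10 and needAlloc = (true, false, true), slot 0
  // allocates entry 10 and slot 2 allocates entry 11.
  //
  //   def enqOffsets(needAlloc: Seq[Boolean]): Seq[Int] =
  //     needAlloc.indices.map(i => needAlloc.take(i).count(identity))
  //
  //   // enqOffsets(Seq(true, false, true)) == Seq(0, 1, 1); slot 1's offset is unused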
302
303  val lastCycleRedirect = RegNext(io.brqRedirect)
304  val lastlastCycleRedirect = RegNext(lastCycleRedirect)
305
306  // replay logic
307  // replay is split into 2 stages
308
309  // stage1: select 2 entries and read their vaddr
310  val s0_block_load_mask = WireInit(VecInit((0 until LoadQueueSize).map(x=>false.B)))
311  val s1_block_load_mask = RegNext(s0_block_load_mask)
312  val s2_block_load_mask = RegNext(s1_block_load_mask)
313
314  val loadReplaySel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
315  val loadReplaySelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid
316
317  val loadReplaySelVec = VecInit((0 until LoadQueueSize).map(i => {
318    val blocked = s1_block_load_mask(i) || s2_block_load_mask(i) || sel_blocked(i) || block_by_data_forward_fail(i) || block_by_cache_miss(i)
319    allocated(i) && (!tlb_hited(i) || !ld_ld_check_ok(i) || !st_ld_check_ok(i) || !cache_bank_no_conflict(i) || !cache_no_replay(i) || !forward_data_valid(i) || !cache_hited(i)) && !blocked
320  })).asUInt() // use a UInt instead of a Vec to reduce the generated Verilog
321
322  val remReplayDeqMask = Seq.tabulate(LoadPipelineWidth)(getRemBits(deqMask)(_))
323
324  // generate lastCycleSelect mask
325  val remReplayFireMask = Seq.tabulate(LoadPipelineWidth)(rem => getRemBits(UIntToOH(loadReplaySel(rem)))(rem))
326
327  val loadReplayRemSelVecFire = Seq.tabulate(LoadPipelineWidth)(rem => getRemBits(loadReplaySelVec)(rem) & ~remReplayFireMask(rem))
328  val loadReplayRemSelVecNotFire = Seq.tabulate(LoadPipelineWidth)(getRemBits(loadReplaySelVec)(_))
329
330  val replayRemFire = Seq.tabulate(LoadPipelineWidth)(rem => WireInit(false.B))
331
332  val loadReplayRemSel = Seq.tabulate(LoadPipelineWidth)(rem => Mux(
333    replayRemFire(rem),
334    getFirstOne(toVec(loadReplayRemSelVecFire(rem)), remReplayDeqMask(rem)),
335    getFirstOne(toVec(loadReplayRemSelVecNotFire(rem)), remReplayDeqMask(rem))
336  ))
337
338  val loadReplaySelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
339  val loadReplaySelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
340
341  (0 until LoadPipelineWidth).foreach(index => {
342    loadReplaySelGen(index) := (
343      if (LoadPipelineWidth > 1) Cat(loadReplayRemSel(index), index.U(log2Ceil(LoadPipelineWidth).W))
344      else loadReplayRemSel(index)
345    )
346    loadReplaySelVGen(index) := Mux(replayRemFire(index), loadReplayRemSelVecFire(index).asUInt.orR, loadReplayRemSelVecNotFire(index).asUInt.orR)
347  })
348
349  (0 until LoadPipelineWidth).map(i => {
350    vaddrModule.io.raddr(LoadPipelineWidth + i) := loadReplaySelGen(i)
351  })
352
353  (0 until LoadPipelineWidth).map(i => {
354    loadReplaySel(i) := RegNext(loadReplaySelGen(i))
355    loadReplaySelV(i) := RegNext(loadReplaySelVGen(i), init = false.B)
356  })
357
358  // stage2: replay to load pipeline (if no load in S0)
359  (0 until LoadPipelineWidth).map(i => {
360    when(replayRemFire(i)) {
361      s0_block_load_mask(loadReplaySel(i)) := true.B
362    }
363  })
364
365  // init
366  (0 until LoadPipelineWidth).map(i => {
367    replayRemFire(i) := false.B
368  })
369
370  for(i <- 0 until LoadPipelineWidth) {
371    val replayIdx = loadReplaySel(i)
372    val notRedirectLastCycle = !uop(replayIdx).robIdx.needFlush(RegNext(io.brqRedirect))
373
374    io.loadOut(i).valid := loadReplaySelV(i) && notRedirectLastCycle
375
376    io.loadOut(i).bits := DontCare
377    io.loadOut(i).bits.uop := uop(replayIdx)
378    io.loadOut(i).bits.vaddr := vaddrModule.io.rdata(LoadPipelineWidth + i)
379    io.loadOut(i).bits.mask := genWmask(vaddrModule.io.rdata(LoadPipelineWidth + i), uop(replayIdx).ctrl.fuOpType(1,0))
380    io.loadOut(i).bits.isFirstIssue := false.B
381    io.loadOut(i).bits.isLoadReplay := true.B
382    io.loadOut(i).bits.replayCarry := replayCarryReg(replayIdx)
383    io.loadOut(i).bits.mshrid := miss_mshr_id(replayIdx)
384    io.loadOut(i).bits.forward_tlDchannel := true_cache_miss_replay(replayIdx)
385
386    when(io.loadOut(i).fire) {
387      replayRemFire(i) := true.B
388    }
389
390  }
391  /**
392    * Writeback load from load units
393    *
394    * Most load instructions writeback to regfile at the same time.
395    * However,
396    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
397    *   (2) For an mmio instruction without exceptions, it does not write back.
398    * The mmio instruction will be sent to lower level when it reaches ROB's head.
399    * After uncache response, it will write back through arbiter with loadUnit.
400    *   (3) For cache misses, it is marked miss and sent to dcache later.
401    * After cache refills, it will write back through arbiter with loadUnit.
402    */
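  // Illustrative summary (plain Scala sketch with made-up names, simplified to the
  // !EnableFastForward path): how the three cases above map onto the per-entry flags
  // when a load writes back from the pipeline.
  //
  //   final case class WbFlags(datavalid: Boolean, writebacked: Boolean, miss: Boolean, pending: Boolean)
  //   def wbFlags(miss: Boolean, mmio: Boolean, forwarded: Boolean): WbFlags =
  //     WbFlags(
  //       datavalid   = !miss && !mmio,                 // cache hit, non-mmio: data ready now
  //       writebacked = !miss && !mmio,
  //       miss        = miss && !mmio && !forwarded,    // wait for dcache refill
  //       pending     = mmio                            // wait until the inst reaches the ROB head
  //     )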
403  for (i <- 0 until LoadPipelineWidth) {
404    dataModule.io.wb.wen(i) := false.B
405    dataModule.io.paddr.wen(i) := false.B
406    vaddrModule.io.wen(i) := false.B
407    vaddrTriggerResultModule.io.wen(i) := false.B
408    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
409
410    // most lq status needs to be updated immediately after load writeback to lq
411    // flag bits in lq need to be updated accurately
412    when(io.loadIn(i).fire()) {
413      when(io.loadIn(i).bits.miss) {
414        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x mask %x forwardData %x forwardMask: %x mmio %x\n",
415          io.loadIn(i).bits.uop.lqIdx.asUInt,
416          io.loadIn(i).bits.uop.cf.pc,
417          io.loadIn(i).bits.vaddr,
418          io.loadIn(i).bits.paddr,
419          io.loadIn(i).bits.mask,
420          io.loadIn(i).bits.forwardData.asUInt,
421          io.loadIn(i).bits.forwardMask.asUInt,
422          io.loadIn(i).bits.mmio
423        )
424      }.otherwise {
425        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x mask %x forwardData %x forwardMask: %x mmio %x\n",
426        io.loadIn(i).bits.uop.lqIdx.asUInt,
427        io.loadIn(i).bits.uop.cf.pc,
428        io.loadIn(i).bits.vaddr,
429        io.loadIn(i).bits.paddr,
430        io.loadIn(i).bits.mask,
431        io.loadIn(i).bits.forwardData.asUInt,
432        io.loadIn(i).bits.forwardMask.asUInt,
433        io.loadIn(i).bits.mmio
434      )}
435      if(EnableFastForward){
436        datavalid(loadWbIndex) := !io.loadIn(i).bits.miss &&
437          !io.loadIn(i).bits.mmio && // mmio data is not valid until we finished uncache access
438          !io.s2_dcache_require_replay(i) // do not writeback if that inst will be resent from rs
439      } else {
440        datavalid(loadWbIndex) := !io.loadIn(i).bits.miss &&
441          !io.loadIn(i).bits.mmio // mmio data is not valid until we finished uncache access
442      }
443      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
444
445      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
446      debug_paddr(loadWbIndex) := io.loadIn(i).bits.paddr
447
448      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
449      if(EnableFastForward){
450        miss(loadWbIndex) := dcacheMissed && !io.s2_load_data_forwarded(i) && !io.s2_dcache_require_replay(i)
451      } else {
452        miss(loadWbIndex) := dcacheMissed && !io.s2_load_data_forwarded(i)
453      }
454      pending(loadWbIndex) := io.loadIn(i).bits.mmio
455      released(loadWbIndex) := release2cycle.valid &&
456        io.loadIn(i).bits.paddr(PAddrBits-1, DCacheLineOffset) === release2cycle.bits.paddr(PAddrBits-1, DCacheLineOffset) ||
457        release1cycle.valid &&
458        io.loadIn(i).bits.paddr(PAddrBits-1, DCacheLineOffset) === release1cycle.bits.paddr(PAddrBits-1, DCacheLineOffset)
459    }
460
461    // data bit in lq can be updated when load_s2 valid
462    // when(io.loadIn(i).bits.lq_data_wen){
463    //   val loadWbData = Wire(new LQDataEntry)
464    //   loadWbData.paddr := io.loadIn(i).bits.paddr
465    //   loadWbData.mask := io.loadIn(i).bits.mask
466    //   loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
467    //   loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
468    //   dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
469    //   dataModule.io.wb.wen(i) := true.B
470
471    //   // dirty code for load instr
472    //   uop(loadWbIndex).pdest := io.loadIn(i).bits.uop.pdest
473    //   uop(loadWbIndex).cf := io.loadIn(i).bits.uop.cf
474    //   uop(loadWbIndex).ctrl := io.loadIn(i).bits.uop.ctrl
475    //   uop(loadWbIndex).debugInfo := io.loadIn(i).bits.uop.debugInfo
476
477    //   vaddrTriggerResultModule.io.waddr(i) := loadWbIndex
478    //   vaddrTriggerResultModule.io.wdata(i) := io.trigger(i).hitLoadAddrTriggerHitVec
479
480    //   vaddrTriggerResultModule.io.wen(i) := true.B
481    // }
482
483    // dirty code to reduce load_s2.valid fanout
484    when(io.loadIn(i).bits.lq_data_wen_dup(0)){
485      dataModule.io.wbWrite(i, loadWbIndex, io.loadIn(i).bits.mask)
486      dataModule.io.wb.wen(i) := true.B
487    }
488    // dirty code for load instr
489    when(io.loadIn(i).bits.lq_data_wen_dup(1)){
490      uop(loadWbIndex).pdest := io.loadIn(i).bits.uop.pdest
491    }
492    when(io.loadIn(i).bits.lq_data_wen_dup(2)){
493      uop(loadWbIndex).cf := io.loadIn(i).bits.uop.cf
494    }
495    when(io.loadIn(i).bits.lq_data_wen_dup(3)){
496      uop(loadWbIndex).ctrl := io.loadIn(i).bits.uop.ctrl
497    }
498    when(io.loadIn(i).bits.lq_data_wen_dup(4)){
499      uop(loadWbIndex).debugInfo := io.loadIn(i).bits.uop.debugInfo
500    }
501    when(io.loadIn(i).bits.lq_data_wen_dup(5)){
502      vaddrTriggerResultModule.io.waddr(i) := loadWbIndex
503      vaddrTriggerResultModule.io.wdata(i) := io.trigger(i).hitLoadAddrTriggerHitVec
504      vaddrTriggerResultModule.io.wen(i) := true.B
505    }
506
507    when(io.loadPaddrIn(i).valid) {
508      dataModule.io.paddr.wen(i) := true.B
509      dataModule.io.paddr.waddr(i) := io.loadPaddrIn(i).bits.lqIdx.value
510      dataModule.io.paddr.wdata(i) := io.loadPaddrIn(i).bits.paddr
511    }
512
513    // update vaddr in load S1
514    when(io.loadVaddrIn(i).valid) {
515      vaddrModule.io.wen(i) := true.B
516      vaddrModule.io.waddr(i) := io.loadVaddrIn(i).bits.lqIdx.value
517      vaddrModule.io.wdata(i) := io.loadVaddrIn(i).bits.vaddr
518    }
519
520    /**
521      * used for feedback and replay
522      */
523    when(io.replayFast(i).valid){
524      val idx = io.replayFast(i).ld_idx
525      val needreplay = !io.replayFast(i).ld_ld_check_ok || !io.replayFast(i).st_ld_check_ok || !io.replayFast(i).cache_bank_no_conflict
526
527      ld_ld_check_ok(idx) := io.replayFast(i).ld_ld_check_ok
528      st_ld_check_ok(idx) := io.replayFast(i).st_ld_check_ok
529      cache_bank_no_conflict(idx) := io.replayFast(i).cache_bank_no_conflict
530
531      when(needreplay) {
532        creditUpdate(idx) := block_cycles_others(block_ptr_others(idx))
533        block_ptr_others(idx) := Mux(block_ptr_others(idx) === 3.U(2.W), block_ptr_others(idx), block_ptr_others(idx) + 1.U(2.W))
534        // try to replay this load in next cycle
535        s1_block_load_mask(idx) := false.B
536        s2_block_load_mask(idx) := false.B
537
538        // replay this load in next cycle
539        loadReplaySelGen(idx(log2Ceil(LoadPipelineWidth) - 1, 0)) := idx
540        loadReplaySelVGen(idx(log2Ceil(LoadPipelineWidth) - 1, 0)) := true.B
541      }
542    }
543
544    when(io.replaySlow(i).valid){
545      val idx = io.replaySlow(i).ld_idx
546      val needreplay = !io.replaySlow(i).tlb_hited || !io.replaySlow(i).st_ld_check_ok || !io.replaySlow(i).cache_no_replay || !io.replaySlow(i).forward_data_valid || !io.replaySlow(i).cache_hited
547
548      tlb_hited(idx) := io.replaySlow(i).tlb_hited
549      st_ld_check_ok(idx) := io.replaySlow(i).st_ld_check_ok
550      cache_no_replay(idx) := io.replaySlow(i).cache_no_replay
551      forward_data_valid(idx) := io.replaySlow(i).forward_data_valid
552      replayCarryReg(idx) := io.replaySlow(i).replayCarry
553      cache_hited(idx) := io.replaySlow(i).cache_hited
554
555      val invalid_sq_idx = io.replaySlow(i).data_invalid_sq_idx
556
557      when(needreplay) {
558        // update credit and ptr
559        val data_in_last_beat = io.replaySlow(i).data_in_last_beat
560        creditUpdate(idx) := Mux( !io.replaySlow(i).tlb_hited, block_cycles_tlb(block_ptr_tlb(idx)),
561                              Mux(!io.replaySlow(i).cache_hited, block_cycles_cache(block_ptr_cache(idx)) + data_in_last_beat,
562                               Mux(!io.replaySlow(i).cache_no_replay || !io.replaySlow(i).st_ld_check_ok, block_cycles_others(block_ptr_others(idx)), 0.U)))
563        when(!io.replaySlow(i).tlb_hited) {
564          block_ptr_tlb(idx) := Mux(block_ptr_tlb(idx) === 3.U(2.W), block_ptr_tlb(idx), block_ptr_tlb(idx) + 1.U(2.W))
565        }.elsewhen(!io.replaySlow(i).cache_hited) {
566          block_ptr_cache(idx) := Mux(block_ptr_cache(idx) === 3.U(2.W), block_ptr_cache(idx), block_ptr_cache(idx) + 1.U(2.W))
567        }.elsewhen(!io.replaySlow(i).cache_no_replay || !io.replaySlow(i).st_ld_check_ok) {
568          block_ptr_others(idx) := Mux(block_ptr_others(idx) === 3.U(2.W), block_ptr_others(idx), block_ptr_others(idx) + 1.U(2.W))
569        }
570      }
571
572      // special case: data forward fail
573      block_by_data_forward_fail(idx) := false.B
574
575      when(!io.replaySlow(i).forward_data_valid && io.replaySlow(i).tlb_hited) {
576        when(!io.storeDataValidVec(invalid_sq_idx)) {
577          block_by_data_forward_fail(idx) := true.B
578          block_sq_idx(idx) := invalid_sq_idx
579        }
580      }
581
582      // special case: cache miss
583      miss_mshr_id(idx) := io.replaySlow(i).miss_mshr_id
584      block_by_cache_miss(idx) := io.replaySlow(i).tlb_hited && io.replaySlow(i).cache_no_replay && io.replaySlow(i).st_ld_check_ok && // this load tlb hit and no cache replay
585                                  !io.replaySlow(i).cache_hited && !io.replaySlow(i).can_forward_full_data && // cache miss
586                                  !(io.refill.valid && io.refill.bits.id === io.replaySlow(i).miss_mshr_id) && // no refill in this cycle
587                                  creditUpdate(idx) =/= 0.U // credit is not zero
588    }
589
590  }
591
592  when(io.refill.valid) {
593    XSDebug("miss resp: paddr:0x%x data %x\n", io.refill.bits.addr, io.refill.bits.data)
594  }
595
596  // NOTE: we don't refill data from dcache now!
597
598  val s2_dcache_require_replay = WireInit(VecInit((0 until LoadPipelineWidth).map(i =>{
599    RegNext(io.loadIn(i).fire()) && RegNext(io.s2_dcache_require_replay(i))
600  })))
601  dontTouch(s2_dcache_require_replay)
602
603  for (i <- 0 until LoadPipelineWidth) {
604    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
605    val lastCycleLoadWbIndex = RegNext(loadWbIndex)
606    // update miss state in load s3
607    if(!EnableFastForward){
608      // s2_dcache_require_replay will be used to update lq flags 1 cycle later for better timing
609      //
610      // io.s2_dcache_require_replay comes from dcache miss req reject, which is quite slow to generate
611      when(s2_dcache_require_replay(i)) {
612        // do not writeback if that inst will be resent from rs
613        // rob writeback will not be triggered by a refill before inst replay
614        miss(lastCycleLoadWbIndex) := false.B // disable refill listening
615        datavalid(lastCycleLoadWbIndex) := false.B // disable refill listening
616        assert(!datavalid(lastCycleLoadWbIndex))
617      }
618    }
619    // update load error state in load s3
620    when(RegNext(io.loadIn(i).fire()) && io.s3_delayed_load_error(i)){
621      uop(lastCycleLoadWbIndex).cf.exceptionVec(loadAccessFault) := true.B
622    }
623    // update inst replay from fetch flag in s3
624    when(RegNext(io.loadIn(i).fire()) && io.s3_replay_from_fetch(i)){
625      uop(lastCycleLoadWbIndex).ctrl.replayInst := true.B
626    }
627  }
628
629  /**
630    * Load commits
631    *
632    * When a load is committed, mark it as !allocated and move deqPtrExt forward.
633    */
634  (0 until CommitWidth).map(i => {
635    when(commitCount > i.U){
636      allocated((deqPtrExt+i.U).value) := false.B
637      XSError(!allocated((deqPtrExt+i.U).value), s"why commit invalid entry $i?\n")
638    }
639  })
640
641  def toVec(a: UInt): Vec[Bool] = {
642    VecInit(a.asBools)
643  }
644
645  def getRemBits(input: UInt)(rem: Int): UInt = {
646    VecInit((0 until LoadQueueSize / LoadPipelineWidth).map(i => { input(LoadPipelineWidth * i + rem) })).asUInt
647  }
648
649  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
650    val length = mask.length
651    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
652    val highBitsUint = Cat(highBits.reverse)
653    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
654  }
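  // Illustrative pure-Scala model (made-up names, not the RTL) of the two helpers above:
  // getRemBits keeps only the entries whose index is congruent to `rem` modulo the pipeline
  // width, so each replay port scans its own subset, and getFirstOne prefers candidates at
  // or above the start (deq-side) position before wrapping back to index 0, approximating
  // oldest-first selection.
  //
  //   def remBits(input: Seq[Boolean], rem: Int, width: Int): Seq[Boolean] =
  //     input.indices.collect { case i if i % width == rem => input(i) }
  //
  //   def firstOne(mask: Seq[Boolean], startMask: Seq[Boolean]): Int = {
  //     val high = mask.zip(startMask).map { case (m, s) => m && !s }   // bits at or above the start
  //     val pool = if (high.exists(identity)) high else mask            // else wrap to the low part
  //     pool.indexOf(true)                                              // PriorityEncoder: lowest set bit
  //   }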
655
656  def getOldest[T <: XSBundleWithMicroOp](valid: Seq[Bool], bits: Seq[T]): (Seq[Bool], Seq[T]) = {
657    assert(valid.length == bits.length)
658    assert(isPow2(valid.length))
659    if (valid.length == 1) {
660      (valid, bits)
661    } else if (valid.length == 2) {
662      val res = Seq.fill(2)(Wire(ValidIO(chiselTypeOf(bits(0)))))
663      for (i <- res.indices) {
664        res(i).valid := valid(i)
665        res(i).bits := bits(i)
666      }
667      val oldest = Mux(valid(0) && valid(1), Mux(isAfter(bits(0).uop.robIdx, bits(1).uop.robIdx), res(1), res(0)), Mux(valid(0) && !valid(1), res(0), res(1)))
668      (Seq(oldest.valid), Seq(oldest.bits))
669    } else {
670      val left = getOldest(valid.take(valid.length / 2), bits.take(valid.length / 2))
671      val right = getOldest(valid.takeRight(valid.length / 2), bits.takeRight(valid.length / 2))
672      getOldest(left._1 ++ right._1, left._2 ++ right._2)
673    }
674  }
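  // Illustrative model (plain Scala, made-up names; input assumed non-empty): getOldest
  // halves the candidate list and keeps, at every level, the valid entry with the older
  // robIdx, so an N-way choice costs log2(N) comparator levels instead of a priority chain.
  // A smaller robAge stands in for "older" here; the RTL compares robIdx with isAfter.
  //
  //   def oldest(cands: Seq[(Boolean, Int)]): (Boolean, Int) = cands match {   // (valid, robAge)
  //     case Seq(only) => only
  //     case _ =>
  //       val (l, r) = cands.splitAt(cands.length / 2)
  //       val (a, b) = (oldest(l), oldest(r))
  //       if (a._1 && b._1) { if (a._2 <= b._2) a else b }   // both valid: the older one wins
  //       else if (a._1) a else b                            // otherwise keep whichever is valid
  //   }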
675
676  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
677    assert(valid.length == uop.length)
678    val length = valid.length
679    (0 until length).map(i => {
680      (0 until length).map(j => {
681        Mux(valid(i) && valid(j),
682          isAfter(uop(i).robIdx, uop(j).robIdx),
683          Mux(!valid(i), true.B, false.B))
684      })
685    })
686  }
687
688
689  /**
690    * Store-Load Memory violation detection
691    *
692    * When store writes back, it searches LoadQueue for younger load instructions
693    * with the same load physical address. They loaded wrong data and need re-execution.
694    *
695    * Cycle 0: Store Writeback
696    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
697    * Cycle 1: Redirect Generation
698    *   There are up to 2 possible redirect requests.
699    *   Choose the oldest load (part 1).
700    * Cycle 2: Redirect Fire
701    *   Choose the oldest load (part 2).
702    *   Prepare redirect request according to the detected violation.
703    *   Fire redirect request (if valid)
704    */
705
706  // stage 0:        lq                 lq
707  //                 |                  |  (paddr match)
708  // stage 1:        lq                 lq
709  //                 |                  |
710  //                 |                  |
711  //                 |                  |
712  // stage 2:        lq                 lq
713  //                 |                  |
714  //                 --------------------
715  //                          |
716  //                      rollback req
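  // Illustrative model (plain Scala, made-up names) of the wrap-aware range mask used in
  // detectRollback below: the loads "younger than the store" are the entries between the
  // store's lqIdx and enqPtr. XOR of the two prefix masks selects that range directly when
  // both pointers are on the same lap, and its complement when their flags differ.
  //
  //   def uintToMask(p: Int, size: Int): Seq[Boolean] = (0 until size).map(_ < p)
  //   def youngerThanStore(stIdx: Int, enqIdx: Int, sameFlag: Boolean, size: Int): Seq[Boolean] = {
  //     val xor = uintToMask(stIdx, size).zip(uintToMask(enqIdx, size)).map { case (a, b) => a ^ b }
  //     if (sameFlag) xor else xor.map(!_)
  //   }
  //
  //   // e.g. size = 8, stIdx = 2, enqIdx = 5, sameFlag = true  -> entries 2, 3, 4 selected
  //   //      size = 8, stIdx = 6, enqIdx = 2, sameFlag = false -> entries 6, 7, 0, 1 selected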
717  io.load_s1 := DontCare
718  def detectRollback(i: Int) = {
719    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
720    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
721    val xorMask = lqIdxMask ^ enqMask
722    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
723    val stToEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
724
725    // check if a load already in the lq needs to be rolled back
726    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
727    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
728    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
729    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
730      allocated(j) && stToEnqPtrMask(j) && datavalid(j)
731    })))
732    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
733      addrMaskMatch(j) && entryNeedCheck(j)
734    }))
735    val lqViolation = lqViolationVec.asUInt().orR() && RegNext(!io.storeIn(i).bits.miss)
736    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
737    val lqViolationUop = uop(lqViolationIndex)
738    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
739    // lqViolationUop.lqIdx.value := lqViolationIndex
740    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")
741
742    XSDebug(
743      lqViolation,
744      "need rollback (ld wb before store) pc %x robidx %d target %x\n",
745      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, lqViolationUop.robIdx.asUInt
746    )
747
748    (lqViolation, lqViolationUop)
749  }
750
751  def rollbackSel(a: Valid[MicroOpRbExt], b: Valid[MicroOpRbExt]): ValidIO[MicroOpRbExt] = {
752    Mux(
753      a.valid,
754      Mux(
755        b.valid,
756        Mux(isAfter(a.bits.uop.robIdx, b.bits.uop.robIdx), b, a), // a,b both valid, sel oldest
757        a // sel a
758      ),
759      b // sel b
760    )
761  }
762
763  // S2: select rollback (part1) and generate rollback request
764  // rollback check
765  // Lq rollback seq check is done in s3 (next stage), as getting rollbackLq MicroOp is slow
766  val rollbackLq = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
767  // store ftq index for store set update
768  val stFtqIdxS2 = Wire(Vec(StorePipelineWidth, new FtqPtr))
769  val stFtqOffsetS2 = Wire(Vec(StorePipelineWidth, UInt(log2Up(PredictWidth).W)))
770  for (i <- 0 until StorePipelineWidth) {
771    val detectedRollback = detectRollback(i)
772    rollbackLq(i).valid := detectedRollback._1 && RegNext(io.storeIn(i).valid)
773    rollbackLq(i).bits.uop := detectedRollback._2
774    rollbackLq(i).bits.flag := i.U
775    stFtqIdxS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqPtr)
776    stFtqOffsetS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqOffset)
777  }
778
779  val rollbackLqVReg = rollbackLq.map(x => RegNext(x.valid))
780  val rollbackLqReg = rollbackLq.map(x => RegEnable(x.bits, x.valid))
781
782  // S3: select rollback (part2), generate rollback request, then fire rollback request
783  // Note that we use robIdx - 1.U to flush the load instruction itself.
784  // Thus, here if last cycle's robIdx equals this cycle's robIdx, it still triggers the redirect.
785
786  // select uop in parallel
787  val lqs = getOldest(rollbackLqVReg, rollbackLqReg)
788  val rollbackUopExt = lqs._2(0)
789  val stFtqIdxS3 = RegNext(stFtqIdxS2)
790  val stFtqOffsetS3 = RegNext(stFtqOffsetS2)
791  val rollbackUop = rollbackUopExt.uop
792  val rollbackStFtqIdx = stFtqIdxS3(rollbackUopExt.flag)
793  val rollbackStFtqOffset = stFtqOffsetS3(rollbackUopExt.flag)
794
795  // check if rollback request is still valid in parallel
796  io.rollback.bits.robIdx := rollbackUop.robIdx
797  io.rollback.bits.ftqIdx := rollbackUop.cf.ftqPtr
798  io.rollback.bits.stFtqIdx := rollbackStFtqIdx
799  io.rollback.bits.ftqOffset := rollbackUop.cf.ftqOffset
800  io.rollback.bits.stFtqOffset := rollbackStFtqOffset
801  io.rollback.bits.level := RedirectLevel.flush
802  io.rollback.bits.interrupt := DontCare
803  io.rollback.bits.cfiUpdate := DontCare
804  io.rollback.bits.cfiUpdate.target := rollbackUop.cf.pc
805  io.rollback.bits.debug_runahead_checkpoint_id := rollbackUop.debugInfo.runahead_checkpoint_id
806  // io.rollback.bits.pc := DontCare
807
808  io.rollback.valid := rollbackLqVReg.reduce(_|_) &&
809                        (!lastCycleRedirect.valid || isBefore(rollbackUop.robIdx, lastCycleRedirect.bits.robIdx)) &&
810                        (!lastlastCycleRedirect.valid || isBefore(rollbackUop.robIdx, lastlastCycleRedirect.bits.robIdx))
811
812  when(io.rollback.valid) {
813    // XSDebug("Mem rollback: pc %x robidx %d\n", io.rollback.bits.cfi, io.rollback.bits.robIdx.asUInt)
814  }
815
816  /**
817  * Load-Load Memory violation detection
818  *
819  * When a load arrives at load_s1, it searches the LoadQueue for younger load instructions
820  * with the same load physical address. If a younger load has been released (or observed),
821  * that younger load needs to be re-executed.
822  *
823  * For now, if re-execution is found to be needed in load_s1, we mark the older load as replayInst,
824  * and the two loads will be replayed when the older load becomes the head of the ROB.
825  *
826  * When dcache releases a cache line, mark all writebacked entries in the load queue with
827  * the same line paddr as released.
828  */
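  // Illustrative summary (plain Scala, made-up names) of the per-entry condition evaluated
  // below: an entry j triggers an ld-ld violation for the querying load when it is younger,
  // still allocated, its cache line has been released, its paddr is known (datavalid or miss),
  // and the physical addresses match.
  //
  //   def ldLdViolation(younger: Boolean, allocated: Boolean, released: Boolean,
  //                     paddrKnown: Boolean, addrMatch: Boolean): Boolean =
  //     younger && allocated && released && paddrKnown && addrMatch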
829
830  // Load-Load Memory violation query
831  val deqRightMask = UIntToMask.rightmask(deqPtr, LoadQueueSize)
832  (0 until LoadPipelineWidth).map(i => {
833    dataModule.io.release_violation(i).paddr := io.loadViolationQuery(i).req.bits.paddr
834    io.loadViolationQuery(i).req.ready := true.B
835    io.loadViolationQuery(i).resp.valid := RegNext(io.loadViolationQuery(i).req.fire())
836    // Generate real violation mask
837    // Note that we use UIntToMask.rightmask here
838    val startIndex = io.loadViolationQuery(i).req.bits.uop.lqIdx.value
839    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
840    val xorMask = lqIdxMask ^ enqMask
841    val sameFlag = io.loadViolationQuery(i).req.bits.uop.lqIdx.flag === enqPtrExt(0).flag
842    val ldToEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
843    val ldld_violation_mask_gen_1 = WireInit(VecInit((0 until LoadQueueSize).map(j => {
844      ldToEnqPtrMask(j) && // the load is younger than current load
845      allocated(j) && // entry is valid
846      released(j) && // cacheline is released
847      (datavalid(j) || miss(j)) // paddr is valid
848    })))
849    val ldld_violation_mask_gen_2 = WireInit(VecInit((0 until LoadQueueSize).map(j => {
850      dataModule.io.release_violation(i).match_mask(j)// addr match
851      // addr match result is slow to generate, we RegNext() it
852    })))
853    val ldld_violation_mask = RegNext(ldld_violation_mask_gen_1).asUInt & RegNext(ldld_violation_mask_gen_2).asUInt
854    dontTouch(ldld_violation_mask)
855    ldld_violation_mask.suggestName("ldldViolationMask_" + i)
856    io.loadViolationQuery(i).resp.bits.have_violation := ldld_violation_mask.orR
857  })
858
859  // "released" flag update
860  //
861  // When io.release.valid (release1cycle.valid), it uses the last ld-ld paddr cam port to
862  // update release flag in 1 cycle
863
864  when(release1cycle.valid){
865    // Take over ld-ld paddr cam port
866    dataModule.io.release_violation.takeRight(1)(0).paddr := release1cycle.bits.paddr
867    io.loadViolationQuery.takeRight(1)(0).req.ready := false.B
868  }
869
870  when(release2cycle.valid){
871    // If a load comes in that cycle, we cannot judge whether it has an ld-ld violation
872    // We replay that load inst from RS
873    io.loadViolationQuery.map(i => i.req.ready :=
874      // use lsu side release2cycle_dup_lsu paddr for better timing
875      !(i.req.bits.paddr(PAddrBits-1, DCacheLineOffset) === release2cycle_dup_lsu.bits.paddr(PAddrBits-1, DCacheLineOffset))
876    )
877    // io.loadViolationQuery.map(i => i.req.ready := false.B) // For better timing
878  }
879
880  (0 until LoadQueueSize).map(i => {
881    when(RegNext(dataModule.io.release_violation.takeRight(1)(0).match_mask(i) &&
882      allocated(i) &&
883      datavalid(i) &&
884      release1cycle.valid
885    )){
886      // Note: if a load has missed in dcache and is waiting for refill in load queue,
887      // its released flag still needs to be set as true if addr matches.
888      released(i) := true.B
889    }
890  })
891
892  /**
893    * Memory mapped IO / other uncached operations
894    *
895    * States:
896    * (1) writeback from load units: mark as pending
897    * (2) when they reach ROB's head, they can be sent to uncache channel
898    * (3) response from uncache channel: mark as datavalid
899    * (4) writeback to ROB (and other units): mark as writebacked
900    * (5) ROB commits the instruction: same as normal instructions
901    */
902  //(2) when they reach ROB's head, they can be sent to uncache channel
903  val lqTailMmioPending = WireInit(pending(deqPtr))
904  val lqTailAllocated = WireInit(allocated(deqPtr))
905  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
906  val uncacheState = RegInit(s_idle)
907  switch(uncacheState) {
908    is(s_idle) {
909      when(RegNext(io.rob.pendingld && lqTailMmioPending && lqTailAllocated)) {
910        uncacheState := s_req
911      }
912    }
913    is(s_req) {
914      when(io.uncache.req.fire()) {
915        uncacheState := s_resp
916      }
917    }
918    is(s_resp) {
919      when(io.uncache.resp.fire()) {
920        uncacheState := s_wait
921      }
922    }
923    is(s_wait) {
924      when(RegNext(io.rob.commit)) {
925        uncacheState := s_idle // ready for next mmio
926      }
927    }
928  }
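  // Illustrative software model (plain Scala, made-up names) of the four-state uncache FSM
  // above: stay idle until the pending mmio load sits at the ROB head, issue the uncache
  // request, wait for its response, then wait for the ROB commit before returning to idle.
  //
  //   sealed trait UState
  //   case object UIdle extends UState; case object UReq  extends UState
  //   case object UResp extends UState; case object UWait extends UState
  //   def next(s: UState, mmioAtHead: Boolean, reqFire: Boolean,
  //            respFire: Boolean, robCommit: Boolean): UState = s match {
  //     case UIdle => if (mmioAtHead) UReq  else UIdle
  //     case UReq  => if (reqFire)    UResp else UReq
  //     case UResp => if (respFire)   UWait else UResp
  //     case UWait => if (robCommit)  UIdle else UWait
  //   }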
929
930  // used for uncache commit
931  val uncacheData = RegInit(0.U(XLEN.W))
932  val uncacheCommitFired = RegInit(false.B)
933
934  when(uncacheState === s_req) {
935    uncacheCommitFired := false.B
936  }
937
938  io.uncache.req.valid := uncacheState === s_req
939
940  dataModule.io.uncache.raddr := deqPtrExtNext.value
941
942  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
943  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
944  io.uncache.req.bits.data := DontCare
945  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask
946  io.uncache.req.bits.id   := RegNext(deqPtrExtNext.value)
947  io.uncache.req.bits.instrtype := DontCare
948  io.uncache.req.bits.replayCarry := DontCare
949  io.uncache.req.bits.atomic := true.B
950
951  io.uncache.resp.ready := true.B
952
953  when (io.uncache.req.fire()) {
954    pending(deqPtr) := false.B
955
956    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
957      uop(deqPtr).cf.pc,
958      io.uncache.req.bits.addr,
959      io.uncache.req.bits.data,
960      io.uncache.req.bits.cmd,
961      io.uncache.req.bits.mask
962    )
963  }
964
965  // (3) response from uncache channel: mark as datavalid
966  when(io.uncache.resp.fire()){
967    datavalid(deqPtr) := true.B
968    uncacheData := io.uncache.resp.bits.data(XLEN-1, 0)
969
970    XSDebug("uncache resp: data %x\n", io.refill.bits.data)
971  }
972
973  // writeback mmio load, Note: only use ldout(0) to write back
974  //
975  // Int load writeback will finish (if not blocked) in one cycle
976  io.ldout(0).bits.uop := uop(deqPtr)
977  io.ldout(0).bits.uop.lqIdx := deqPtr.asTypeOf(new LqPtr)
978  io.ldout(0).bits.data := DontCare // not used
979  io.ldout(0).bits.redirectValid := false.B
980  io.ldout(0).bits.redirect := DontCare
981  io.ldout(0).bits.debug.isMMIO := true.B
982  io.ldout(0).bits.debug.isPerfCnt := false.B
983  io.ldout(0).bits.debug.paddr := debug_paddr(deqPtr)
984  io.ldout(0).bits.debug.vaddr := vaddrModule.io.rdata(1)
985  io.ldout(0).bits.fflags := DontCare
986
987  io.ldout(0).valid := (uncacheState === s_wait) && !uncacheCommitFired
988
989  io.ldout(1).bits := DontCare
990  io.ldout(1).valid := false.B
991
992  // merged data, uop and offset for data sel in load_s3
993  io.ldRawDataOut(0).lqData := uncacheData
994  io.ldRawDataOut(0).uop := io.ldout(0).bits.uop
995  io.ldRawDataOut(0).addrOffset := dataModule.io.uncache.rdata.paddr
996
997  io.ldRawDataOut(1) := DontCare
998
999  when(io.ldout(0).fire()){
1000    uncacheCommitFired := true.B
1001  }
1002
1003  XSPerfAccumulate("uncache_load_write_back", io.ldout(0).fire())
1004
1005  // Read vaddr for mem exception
1006  // no inst will be committed 1 cycle before tval update
1007  vaddrModule.io.raddr(0) := (deqPtrExt + commitCount).value
1008  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)
1009
1010  // read vaddr for mmio, and only port {1} is used
1011  vaddrModule.io.raddr(1) := deqPtr
1012
1013  (0 until LoadPipelineWidth).map(i => {
1014    if(i == 0) {
1015      vaddrTriggerResultModule.io.raddr(i) := deqPtr
1016      io.trigger(i).lqLoadAddrTriggerHitVec := Mux(
1017        io.ldout(i).valid,
1018        vaddrTriggerResultModule.io.rdata(i),
1019        VecInit(Seq.fill(3)(false.B))
1020      )
1021    }else {
1022      vaddrTriggerResultModule.io.raddr(i) := DontCare
1023      io.trigger(i).lqLoadAddrTriggerHitVec := VecInit(Seq.fill(3)(false.B))
1024    }
1025    // vaddrTriggerResultModule.io.raddr(i) := loadWbSelGen(i)
1026    // io.trigger(i).lqLoadAddrTriggerHitVec := Mux(
1027    //   loadWbSelV(i),
1028    //   vaddrTriggerResultModule.io.rdata(i),
1029    //   VecInit(Seq.fill(3)(false.B))
1030    // )
1031  })
1032
1033  // misprediction recovery / exception redirect
1034  // invalidate lq entries using robIdx
1035  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
1036  for (i <- 0 until LoadQueueSize) {
1037    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i)
1038    when (needCancel(i)) {
1039      allocated(i) := false.B
1040    }
1041  }
1042
1043  /**
1044    * update pointers
1045    */
1046  val lastEnqCancel = PopCount(RegNext(VecInit(canEnqueue.zip(enqCancel).map(x => x._1 && x._2))))
1047  val lastCycleCancelCount = PopCount(RegNext(needCancel))
1048  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept, PopCount(io.enq.req.map(_.valid)), 0.U)
1049  when (lastCycleRedirect.valid) {
1050    // we recover the pointers in the next cycle after redirect
1051    enqPtrExt := VecInit(enqPtrExt.map(_ - (lastCycleCancelCount + lastEnqCancel)))
1052  }.otherwise {
1053    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
1054  }
1055
1056  deqPtrExtNext := deqPtrExt + commitCount
1057  deqPtrExt := deqPtrExtNext
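  // Illustrative arithmetic (plain Scala, made-up names, wrap flag omitted for brevity): after
  // a redirect the enqueue pointer steps back by the entries squashed last cycle (cancelled in
  // the queue plus cancelled at enqueue); otherwise it advances by the accepted enqueue count,
  // while the dequeue pointer always advances by the commit count.
  //
  //   def nextEnqValue(enq: Int, redirect: Boolean, cancelled: Int, enqCancelled: Int,
  //                    accepted: Int, size: Int): Int =
  //     if (redirect) Math.floorMod(enq - cancelled - enqCancelled, size)
  //     else (enq + accepted) % size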
1058
1059  io.lqCancelCnt := RegNext(lastCycleCancelCount + lastEnqCancel)
1060
1061  /**
1062    * misc
1063    */
1064  // perf counter
1065  QueuePerf(LoadQueueSize, validCount, !allowEnqueue)
1066  io.lqFull := !allowEnqueue
1067  XSPerfAccumulate("rollback", io.rollback.valid) // rollback redirect generated
1068  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req
1069  XSPerfAccumulate("mmioCnt", io.uncache.req.fire())
1070  XSPerfAccumulate("refill", io.refill.valid)
1071  XSPerfAccumulate("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire()))))
1072  XSPerfAccumulate("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready))))
1073  XSPerfAccumulate("utilization_miss", PopCount((0 until LoadQueueSize).map(i => allocated(i) && miss(i))))
1074
1075  if (env.EnableTopDown) {
1076    val stall_loads_bound = WireDefault(0.B)
1077    ExcitingUtils.addSink(stall_loads_bound, "stall_loads_bound", ExcitingUtils.Perf)
1078    val have_miss_entry = (allocated zip miss).map(x => x._1 && x._2).reduce(_ || _)
1079    val l1d_loads_bound = stall_loads_bound && !have_miss_entry
1080    ExcitingUtils.addSource(l1d_loads_bound, "l1d_loads_bound", ExcitingUtils.Perf)
1081    XSPerfAccumulate("l1d_loads_bound", l1d_loads_bound)
1082    val stall_l1d_load_miss = stall_loads_bound && have_miss_entry
1083    ExcitingUtils.addSource(stall_l1d_load_miss, "stall_l1d_load_miss", ExcitingUtils.Perf)
1084    ExcitingUtils.addSink(WireInit(0.U), "stall_l1d_load_miss", ExcitingUtils.Perf)
1085  }
1086
1087  val perfValidCount = RegNext(validCount)
1088
1089  val perfEvents = Seq(
1090    ("rollback         ", io.rollback.valid),
1091    ("mmioCycle        ", uncacheState =/= s_idle),
1092    ("mmio_Cnt         ", io.uncache.req.fire()),
1093    ("refill           ", io.refill.valid),
1094    ("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire())))),
1095    ("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready)))),
1096    ("ltq_1_4_valid    ", (perfValidCount < (LoadQueueSize.U/4.U))),
1097    ("ltq_2_4_valid    ", (perfValidCount > (LoadQueueSize.U/4.U)) & (perfValidCount <= (LoadQueueSize.U/2.U))),
1098    ("ltq_3_4_valid    ", (perfValidCount > (LoadQueueSize.U/2.U)) & (perfValidCount <= (LoadQueueSize.U*3.U/4.U))),
1099    ("ltq_4_4_valid    ", (perfValidCount > (LoadQueueSize.U*3.U/4.U)))
1100  )
1101  generatePerfEvent()
1102
1103  // debug info
1104  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)
1105
1106  def PrintFlag(flag: Bool, name: String): Unit = {
1107    when(flag) {
1108      XSDebug(false, true.B, name)
1109    }.otherwise {
1110      XSDebug(false, true.B, " ")
1111    }
1112  }
1113
1114  for (i <- 0 until LoadQueueSize) {
1115    XSDebug(i + " pc %x pa %x ", uop(i).cf.pc, debug_paddr(i))
1116    PrintFlag(allocated(i), "a")
1117    PrintFlag(allocated(i) && datavalid(i), "v")
1118    PrintFlag(allocated(i) && writebacked(i), "w")
1119    PrintFlag(allocated(i) && miss(i), "m")
1120    PrintFlag(allocated(i) && pending(i), "p")
1121    XSDebug(false, true.B, "\n")
1122  }
1123
1124}
1125