xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/StoreQueue.scala (revision 44f2941b36bd01d0dea9e5e076949f6438c0014d)
/***************************************************************************************
* Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
* Copyright (c) 2020-2024 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chisel3._
import chisel3.util._
import difftest._
import difftest.common.DifftestMem
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants}
import xiangshan.backend._
import xiangshan.backend.rob.{RobLsqIO, RobPtr}
import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
import xiangshan.backend.decode.isa.bitfield.{Riscv32BitInst, XSInstBitFields}
import xiangshan.backend.fu.FuConfig._
import xiangshan.backend.fu.FuType
import xiangshan.ExceptionNO._
import coupledL2.{CMOReq, CMOResp}

class SqPtr(implicit p: Parameters) extends CircularQueuePtr[SqPtr](
  p => p(XSCoreParamsKey).StoreQueueSize
){
}

object SqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): SqPtr = {
    val ptr = Wire(new SqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
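
// A minimal illustration (not part of the design) of the circular-pointer
// semantics, assuming a hypothetical StoreQueueSize of 64: the flag bit flips
// on each wrap, so age comparisons such as isAfter still work across the wrap
// point.
//
//   val a = SqPtr(f = false.B, v = 63.U)
//   val b = a + 1.U   // b.flag === true.B, b.value === 0.U
//   isAfter(b, a)     // true.B: b is younger even though b.value < a.value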

class SqEnqIO(implicit p: Parameters) extends MemBlockBundle {
  val canAccept = Output(Bool())
  val lqCanAccept = Input(Bool())
  val needAlloc = Vec(LSQEnqWidth, Input(Bool()))
  val req = Vec(LSQEnqWidth, Flipped(ValidIO(new DynInst)))
  val resp = Vec(LSQEnqWidth, Output(new SqPtr))
}

class DataBufferEntry (implicit p: Parameters) extends DCacheBundle {
  val addr   = UInt(PAddrBits.W)
  val vaddr  = UInt(VAddrBits.W)
  val data   = UInt(VLEN.W)
  val mask   = UInt((VLEN/8).W)
  val wline = Bool()
  val sqPtr  = new SqPtr
  val prefetch = Bool()
  val vecValid = Bool()
}

class StoreExceptionBuffer(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  // The 1st StorePipelineWidth ports: sta exception generated at s1, except for af
  // The 2nd StorePipelineWidth ports: sta af generated at s2
  // The following VecStorePipelineWidth ports: vector st exception
  // The last port: non-data error generated in SoC
  val enqPortNum = StorePipelineWidth * 2 + VecStorePipelineWidth + 1
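  // Port-index sketch derived from the comment above, assuming (hypothetically)
  // StorePipelineWidth = 2 and VecStorePipelineWidth = 2, so enqPortNum = 7:
  //   storeAddrIn(0..1): sta exception reported at s1 (except af)
  //   storeAddrIn(2..3): sta access fault reported at s2
  //   storeAddrIn(4..5): vector store exception
  //   storeAddrIn(6)   : non-data error from the SoC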

  val io = IO(new Bundle() {
    val redirect = Flipped(ValidIO(new Redirect))
    val storeAddrIn = Vec(enqPortNum, Flipped(ValidIO(new LsPipelineBundle())))
    val flushFrmMaBuf = Input(Bool())
    val exceptionAddr = new ExceptionAddrIO
  })

  val req_valid = RegInit(false.B)
  val req = Reg(new LsPipelineBundle())

  // enqueue
  // S1:
  val s1_req = VecInit(io.storeAddrIn.map(_.bits))
  val s1_valid = VecInit(io.storeAddrIn.map(x =>
      x.valid && !x.bits.uop.robIdx.needFlush(io.redirect) && ExceptionNO.selectByFu(x.bits.uop.exceptionVec, StaCfg).asUInt.orR
  ))

  // S2: delay 1 cycle
  val s2_req = (0 until enqPortNum).map(i =>
    RegEnable(s1_req(i), s1_valid(i)))
  val s2_valid = (0 until enqPortNum).map(i =>
    RegNext(s1_valid(i)) && !s2_req(i).uop.robIdx.needFlush(io.redirect)
  )

  val s2_enqueue = Wire(Vec(enqPortNum, Bool()))
  for (w <- 0 until enqPortNum) {
    s2_enqueue(w) := s2_valid(w)
  }

  when (req_valid && req.uop.robIdx.needFlush(io.redirect)) {
    req_valid := s2_enqueue.asUInt.orR
  }.elsewhen (s2_enqueue.asUInt.orR) {
    req_valid := true.B
  }

  def selectOldest[T <: LsPipelineBundle](valid: Seq[Bool], bits: Seq[T]): (Seq[Bool], Seq[T]) = {
    assert(valid.length == bits.length)
    if (valid.length == 0 || valid.length == 1) {
      (valid, bits)
    } else if (valid.length == 2) {
      val res = Seq.fill(2)(Wire(Valid(chiselTypeOf(bits(0)))))
      for (i <- res.indices) {
        res(i).valid := valid(i)
        res(i).bits := bits(i)
      }
      val oldest = Mux(valid(0) && valid(1),
        Mux(isAfter(bits(0).uop.robIdx, bits(1).uop.robIdx) ||
          (isNotBefore(bits(0).uop.robIdx, bits(1).uop.robIdx) && bits(0).uop.uopIdx > bits(1).uop.uopIdx), res(1), res(0)),
        Mux(valid(0) && !valid(1), res(0), res(1)))
      (Seq(oldest.valid), Seq(oldest.bits))
    } else {
      val left = selectOldest(valid.take(valid.length / 2), bits.take(bits.length / 2))
      val right = selectOldest(valid.takeRight(valid.length - (valid.length / 2)), bits.takeRight(bits.length - (bits.length / 2)))
      selectOldest(left._1 ++ right._1, left._2 ++ right._2)
    }
  }
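
  // Behavior sketch: selectOldest recursively halves the inputs down to pairs
  // and, within a pair, keeps the request with the smaller (robIdx, uopIdx)
  // order, i.e. the program-older one. Feeding it, say, three valid requests
  // therefore reduces them to a single (valid, bits) pair holding the oldest.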

  val reqSel = selectOldest(s2_enqueue, s2_req)

  when (req_valid) {
    req := Mux(
      reqSel._1(0) && (isAfter(req.uop.robIdx, reqSel._2(0).uop.robIdx) || (isNotBefore(req.uop.robIdx, reqSel._2(0).uop.robIdx) && req.uop.uopIdx > reqSel._2(0).uop.uopIdx)),
      reqSel._2(0),
      req)
  } .elsewhen (s2_enqueue.asUInt.orR) {
    req := reqSel._2(0)
  }

  io.exceptionAddr.vaddr  := req.fullva
  io.exceptionAddr.gpaddr := req.gpaddr
  io.exceptionAddr.vstart := req.uop.vpu.vstart
  io.exceptionAddr.vl     := req.uop.vpu.vl
  io.exceptionAddr.isForVSnonLeafPTE := req.isForVSnonLeafPTE

  when(req_valid && io.flushFrmMaBuf) {
    req_valid := false.B
  }
}

// Store Queue
class StoreQueue(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasPerfEvents
  with HasVLSUParameters {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val enq = new SqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val vecFeedback = Vec(VecLoadPipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))
    val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // store addr, data is not included
    val storeAddrInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // store more mmio and exception
    val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new MemExuOutput(isVector = true)))) // store data, send to sq from rs
    val storeMaskIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreMaskBundle))) // store mask, send to sq from rs
    val sbuffer = Vec(EnsbufferWidth, Decoupled(new DCacheWordReqWithVaddrAndPfFlag)) // write committed store to sbuffer
    val sbufferVecDifftestInfo = Vec(EnsbufferWidth, Decoupled(new DynInst)) // the DynInst info that vector store difftest needs; write committed store to sbuffer
    val uncacheOutstanding = Input(Bool())
    val cmoOpReq  = DecoupledIO(new CMOReq)
    val cmoOpResp = Flipped(DecoupledIO(new CMOResp))
    val mmioStout = DecoupledIO(new MemExuOutput) // writeback uncached store
    val vecmmioStout = DecoupledIO(new MemExuOutput(isVector = true))
    val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    // TODO: scommit is only for scalar store
    val rob = Flipped(new RobLsqIO)
    val uncache = new UncacheWordIO
    // val refill = Flipped(Valid(new DCacheLineReq ))
    val exceptionAddr = new ExceptionAddrIO
    val flushSbuffer = new SbufferFlushBundle
    val sqEmpty = Output(Bool())
    val stAddrReadySqPtr = Output(new SqPtr)
    val stAddrReadyVec = Output(Vec(StoreQueueSize, Bool()))
    val stDataReadySqPtr = Output(new SqPtr)
    val stDataReadyVec = Output(Vec(StoreQueueSize, Bool()))
    val stIssuePtr = Output(new SqPtr)
    val sqDeqPtr = Output(new SqPtr)
    val sqFull = Output(Bool())
    val sqCancelCnt = Output(UInt(log2Up(StoreQueueSize + 1).W))
    val sqDeq = Output(UInt(log2Ceil(EnsbufferWidth + 1).W))
    val force_write = Output(Bool())
    val maControl   = Flipped(new StoreMaBufToSqControlIO)
  })

  println("StoreQueue: size:" + StoreQueueSize)

  // data modules
  val uop = Reg(Vec(StoreQueueSize, new DynInst))
  // val data = Reg(Vec(StoreQueueSize, new LsqEntry))
  val dataModule = Module(new SQDataModule(
    numEntries = StoreQueueSize,
    numRead = EnsbufferWidth,
    numWrite = StorePipelineWidth,
    numForward = LoadPipelineWidth
  ))
  dataModule.io := DontCare
  val paddrModule = Module(new SQAddrModule(
    dataWidth = PAddrBits,
    numEntries = StoreQueueSize,
    numRead = EnsbufferWidth,
    numWrite = StorePipelineWidth,
    numForward = LoadPipelineWidth
  ))
  paddrModule.io := DontCare
  val vaddrModule = Module(new SQAddrModule(
    dataWidth = VAddrBits,
    numEntries = StoreQueueSize,
    numRead = EnsbufferWidth, // sbuffer; badvaddr will be sent from exceptionBuffer
    numWrite = StorePipelineWidth,
    numForward = LoadPipelineWidth
  ))
  vaddrModule.io := DontCare
  val dataBuffer = Module(new DatamoduleResultBuffer(new DataBufferEntry))
  val difftestBuffer = if (env.EnableDifftest) Some(Module(new DatamoduleResultBuffer(new DynInst))) else None
  val exceptionBuffer = Module(new StoreExceptionBuffer)
  exceptionBuffer.io.redirect := io.brqRedirect
  exceptionBuffer.io.exceptionAddr.isStore := DontCare
  // vlsu exception!
  for (i <- 0 until VecStorePipelineWidth) {
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).valid               := io.vecFeedback(i).valid && io.vecFeedback(i).bits.feedback(VecFeedbacks.FLUSH) // have exception
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits                := DontCare
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.vaddr          := io.vecFeedback(i).bits.vaddr
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.gpaddr         := io.vecFeedback(i).bits.gpaddr
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.uopIdx     := io.vecFeedback(i).bits.uopidx
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.robIdx     := io.vecFeedback(i).bits.robidx
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.vpu.vstart := io.vecFeedback(i).bits.vstart
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.vpu.vl     := io.vecFeedback(i).bits.vl
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.exceptionVec     := io.vecFeedback(i).bits.exceptionVec
  }


  val debug_paddr = Reg(Vec(StoreQueueSize, UInt((PAddrBits).W)))
  val debug_vaddr = Reg(Vec(StoreQueueSize, UInt((VAddrBits).W)))
  val debug_data = Reg(Vec(StoreQueueSize, UInt((XLEN).W)))

  // state & misc
  val allocated = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // sq entry has been allocated
  val addrvalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio addr is valid
  val datavalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio data is valid
  val allvalid  = VecInit((0 until StoreQueueSize).map(i => addrvalid(i) && datavalid(i))) // non-mmio data & addr is valid
  val committed = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // inst has been committed by rob
  val unaligned = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // unaligned store
  val pending = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of rob
  val mmio = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // mmio: inst is an mmio inst
  val atomic = RegInit(VecInit(List.fill(StoreQueueSize)(false.B)))
  val prefetch = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // need prefetch when committing this store to sbuffer?
  val isVec = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // vector store instruction
  //val vec_lastuop = Reg(Vec(StoreQueueSize, Bool())) // last uop of vector store instruction
  val vecMbCommit = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // vector store committed from merge buffer to rob
  val vecDataValid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // vector store need write to sbuffer
  val hasException = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // store has exception, should deq but not write sbuffer
  val waitStoreS2 = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // wait for mmio and exception result until store_s2
  // val vec_robCommit = Reg(Vec(StoreQueueSize, Bool())) // vector store committed by rob
  // val vec_secondInv = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // Vector unit-stride, second entry is invalid
  val vecExceptionFlag = RegInit(0.U.asTypeOf(Valid(new DynInst)))

  // ptr
  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new SqPtr))))
  val rdataPtrExt = RegInit(VecInit((0 until EnsbufferWidth).map(_.U.asTypeOf(new SqPtr))))
  val deqPtrExt = RegInit(VecInit((0 until EnsbufferWidth).map(_.U.asTypeOf(new SqPtr))))
  val cmtPtrExt = RegInit(VecInit((0 until CommitWidth).map(_.U.asTypeOf(new SqPtr))))
  val addrReadyPtrExt = RegInit(0.U.asTypeOf(new SqPtr))
  val dataReadyPtrExt = RegInit(0.U.asTypeOf(new SqPtr))

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt(0).value
  val cmtPtr = cmtPtrExt(0).value

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt(0))
  val allowEnqueue = validCount <= (StoreQueueSize - LSQStEnqWidth).U

  val deqMask = UIntToMask(deqPtr, StoreQueueSize)
  val enqMask = UIntToMask(enqPtr, StoreQueueSize)
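
  // Mask-encoding sketch, assuming UIntToMask sets the bits strictly below the
  // pointer value (hypothetical 8-entry queue): UIntToMask(3.U, 8) gives
  // "b0000_0111".U, so enqMask ^ deqMask marks exactly the entries between
  // deqPtr and enqPtr when both pointers carry the same wrap flag.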

  val commitCount = WireInit(0.U(log2Ceil(CommitWidth + 1).W))
  val scommit = GatedRegNext(io.rob.scommit)

  // RegNext misalign control for better timing
  val doMisalignSt = GatedValidRegNext((rdataPtrExt(0).value === deqPtr) && (cmtPtr === deqPtr) && allocated(deqPtr) && datavalid(deqPtr) && unaligned(deqPtr) && !isVec(deqPtr))
  val finishMisalignSt = GatedValidRegNext(doMisalignSt && io.maControl.control.removeSq && !io.maControl.control.hasException)
  val misalignBlock = doMisalignSt && !finishMisalignSt

  // store misalign info
  io.maControl.storeInfo.data := dataModule.io.rdata(0).data
  io.maControl.storeInfo.dataReady := doMisalignSt
  io.maControl.storeInfo.completeSbTrans := doMisalignSt && dataBuffer.io.enq(0).fire

  // store can be committed by ROB
  io.rob.mmio := DontCare
  io.rob.uop := DontCare

  // Read dataModule
  assert(EnsbufferWidth <= 2)
  // rdataPtrExtNext and rdataPtrExtNext+1 entry will be read from dataModule
  val rdataPtrExtNext = Wire(Vec(EnsbufferWidth, new SqPtr))
  rdataPtrExtNext := WireInit(Mux(dataBuffer.io.enq(1).fire,
    VecInit(rdataPtrExt.map(_ + 2.U)),
    Mux(dataBuffer.io.enq(0).fire || io.mmioStout.fire || io.vecmmioStout.fire,
      VecInit(rdataPtrExt.map(_ + 1.U)),
      rdataPtrExt
    )
  ))

  // deqPtrExtNext traces which inst is about to leave store queue
  //
  // io.sbuffer(i).fire is RegNexted, as sbuffer data write takes 2 cycles.
  // Before the data write finishes, sbuffer is unable to provide store-to-load
  // forward data. As a workaround, the deqPtrExt and allocated flag update
  // is delayed so that loads can get the right data from store queue.
  //
  // Modify deqPtrExtNext and io.sqDeq with care!
  val deqPtrExtNext = Wire(Vec(EnsbufferWidth, new SqPtr))
  deqPtrExtNext := Mux(RegNext(io.sbuffer(1).fire),
    VecInit(deqPtrExt.map(_ + 2.U)),
    Mux((RegNext(io.sbuffer(0).fire)) || io.mmioStout.fire || io.vecmmioStout.fire,
      VecInit(deqPtrExt.map(_ + 1.U)),
      deqPtrExt
    )
  )
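
  // Timing sketch of the delayed dequeue described above (cycles hypothetical):
  //   T0: io.sbuffer(i).fire      — the 2-cycle sbuffer data write starts
  //   T1: RegNext(fire) is true   — deqPtrExtNext advances, allocated is cleared
  // Until T1 the entry stays visible in sq, so a load issued around T0 can
  // still forward from the store queue instead of the not-yet-written sbuffer.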

  io.sqDeq := RegNext(Mux(RegNext(io.sbuffer(1).fire && !misalignBlock), 2.U,
    Mux((RegNext(io.sbuffer(0).fire && !misalignBlock)) || io.mmioStout.fire || io.vecmmioStout.fire || finishMisalignSt, 1.U, 0.U)
  ))
  assert(!RegNext(RegNext(io.sbuffer(0).fire) && (io.mmioStout.fire || io.vecmmioStout.fire)))

  for (i <- 0 until EnsbufferWidth) {
    dataModule.io.raddr(i) := rdataPtrExtNext(i).value
    paddrModule.io.raddr(i) := rdataPtrExtNext(i).value
    vaddrModule.io.raddr(i) := rdataPtrExtNext(i).value
  }

  /**
    * Enqueue at dispatch
    *
    * Currently, StoreQueue only allows enqueue when #emptyEntries > EnqWidth
    */
  io.enq.canAccept := allowEnqueue
  val canEnqueue = io.enq.req.map(_.valid)
  val enqCancel = io.enq.req.map(_.bits.robIdx.needFlush(io.brqRedirect))
  val vStoreFlow = io.enq.req.map(_.bits.numLsElem)
  val validVStoreFlow = vStoreFlow.zipWithIndex.map{case (vStoreFlowNumItem, index) => Mux(!RegNext(io.brqRedirect.valid) && canEnqueue(index), vStoreFlowNumItem, 0.U)}
  val validVStoreOffset = vStoreFlow.zip(io.enq.needAlloc).map{case (flow, needAllocItem) => Mux(needAllocItem, flow, 0.U)}
  val validVStoreOffsetRShift = 0.U +: validVStoreOffset.take(vStoreFlow.length - 1)
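
  // Allocation sketch with hypothetical numbers: for needAlloc = Seq(1, 1, 1)
  // and numLsElem = Seq(4, 1, 2), validVStoreOffsetRShift = Seq(0, 4, 1), so
  // the running sum below gives port 0 the base enqPtr (entries +0..+3),
  // port 1 the base enqPtr + 4, and port 2 the base enqPtr + 5 (entries
  // +5..+6): each port's sqIdx is enqPtr plus the element count of all
  // preceding ports.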

  for (i <- 0 until io.enq.req.length) {
    val sqIdx = enqPtrExt(0) + validVStoreOffsetRShift.take(i + 1).reduce(_ + _)
    val index = io.enq.req(i).bits.sqIdx
    val enqInstr = io.enq.req(i).bits.instr.asTypeOf(new XSInstBitFields)
    when (canEnqueue(i) && !enqCancel(i)) {
      // The maximum 'numLsElem' number that can be emitted per dispatch port is:
      //    16 2 2 2 2 2.
      // Therefore, VecMemLSQEnqIteratorNumberSeq = Seq(16, 2, 2, 2, 2, 2)
      for (j <- 0 until VecMemLSQEnqIteratorNumberSeq(i)) {
        when (j.U < validVStoreOffset(i)) {
          uop((index + j.U).value) := io.enq.req(i).bits
          // NOTE: the index will be used when replay
          uop((index + j.U).value).sqIdx := sqIdx + j.U
          allocated((index + j.U).value) := true.B
          datavalid((index + j.U).value) := false.B
          addrvalid((index + j.U).value) := false.B
          unaligned((index + j.U).value) := false.B
          committed((index + j.U).value) := false.B
          pending((index + j.U).value) := false.B
          prefetch((index + j.U).value) := false.B
          mmio((index + j.U).value) := false.B
          isVec((index + j.U).value) := enqInstr.isVecStore // check vector store by the encoding of inst
          vecMbCommit((index + j.U).value) := false.B
          vecDataValid((index + j.U).value) := false.B
          hasException((index + j.U).value) := false.B
          waitStoreS2((index + j.U).value) := true.B
          XSError(!io.enq.canAccept || !io.enq.lqCanAccept, s"must accept $i\n")
          XSError(index.value =/= sqIdx.value, s"must be the same entry $i\n")
        }
      }
    }
    io.enq.resp(i) := sqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Update addr/dataReadyPtr when issue from rs
    */
  // update issuePtr
  val IssuePtrMoveStride = 4
  require(IssuePtrMoveStride >= 2)

  val addrReadyLookupVec = (0 until IssuePtrMoveStride).map(addrReadyPtrExt + _.U)
  val addrReadyLookup = addrReadyLookupVec.map(ptr => allocated(ptr.value) &&
   (mmio(ptr.value) || addrvalid(ptr.value) || vecMbCommit(ptr.value))
    && ptr =/= enqPtrExt(0))
  val nextAddrReadyPtr = addrReadyPtrExt + PriorityEncoder(VecInit(addrReadyLookup.map(!_) :+ true.B))
  addrReadyPtrExt := nextAddrReadyPtr
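
  // Stride-walk sketch: the lookup checks the next IssuePtrMoveStride entries
  // in parallel, and PriorityEncoder over the inverted ready bits (with a
  // trailing true) counts the leading ready entries. E.g., a hypothetical
  // addrReadyLookup = Seq(1, 1, 0, 1) inverts to Seq(0, 0, 1, 0, 1), whose
  // first set bit is at index 2, so the pointer advances by exactly 2.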

  val stAddrReadyVecReg = Wire(Vec(StoreQueueSize, Bool()))
  (0 until StoreQueueSize).map(i => {
    stAddrReadyVecReg(i) := allocated(i) && (mmio(i) || addrvalid(i) || (isVec(i) && vecMbCommit(i)))
  })
  io.stAddrReadyVec := GatedValidRegNext(stAddrReadyVecReg)

  when (io.brqRedirect.valid) {
    addrReadyPtrExt := Mux(
      isAfter(cmtPtrExt(0), deqPtrExt(0)),
      cmtPtrExt(0),
      deqPtrExtNext(0) // for mmio insts, deqPtr may be ahead of cmtPtr
    )
  }

  io.stAddrReadySqPtr := addrReadyPtrExt

  // update dataReadyPtr
  val dataReadyLookupVec = (0 until IssuePtrMoveStride).map(dataReadyPtrExt + _.U)
  val dataReadyLookup = dataReadyLookupVec.map(ptr => allocated(ptr.value) &&
   (mmio(ptr.value) || datavalid(ptr.value) || vecMbCommit(ptr.value))
    && ptr =/= enqPtrExt(0))
  val nextDataReadyPtr = dataReadyPtrExt + PriorityEncoder(VecInit(dataReadyLookup.map(!_) :+ true.B))
  dataReadyPtrExt := nextDataReadyPtr

  val stDataReadyVecReg = Wire(Vec(StoreQueueSize, Bool()))
  (0 until StoreQueueSize).map(i => {
    stDataReadyVecReg(i) := allocated(i) && (mmio(i) || datavalid(i) || (isVec(i) && vecMbCommit(i)))
  })
  io.stDataReadyVec := GatedValidRegNext(stDataReadyVecReg)

  when (io.brqRedirect.valid) {
    dataReadyPtrExt := Mux(
      isAfter(cmtPtrExt(0), deqPtrExt(0)),
      cmtPtrExt(0),
      deqPtrExtNext(0) // for mmio insts, deqPtr may be ahead of cmtPtr
    )
  }

  io.stDataReadySqPtr := dataReadyPtrExt
  io.stIssuePtr := enqPtrExt(0)
  io.sqDeqPtr := deqPtrExt(0)

  /**
    * Writeback store from store units
    *
    * Most store instructions write back to the regfile in the previous cycle.
    * However,
    *   (1) For an mmio instruction with exceptions, we need to mark it as addrvalid
    * (in this way it will trigger an exception when it reaches ROB's head)
    * instead of pending, to avoid sending it to the lower level.
    *   (2) For an mmio instruction without exceptions, we mark it as pending.
    * When the instruction reaches ROB's head, StoreQueue sends it to the uncache channel.
    * Upon receiving the response, StoreQueue writes back the instruction
    * through an arbiter with store units. It will later commit as normal.
    */

  // Write addr to sq
  for (i <- 0 until StorePipelineWidth) {
    paddrModule.io.wen(i) := false.B
    vaddrModule.io.wen(i) := false.B
    dataModule.io.mask.wen(i) := false.B
    val stWbIndex = io.storeAddrIn(i).bits.uop.sqIdx.value
    exceptionBuffer.io.storeAddrIn(i).valid := io.storeAddrIn(i).fire && !io.storeAddrIn(i).bits.miss && !io.storeAddrIn(i).bits.isvec
    exceptionBuffer.io.storeAddrIn(i).bits := io.storeAddrIn(i).bits
    // will re-enter exceptionbuffer at store_s2
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).valid := false.B
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits := 0.U.asTypeOf(new LsPipelineBundle)

    when (io.storeAddrIn(i).fire) {
      val addr_valid = !io.storeAddrIn(i).bits.miss
      addrvalid(stWbIndex) := addr_valid //!io.storeAddrIn(i).bits.mmio
      // pending(stWbIndex) := io.storeAddrIn(i).bits.mmio
      unaligned(stWbIndex) := io.storeAddrIn(i).bits.uop.exceptionVec(storeAddrMisaligned)

      paddrModule.io.waddr(i) := stWbIndex
      paddrModule.io.wdata(i) := io.storeAddrIn(i).bits.paddr
      paddrModule.io.wmask(i) := io.storeAddrIn(i).bits.mask
      paddrModule.io.wlineflag(i) := io.storeAddrIn(i).bits.wlineflag
      paddrModule.io.wen(i) := true.B

      vaddrModule.io.waddr(i) := stWbIndex
      vaddrModule.io.wdata(i) := io.storeAddrIn(i).bits.vaddr
      vaddrModule.io.wmask(i) := io.storeAddrIn(i).bits.mask
      vaddrModule.io.wlineflag(i) := io.storeAddrIn(i).bits.wlineflag
      vaddrModule.io.wen(i) := true.B

      debug_paddr(paddrModule.io.waddr(i)) := paddrModule.io.wdata(i)

      // mmio(stWbIndex) := io.storeAddrIn(i).bits.mmio

      uop(stWbIndex) := io.storeAddrIn(i).bits.uop
      uop(stWbIndex).debugInfo := io.storeAddrIn(i).bits.uop.debugInfo

      vecDataValid(stWbIndex) := io.storeAddrIn(i).bits.isvec

      XSInfo("store addr write to sq idx %d pc 0x%x miss:%d vaddr %x paddr %x mmio %x isvec %x\n",
        io.storeAddrIn(i).bits.uop.sqIdx.value,
        io.storeAddrIn(i).bits.uop.pc,
        io.storeAddrIn(i).bits.miss,
        io.storeAddrIn(i).bits.vaddr,
        io.storeAddrIn(i).bits.paddr,
        io.storeAddrIn(i).bits.mmio,
        io.storeAddrIn(i).bits.isvec
      )
    }

    // re-replenish mmio, because pma/pmp results arrive one cycle later
    val storeAddrInFireReg = RegNext(io.storeAddrIn(i).fire && !io.storeAddrIn(i).bits.miss)
    //val stWbIndexReg = RegNext(stWbIndex)
    val stWbIndexReg = RegEnable(stWbIndex, io.storeAddrIn(i).fire)
    when (storeAddrInFireReg) {
      pending(stWbIndexReg) := io.storeAddrInRe(i).mmio
      mmio(stWbIndexReg) := io.storeAddrInRe(i).mmio
      atomic(stWbIndexReg) := io.storeAddrInRe(i).atomic
      hasException(stWbIndexReg) := ExceptionNO.selectByFu(uop(stWbIndexReg).exceptionVec, StaCfg).asUInt.orR || io.storeAddrInRe(i).af
      waitStoreS2(stWbIndexReg) := false.B
    }
    // dcache miss info (one cycle later than storeIn)
    // if dcache reports a miss in the sta pipeline, this store will trigger a prefetch when committing to sbuffer (if EnableAtCommitMissTrigger)
    when (storeAddrInFireReg) {
      prefetch(stWbIndexReg) := io.storeAddrInRe(i).miss
    }
    // enter exceptionbuffer again
    when (storeAddrInFireReg) {
      exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).valid := io.storeAddrInRe(i).af
      exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits := RegEnable(io.storeAddrIn(i).bits, io.storeAddrIn(i).fire && !io.storeAddrIn(i).bits.miss)
      exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits.uop.exceptionVec(storeAccessFault) := io.storeAddrInRe(i).af
    }

    when(vaddrModule.io.wen(i)){
      debug_vaddr(vaddrModule.io.waddr(i)) := vaddrModule.io.wdata(i)
    }
  }

  // Write data to sq
  // Now store data pipeline is actually 2 stages
  for (i <- 0 until StorePipelineWidth) {
    dataModule.io.data.wen(i) := false.B
    val stWbIndex = io.storeDataIn(i).bits.uop.sqIdx.value
    val isVec     = FuType.isVStore(io.storeDataIn(i).bits.uop.fuType)
    // sq data write takes 2 cycles:
    // sq data write s0
    when (io.storeDataIn(i).fire) {
      // send data write req to data module
      dataModule.io.data.waddr(i) := stWbIndex
      dataModule.io.data.wdata(i) := Mux(io.storeDataIn(i).bits.uop.fuOpType === LSUOpType.cbo_zero,
        0.U,
        Mux(isVec,
          io.storeDataIn(i).bits.data,
          genVWdata(io.storeDataIn(i).bits.data, io.storeDataIn(i).bits.uop.fuOpType(2,0)))
      )
      dataModule.io.data.wen(i) := true.B

      debug_data(dataModule.io.data.waddr(i)) := dataModule.io.data.wdata(i)

      XSInfo("store data write to sq idx %d pc 0x%x data %x -> %x\n",
        io.storeDataIn(i).bits.uop.sqIdx.value,
        io.storeDataIn(i).bits.uop.pc,
        io.storeDataIn(i).bits.data,
        dataModule.io.data.wdata(i)
      )
    }
    // sq data write s1
    when (
      RegNext(io.storeDataIn(i).fire)
      // && !RegNext(io.storeDataIn(i).bits.uop).robIdx.needFlush(io.brqRedirect)
    ) {
      datavalid(RegEnable(stWbIndex, io.storeDataIn(i).fire)) := true.B
    }
  }

  // Write mask to sq
  for (i <- 0 until StorePipelineWidth) {
    // sq mask write s0
    when (io.storeMaskIn(i).fire) {
      // send data write req to data module
      dataModule.io.mask.waddr(i) := io.storeMaskIn(i).bits.sqIdx.value
      dataModule.io.mask.wdata(i) := io.storeMaskIn(i).bits.mask
      dataModule.io.mask.wen(i) := true.B
    }
  }

  /**
    * load forward query
    *
    * Check store queue for instructions that are older than the load.
    * The response will be valid at the next cycle after req.
    */
  // check over all sq entries and forward data from the first matched store
  for (i <- 0 until LoadPipelineWidth) {
    // Compare deqPtr and forward.sqIdx; we have two cases:
    // (1) if they have the same flag, we need to check range(tail, sqIdx)
    // (2) if they have different flags, we need to check range(tail, StoreQueueSize) and range(0, sqIdx)
    // Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, StoreQueueSize))
    // Forward2: Mux(same_flag, 0.U,                range(0, sqIdx))
    // i.e. forward1 is the target entries with the same flag bits and forward2 otherwise
    val differentFlag = deqPtrExt(0).flag =/= io.forward(i).sqIdx.flag
    val forwardMask = io.forward(i).sqIdxMask
    // all addrvalid terms need to be checked
    // Really valid: all scalar stores, and vector stores with (!inactive && !secondInvalid)
    val addrRealValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && allocated(j))))
    // vector store will consider all inactive || secondInvalid flows as valid
    val addrValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && allocated(j))))
    val dataValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => datavalid(j))))
    val allValidVec  = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && datavalid(j) && allocated(j))))

    val lfstEnable = Constantin.createRecord("LFSTEnable", LFSTEnable)
    val storeSetHitVec = Mux(lfstEnable,
      WireInit(VecInit((0 until StoreQueueSize).map(j => io.forward(i).uop.loadWaitBit && uop(j).robIdx === io.forward(i).uop.waitForRobIdx))),
      WireInit(VecInit((0 until StoreQueueSize).map(j => uop(j).storeSetHit && uop(j).ssid === io.forward(i).uop.ssid)))
    )

    val forwardMask1 = Mux(differentFlag, ~deqMask, deqMask ^ forwardMask)
    val forwardMask2 = Mux(differentFlag, forwardMask, 0.U(StoreQueueSize.W))
    val canForward1 = forwardMask1 & allValidVec.asUInt
    val canForward2 = forwardMask2 & allValidVec.asUInt
    val needForward = Mux(differentFlag, ~deqMask | forwardMask, deqMask ^ forwardMask)
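
    // Range-mask sketch with a hypothetical 8-entry queue: for deqPtr = 2 and
    // forward.sqIdx = 5 on the same flag, deqMask = "b0000_0011".U and
    // forwardMask = "b0001_1111".U, so forwardMask1 = deqMask ^ forwardMask =
    // "b0001_1100".U selects entries 2..4, exactly the stores older than the
    // load; with different flags the range wraps, and forwardMask2 covers the
    // low part 0..sqIdx-1.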

    XSDebug(p"$i f1 ${Binary(canForward1)} f2 ${Binary(canForward2)} " +
      p"sqIdx ${io.forward(i).sqIdx} pa ${Hexadecimal(io.forward(i).paddr)}\n"
    )

    // do real fwd query (cam lookup in load_s1)
    dataModule.io.needForward(i)(0) := canForward1 & vaddrModule.io.forwardMmask(i).asUInt
    dataModule.io.needForward(i)(1) := canForward2 & vaddrModule.io.forwardMmask(i).asUInt

    vaddrModule.io.forwardMdata(i) := io.forward(i).vaddr
    vaddrModule.io.forwardDataMask(i) := io.forward(i).mask
    paddrModule.io.forwardMdata(i) := io.forward(i).paddr
    paddrModule.io.forwardDataMask(i) := io.forward(i).mask

    // vaddr cam result does not match paddr cam result
    // replay needed
    // val vpmaskNotEqual = ((paddrModule.io.forwardMmask(i).asUInt ^ vaddrModule.io.forwardMmask(i).asUInt) & needForward) =/= 0.U
    // val vaddrMatchFailed = vpmaskNotEqual && io.forward(i).valid
    val vpmaskNotEqual = (
      (RegEnable(paddrModule.io.forwardMmask(i).asUInt, io.forward(i).valid) ^ RegEnable(vaddrModule.io.forwardMmask(i).asUInt, io.forward(i).valid)) &
      RegNext(needForward) &
      GatedRegNext(addrRealValidVec.asUInt)
    ) =/= 0.U
    val vaddrMatchFailed = vpmaskNotEqual && RegNext(io.forward(i).valid)
    when (vaddrMatchFailed) {
      XSInfo("vaddrMatchFailed: pc %x pmask %x vmask %x\n",
        RegEnable(io.forward(i).uop.pc, io.forward(i).valid),
        RegEnable(needForward & paddrModule.io.forwardMmask(i).asUInt, io.forward(i).valid),
        RegEnable(needForward & vaddrModule.io.forwardMmask(i).asUInt, io.forward(i).valid)
      )
    }
    XSPerfAccumulate("vaddr_match_failed", vpmaskNotEqual)
    XSPerfAccumulate("vaddr_match_really_failed", vaddrMatchFailed)

    // Fast forward mask will be generated immediately (load_s1)
    io.forward(i).forwardMaskFast := dataModule.io.forwardMaskFast(i)

    // Forward result will be generated 1 cycle later (load_s2)
    io.forward(i).forwardMask := dataModule.io.forwardMask(i)
    io.forward(i).forwardData := dataModule.io.forwardData(i)
    // If addr matches but data is not ready, mark it as dataInvalid
    // load_s1: generate dataInvalid in load_s1 to set fastUop
    val dataInvalidMask1 = (addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt & forwardMask1.asUInt)
    val dataInvalidMask2 = (addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt & forwardMask2.asUInt)
    val dataInvalidMask = dataInvalidMask1 | dataInvalidMask2
    io.forward(i).dataInvalidFast := dataInvalidMask.orR

    // make chisel happy
    val dataInvalidMask1Reg = Wire(UInt(StoreQueueSize.W))
    dataInvalidMask1Reg := RegNext(dataInvalidMask1)
    // make chisel happy
    val dataInvalidMask2Reg = Wire(UInt(StoreQueueSize.W))
    dataInvalidMask2Reg := RegNext(dataInvalidMask2)
    val dataInvalidMaskReg = dataInvalidMask1Reg | dataInvalidMask2Reg

    // If SSID matches but the address is not ready, mark it as addrInvalid
    // load_s2: generate addrInvalid
    val addrInvalidMask1 = (~addrValidVec.asUInt & storeSetHitVec.asUInt & forwardMask1.asUInt)
    val addrInvalidMask2 = (~addrValidVec.asUInt & storeSetHitVec.asUInt & forwardMask2.asUInt)
    // make chisel happy
    val addrInvalidMask1Reg = Wire(UInt(StoreQueueSize.W))
    addrInvalidMask1Reg := RegNext(addrInvalidMask1)
    // make chisel happy
    val addrInvalidMask2Reg = Wire(UInt(StoreQueueSize.W))
    addrInvalidMask2Reg := RegNext(addrInvalidMask2)
    val addrInvalidMaskReg = addrInvalidMask1Reg | addrInvalidMask2Reg

    // load_s2
    io.forward(i).dataInvalid := RegNext(io.forward(i).dataInvalidFast)
    // check if vaddr forward mismatched
    io.forward(i).matchInvalid := vaddrMatchFailed

    // data invalid sq index
    // check whether false fail
    // check flag
    val s2_differentFlag = RegNext(differentFlag)
    val s2_enqPtrExt = RegNext(enqPtrExt(0))
    val s2_deqPtrExt = RegNext(deqPtrExt(0))

    // addr invalid sq index
    // make chisel happy
    val addrInvalidMaskRegWire = Wire(UInt(StoreQueueSize.W))
    addrInvalidMaskRegWire := addrInvalidMaskReg
    val addrInvalidFlag = addrInvalidMaskRegWire.orR
    val hasInvalidAddr = (~addrValidVec.asUInt & needForward).orR

    val addrInvalidSqIdx1 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(addrInvalidMask1Reg))))
    val addrInvalidSqIdx2 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(addrInvalidMask2Reg))))
    val addrInvalidSqIdx = Mux(addrInvalidMask2Reg.orR, addrInvalidSqIdx2, addrInvalidSqIdx1)

    // store-set content management
    //                +-----------------------+
    //                | Search a SSID for the |
    //                |    load operation     |
    //                +-----------------------+
    //                           |
    //                           V
    //                 +-------------------+
    //                 | load wait strict? |
    //                 +-------------------+
    //                           |
    //                           V
    //               +----------------------+
    //            Set|                      |Clean
    //               V                      V
    //  +------------------------+   +------------------------------+
    //  | Waiting for all older  |   | Wait until the corresponding |
    //  |   stores operations    |   | older store operations       |
    //  +------------------------+   +------------------------------+



    when (RegEnable(io.forward(i).uop.loadWaitStrict, io.forward(i).valid)) {
      io.forward(i).addrInvalidSqIdx := RegEnable((io.forward(i).uop.sqIdx - 1.U), io.forward(i).valid)
    } .elsewhen (addrInvalidFlag) {
      io.forward(i).addrInvalidSqIdx.flag := Mux(!s2_differentFlag || addrInvalidSqIdx >= s2_deqPtrExt.value, s2_deqPtrExt.flag, s2_enqPtrExt.flag)
      io.forward(i).addrInvalidSqIdx.value := addrInvalidSqIdx
    } .otherwise {
      // the store inst may have been written to sbuffer already
      io.forward(i).addrInvalidSqIdx := RegEnable(io.forward(i).uop.sqIdx, io.forward(i).valid)
    }
    io.forward(i).addrInvalid := Mux(RegEnable(io.forward(i).uop.loadWaitStrict, io.forward(i).valid), RegNext(hasInvalidAddr), addrInvalidFlag)

    // data invalid sq index
    // make chisel happy
    val dataInvalidMaskRegWire = Wire(UInt(StoreQueueSize.W))
    dataInvalidMaskRegWire := dataInvalidMaskReg
    val dataInvalidFlag = dataInvalidMaskRegWire.orR

    val dataInvalidSqIdx1 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(dataInvalidMask1Reg))))
    val dataInvalidSqIdx2 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(dataInvalidMask2Reg))))
    val dataInvalidSqIdx = Mux(dataInvalidMask2Reg.orR, dataInvalidSqIdx2, dataInvalidSqIdx1)

    when (dataInvalidFlag) {
      io.forward(i).dataInvalidSqIdx.flag := Mux(!s2_differentFlag || dataInvalidSqIdx >= s2_deqPtrExt.value, s2_deqPtrExt.flag, s2_enqPtrExt.flag)
      io.forward(i).dataInvalidSqIdx.value := dataInvalidSqIdx
    } .otherwise {
      // the store inst may have been written to sbuffer already
      io.forward(i).dataInvalidSqIdx := RegEnable(io.forward(i).uop.sqIdx, io.forward(i).valid)
    }
  }

  /**
    * Memory mapped IO / other uncached operations / CMO
    *
    * States:
    * (1) writeback from store units: mark as pending
    * (2) when they reach ROB's head, they can be sent to uncache channel
    * (3) response from uncache channel: mark as datavalidmask.wen
    * (4) writeback to ROB (and other units): mark as writebacked
    * (5) ROB commits the instruction: same as normal instructions
    */
  //(2) when they reach ROB's head, they can be sent to uncache channel
  // TODO: CAN NOT deal with vector mmio now!
  val s_idle :: s_req :: s_resp :: s_wb :: s_wait :: Nil = Enum(5)
  val uncacheState = RegInit(s_idle)
  val uncacheUop = Reg(new DynInst)
  val uncacheVAddr = Reg(UInt(VAddrBits.W))
  val cboFlushedSb = RegInit(false.B)
  switch(uncacheState) {
    is(s_idle) {
      when(RegNext(io.rob.pendingst && uop(deqPtr).robIdx === io.rob.pendingPtr && pending(deqPtr) && allocated(deqPtr) && datavalid(deqPtr) && addrvalid(deqPtr))) {
        uncacheState := s_req
        uncacheUop := uop(deqPtr)
        cboFlushedSb := false.B
      }
    }
    is(s_req) {
      when (io.uncache.req.fire) {
        when (io.uncacheOutstanding) {
          uncacheState := s_wb
        } .otherwise {
          uncacheState := s_resp
        }
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire) {
        uncacheState := s_wb

        when (io.uncache.resp.bits.nderr) {
          uncacheUop.exceptionVec(storeAccessFault) := true.B
        }
      }
    }
    is(s_wb) {
      when (io.mmioStout.fire || io.vecmmioStout.fire) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      // A MMIO store can always move cmtPtrExt as it must be ROB head
      when(scommit > 0.U) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
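
  // FSM summary, as encoded above: s_idle -> s_req once the pending mmio store
  // at deqPtr reaches the ROB head; s_req -> s_wb directly when outstanding
  // uncache requests are allowed, otherwise s_req -> s_resp -> s_wb; s_wb ->
  // s_wait after the writeback fires; s_wait -> s_idle once the store commits
  // (scommit > 0). The CBO special case below bypasses the uncache channel.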
  io.uncache.req.valid := uncacheState === s_req

  io.uncache.req.bits := DontCare
  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XWR
  io.uncache.req.bits.addr := paddrModule.io.rdata(0) // data(deqPtr) -> rdata(0)
  io.uncache.req.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data)
  io.uncache.req.bits.mask := shiftMaskToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).mask)

  // CBO op type check can be delayed for 1 cycle,
  // as uncache op will not start in s_idle
  val cboMmioAddr = get_block_addr(paddrModule.io.rdata(0))
  val deqCanDoCbo = GatedRegNext(LSUOpType.isCbo(uop(deqPtr).fuOpType) && allocated(deqPtr) && addrvalid(deqPtr))
  when (deqCanDoCbo) {
    // disable uncache channel
    io.uncache.req.valid := false.B

    when (io.cmoOpReq.fire) {
      uncacheState := s_resp
    }

    when (uncacheState === s_resp) {
      when (io.cmoOpResp.fire) {
        uncacheState := s_wb
      }
    }
  }

  io.cmoOpReq.valid := deqCanDoCbo && cboFlushedSb && (uncacheState === s_req)
  io.cmoOpReq.bits.opcode  := uop(deqPtr).fuOpType(1, 0)
  io.cmoOpReq.bits.address := cboMmioAddr

  io.cmoOpResp.ready := deqCanDoCbo && (uncacheState === s_resp)

  io.flushSbuffer.valid := deqCanDoCbo && !cboFlushedSb && (uncacheState === s_req) && !io.flushSbuffer.empty

  when(deqCanDoCbo && !cboFlushedSb && (uncacheState === s_req) && io.flushSbuffer.empty) {
    cboFlushedSb := true.B
  }

  io.uncache.req.bits.atomic := atomic(GatedRegNext(rdataPtrExtNext(0)).value)

  when(io.uncache.req.fire){
    // mmio store should not be committed until uncache req is sent
    pending(deqPtr) := false.B

    XSDebug(
      p"uncache req: pc ${Hexadecimal(uop(deqPtr).pc)} " +
      p"addr ${Hexadecimal(io.uncache.req.bits.addr)} " +
      p"data ${Hexadecimal(io.uncache.req.bits.data)} " +
      p"op ${Hexadecimal(io.uncache.req.bits.cmd)} " +
      p"mask ${Hexadecimal(io.uncache.req.bits.mask)}\n"
    )
  }

  // (3) response from uncache channel: mark as datavalid
  io.uncache.resp.ready := true.B

  // (4) scalar store: writeback to ROB (and other units): mark as writebacked
  io.mmioStout.valid := uncacheState === s_wb && !isVec(deqPtr)
  io.mmioStout.bits.uop := uncacheUop
  io.mmioStout.bits.uop.sqIdx := deqPtrExt(0)
  io.mmioStout.bits.uop.flushPipe := deqCanDoCbo // flush Pipeline to keep order in CMO
  io.mmioStout.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data) // dataModule.io.rdata.read(deqPtr)
  io.mmioStout.bits.debug.isMMIO := true.B
  io.mmioStout.bits.debug.paddr := DontCare
  io.mmioStout.bits.debug.isPerfCnt := false.B
  io.mmioStout.bits.debug.vaddr := DontCare
  // Remove MMIO inst from store queue after the MMIO request has been sent
  // That inst will be traced by the uncache state machine
  when (io.mmioStout.fire) {
    allocated(deqPtr) := false.B
  }

  exceptionBuffer.io.storeAddrIn.last.valid := io.mmioStout.fire
  exceptionBuffer.io.storeAddrIn.last.bits := DontCare
  exceptionBuffer.io.storeAddrIn.last.bits.vaddr := vaddrModule.io.rdata.head
  exceptionBuffer.io.storeAddrIn.last.bits.uop := uncacheUop

  // (4) or vector store:
  // TODO: implement it!
  io.vecmmioStout := DontCare
  io.vecmmioStout.valid := false.B //uncacheState === s_wb && isVec(deqPtr)
  io.vecmmioStout.bits.uop := uop(deqPtr)
  io.vecmmioStout.bits.uop.sqIdx := deqPtrExt(0)
  io.vecmmioStout.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data) // dataModule.io.rdata.read(deqPtr)
  io.vecmmioStout.bits.debug.isMMIO := true.B
  io.vecmmioStout.bits.debug.paddr := DontCare
  io.vecmmioStout.bits.debug.isPerfCnt := false.B
  io.vecmmioStout.bits.debug.vaddr := DontCare
  // Remove MMIO inst from store queue after the MMIO request has been sent
  // That inst will be traced by the uncache state machine
  when (io.vecmmioStout.fire) {
    allocated(deqPtr) := false.B
  }

  /**
    * ROB commits store instructions (mark them as committed)
    *
    * (1) When store commits, mark it as committed.
    * (2) They will not be cancelled and can be sent to lower level.
    */
  XSError(uncacheState =/= s_idle && uncacheState =/= s_wait && commitCount > 0.U,
   "should not commit instruction when MMIO has not been finished\n")

  val commitVec = WireInit(VecInit(Seq.fill(CommitWidth)(false.B)))
  val needCancel = Wire(Vec(StoreQueueSize, Bool())) // Will be assigned later
  dontTouch(commitVec)
  // TODO: Deal with vector store mmio
  for (i <- 0 until CommitWidth) {
    when (allocated(cmtPtrExt(i).value) && isNotAfter(uop(cmtPtrExt(i).value).robIdx, GatedRegNext(io.rob.pendingPtr)) && !needCancel(cmtPtrExt(i).value) && (!waitStoreS2(cmtPtrExt(i).value) || isVec(cmtPtrExt(i).value))) {
      // don't commit while doing misalign
      if (i == 0){
        // TODO: fixme for vector mmio
        when ((uncacheState === s_idle) || (uncacheState === s_wait && scommit > 0.U)){
          when ((isVec(cmtPtrExt(i).value) && vecMbCommit(cmtPtrExt(i).value)) || !isVec(cmtPtrExt(i).value)) {
            committed(cmtPtrExt(0).value) := Mux(misalignBlock, false.B, true.B)
            commitVec(0) := Mux(misalignBlock, false.B, true.B)
          }
        }
      } else {
        when ((isVec(cmtPtrExt(i).value) && vecMbCommit(cmtPtrExt(i).value)) || !isVec(cmtPtrExt(i).value)) {
          committed(cmtPtrExt(i).value) := Mux(misalignBlock, false.B, commitVec(i - 1) || committed(cmtPtrExt(i).value))
          commitVec(i) := Mux(misalignBlock, false.B, commitVec(i - 1))
        }
      }
    }
  }

  commitCount := PopCount(commitVec)
  cmtPtrExt := cmtPtrExt.map(_ + commitCount)

  // committed stores will not be cancelled and can be sent to lower level.
  // remove retired insts from sq, add retired store to sbuffer

  // Read data from data module
  // As store queue grows larger and larger, time needed to read data from data
  // module keeps growing higher. Now we give data read a whole cycle.
  for (i <- 0 until EnsbufferWidth) {
    val ptr = rdataPtrExt(i).value
    val mmioStall = if(i == 0) mmio(rdataPtrExt(0).value) else (mmio(rdataPtrExt(i).value) || mmio(rdataPtrExt(i-1).value))
    val exceptionValid = if(i == 0) hasException(rdataPtrExt(0).value) else {
      hasException(rdataPtrExt(i).value) || (hasException(rdataPtrExt(i-1).value) && uop(rdataPtrExt(i).value).robIdx === uop(rdataPtrExt(i-1).value).robIdx)
    }
    val vecNotAllMask = dataModule.io.rdata(i).mask.orR
    // For vector instructions, prevent entries with triggered exceptions from being written to the 'dataBuffer'.
    val vecHasExceptionFlagValid = vecExceptionFlag.valid && isVec(ptr) && vecExceptionFlag.bits.robIdx === uop(ptr).robIdx
    if (i == 0) {
      // use dataBuffer write port 0 to write back the misaligned store
      dataBuffer.io.enq(i).valid := Mux(
        doMisalignSt,
        io.maControl.control.writeSb,
        allocated(ptr) && committed(ptr) && ((!isVec(ptr) && (allvalid(ptr) || hasException(ptr))) || vecMbCommit(ptr)) && !mmioStall
      )
    } else {
      dataBuffer.io.enq(i).valid := Mux(
        doMisalignSt,
        false.B,
        allocated(ptr) && committed(ptr) && ((!isVec(ptr) && (allvalid(ptr) || hasException(ptr))) || vecMbCommit(ptr)) && !mmioStall
      )
    }
    // Note that store data/addr should both be valid after store's commit
    assert(!dataBuffer.io.enq(i).valid || allvalid(ptr) || doMisalignSt || (allocated(ptr) && vecMbCommit(ptr)))
    dataBuffer.io.enq(i).bits.addr     := Mux(doMisalignSt, io.maControl.control.paddr, paddrModule.io.rdata(i))
    dataBuffer.io.enq(i).bits.vaddr    := Mux(doMisalignSt, io.maControl.control.vaddr, vaddrModule.io.rdata(i))
    dataBuffer.io.enq(i).bits.data     := Mux(doMisalignSt, io.maControl.control.wdata, dataModule.io.rdata(i).data)
    dataBuffer.io.enq(i).bits.mask     := Mux(doMisalignSt, io.maControl.control.wmask, dataModule.io.rdata(i).mask)
    dataBuffer.io.enq(i).bits.wline    := Mux(doMisalignSt, false.B, paddrModule.io.rlineflag(i))
    dataBuffer.io.enq(i).bits.sqPtr    := rdataPtrExt(i)
    dataBuffer.io.enq(i).bits.prefetch := Mux(doMisalignSt, false.B, prefetch(ptr))
    // when a scalar store has an exception, it will also not be written into sbuffer
    dataBuffer.io.enq(i).bits.vecValid := Mux(doMisalignSt, true.B, (!isVec(ptr) || (vecDataValid(ptr) && vecNotAllMask)) && !exceptionValid && !vecHasExceptionFlagValid)
//    dataBuffer.io.enq(i).bits.vecValid := (!isVec(ptr) || vecDataValid(ptr)) && !hasException(ptr)
  }

  // Send data stored in sbufferReqBitsReg to sbuffer
  for (i <- 0 until EnsbufferWidth) {
    io.sbuffer(i).valid := dataBuffer.io.deq(i).valid
    dataBuffer.io.deq(i).ready := io.sbuffer(i).ready
    io.sbuffer(i).bits := DontCare
    io.sbuffer(i).bits.cmd   := MemoryOpConstants.M_XWR
    io.sbuffer(i).bits.addr  := dataBuffer.io.deq(i).bits.addr
    io.sbuffer(i).bits.vaddr := dataBuffer.io.deq(i).bits.vaddr
    io.sbuffer(i).bits.data  := dataBuffer.io.deq(i).bits.data
    io.sbuffer(i).bits.mask  := dataBuffer.io.deq(i).bits.mask
    io.sbuffer(i).bits.wline := dataBuffer.io.deq(i).bits.wline && dataBuffer.io.deq(i).bits.vecValid
    io.sbuffer(i).bits.prefetch := dataBuffer.io.deq(i).bits.prefetch
    io.sbuffer(i).bits.vecValid := dataBuffer.io.deq(i).bits.vecValid
    // io.sbuffer(i).fire is RegNexted, as sbuffer data write takes 2 cycles.
    // Before the data write finishes, sbuffer is unable to provide store-to-load
    // forward data. As a workaround, the deqPtrExt and allocated flag update
    // is delayed so that loads can get the right data from store queue.
    val ptr = dataBuffer.io.deq(i).bits.sqPtr.value
    when (RegNext(io.sbuffer(i).fire && !doMisalignSt)) {
      allocated(RegEnable(ptr, io.sbuffer(i).fire)) := false.B
      XSDebug("sbuffer "+i+" fire: ptr %d\n", ptr)
    }
  }

  // All vector instruction uops normally dequeue, but a uop after an exception is raised must not write to the 'sbuffer'.
  // A flag records whether any exception has occurred while the queue dequeues.
  // It is evaluated on each write to the 'dataBuffer' and prevents subsequent uops of the same instruction from writing to the 'dataBuffer'.
  val vecCommitHasException = (0 until EnsbufferWidth).map{ i =>
    val ptr                 = rdataPtrExt(i).value
    val mmioStall           = if(i == 0) mmio(rdataPtrExt(0).value) else (mmio(rdataPtrExt(i).value) || mmio(rdataPtrExt(i-1).value))
    val exceptionValid      = allocated(ptr) && committed(ptr) && vecMbCommit(ptr) && !mmioStall && isVec(ptr) && vecDataValid(ptr) && hasException(ptr)
    (exceptionValid, uop(ptr))
  }

  val vecCommitHasExceptionValid      = vecCommitHasException.map(_._1)
  val vecCommitHasExceptionUop        = vecCommitHasException.map(_._2)
  val vecCommitHasExceptionValidOR    = vecCommitHasExceptionValid.reduce(_ || _)
  // Just select the last uop that has an exception.
  val vecCommitHasExceptionSelectUop  = ParallelPosteriorityMux(vecCommitHasExceptionValid, vecCommitHasExceptionUop)
  // If the last Uop with an exception is the LastUop of this instruction, the flag is not set.
  val vecCommitLastUop = vecCommitHasExceptionSelectUop.lastUop

  val vecExceptionFlagCancel  = (0 until EnsbufferWidth).map{ i =>
    val ptr                   = rdataPtrExt(i).value
    val mmioStall             = if(i == 0) mmio(rdataPtrExt(0).value) else (mmio(rdataPtrExt(i).value) || mmio(rdataPtrExt(i-1).value))
    val vecLastUopCommit      = uop(ptr).lastUop && (uop(ptr).robIdx === vecExceptionFlag.bits.robIdx) &&
                                allocated(ptr) && committed(ptr) && vecMbCommit(ptr) && !mmioStall && isVec(ptr) && vecDataValid(ptr)
    vecLastUopCommit
  }.reduce(_ || _)
  // When the LastUop of an instruction with an exception is committed, clear the flag.
  when(!vecExceptionFlag.valid && vecCommitHasExceptionValidOR && !vecCommitLastUop) {
    vecExceptionFlag.valid  := true.B
    vecExceptionFlag.bits   := vecCommitHasExceptionSelectUop
  }.elsewhen(vecExceptionFlag.valid && vecExceptionFlagCancel) {
    vecExceptionFlag.valid  := false.B
    vecExceptionFlag.bits   := 0.U.asTypeOf(new DynInst)
  }

  // Defensive code: the flag should not stay set for a long period of time.
  // The timeout period is relatively large and does not have any special meaning.
  // If the assert fires and you confirm that it is not a bug: increase the timeout or remove the assert.
  TimeOutAssert(vecExceptionFlag.valid, 3000, "vecExceptionFlag timeout, Please check for bugs or add timeouts.")

  // Initialize when difftest is disabled.
  for (i <- 0 until EnsbufferWidth) {
    io.sbufferVecDifftestInfo(i) := DontCare
  }
  // Consistent with the logic above.
  // Only the signals required by vector store difftest are separated from the RTL code.
  if (env.EnableDifftest) {
    for (i <- 0 until EnsbufferWidth) {
      val ptr = rdataPtrExt(i).value
      val mmioStall = if(i == 0) mmio(rdataPtrExt(0).value) else (mmio(rdataPtrExt(i).value) || mmio(rdataPtrExt(i-1).value))
      difftestBuffer.get.io.enq(i).valid := dataBuffer.io.enq(i).valid
      difftestBuffer.get.io.enq(i).bits := uop(ptr)
    }
    for (i <- 0 until EnsbufferWidth) {
      io.sbufferVecDifftestInfo(i).valid := difftestBuffer.get.io.deq(i).valid
      difftestBuffer.get.io.deq(i).ready := io.sbufferVecDifftestInfo(i).ready

      io.sbufferVecDifftestInfo(i).bits := difftestBuffer.get.io.deq(i).bits
    }
  }

  (1 until EnsbufferWidth).foreach(i => when(io.sbuffer(i).fire) { assert(io.sbuffer(i - 1).fire) })
  if (coreParams.dcacheParametersOpt.isEmpty) {
    for (i <- 0 until EnsbufferWidth) {
      val ptr = deqPtrExt(i).value
      val ram = DifftestMem(64L * 1024 * 1024 * 1024, 8)
      val wen = allocated(ptr) && committed(ptr) && !mmio(ptr)
      val waddr = ((paddrModule.io.rdata(i) - "h80000000".U) >> 3).asUInt
      val wdata = Mux(paddrModule.io.rdata(i)(3), dataModule.io.rdata(i).data(127, 64), dataModule.io.rdata(i).data(63, 0))
      val wmask = Mux(paddrModule.io.rdata(i)(3), dataModule.io.rdata(i).mask(15, 8), dataModule.io.rdata(i).mask(7, 0))
      when (wen) {
        ram.write(waddr, wdata.asTypeOf(Vec(8, UInt(8.W))), wmask.asBools)
      }
    }
  }
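
  // Golden-memory mapping sketch: the difftest RAM models memory from physical
  // address 0x8000_0000, so waddr = (paddr - 0x8000_0000) >> 3 is a 64-bit word
  // index. For a hypothetical paddr of 0x8000_0008, waddr = 1 and paddr bit 3
  // selects the upper half (bits 127..64) of the 128-bit store data.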

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr  := exceptionBuffer.io.exceptionAddr.vaddr
  io.exceptionAddr.gpaddr := exceptionBuffer.io.exceptionAddr.gpaddr
  io.exceptionAddr.vstart := exceptionBuffer.io.exceptionAddr.vstart
  io.exceptionAddr.vl     := exceptionBuffer.io.exceptionAddr.vl
  io.exceptionAddr.isForVSnonLeafPTE := exceptionBuffer.io.exceptionAddr.isForVSnonLeafPTE

  // vector commit or replay feedback from the vector merge buffer
  val vecCommittmp = Wire(Vec(StoreQueueSize, Vec(VecStorePipelineWidth, Bool())))
  val vecCommit = Wire(Vec(StoreQueueSize, Bool()))
  for (i <- 0 until StoreQueueSize) {
    val fbk = io.vecFeedback
    for (j <- 0 until VecStorePipelineWidth) {
      vecCommittmp(i)(j) := fbk(j).valid && (fbk(j).bits.isCommit || fbk(j).bits.isFlush) &&
        uop(i).robIdx === fbk(j).bits.robidx && uop(i).uopIdx === fbk(j).bits.uopidx && allocated(i)
    }
    vecCommit(i) := vecCommittmp(i).reduce(_ || _)

    when (vecCommit(i)) {
      vecMbCommit(i) := true.B
    }
  }

  // misprediction recovery / exception redirect
  // invalidate sq entries using robIdx
  for (i <- 0 until StoreQueueSize) {
    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i) && !committed(i) &&
      (!isVec(i) || !(uop(i).robIdx === io.brqRedirect.bits.robIdx))
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val enqCancelValid = canEnqueue.zip(io.enq.req).map{case (v, x) =>
    v && x.bits.robIdx.needFlush(io.brqRedirect)
  }
  val enqCancelNum = enqCancelValid.zip(io.enq.req).map{case (v, req) =>
    Mux(v, req.bits.numLsElem, 0.U)
  }
  val lastEnqCancel = RegEnable(enqCancelNum.reduce(_ + _), io.brqRedirect.valid) // 1 cycle after redirect

  val lastCycleCancelCount = PopCount(RegEnable(needCancel, io.brqRedirect.valid)) // 1 cycle after redirect
  val lastCycleRedirect = RegNext(io.brqRedirect.valid) // 1 cycle after redirect
  val enqNumber = validVStoreFlow.reduce(_ + _)

  val lastlastCycleRedirect = RegNext(lastCycleRedirect) // 2 cycles after redirect
  val redirectCancelCount = RegEnable(lastCycleCancelCount + lastEnqCancel, 0.U, lastCycleRedirect) // 2 cycles after redirect

  when (lastlastCycleRedirect) {
    // we recover the pointers in 2 cycles after redirect for better timing
    enqPtrExt := VecInit(enqPtrExt.map(_ - redirectCancelCount))
  }.otherwise {
    // lastCycleRedirect.valid or normal case
    // when lastCycleRedirect.valid, enqNumber === 0.U, enqPtrExt will not change
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }
  assert(!(lastCycleRedirect && enqNumber =/= 0.U))
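
  // Recovery timeline sketch: a redirect at T0 snapshots the cancel counts at
  // T1 (lastCycleCancelCount + lastEnqCancel) and rolls enqPtrExt back at T2
  // (lastlastCycleRedirect), trading one extra cycle of queue occupancy for a
  // shorter pointer-adjust path; io.sqCancelCnt below follows the same
  // T0 -> T2 timing.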

  exceptionBuffer.io.flushFrmMaBuf := finishMisalignSt
  // special case (store misalign) in updating ptr
  when (doMisalignSt) {
    when (!finishMisalignSt) {
      // don't move deqPtr and rdataPtr until all split stores have been written to sb
      deqPtrExtNext := deqPtrExt
      rdataPtrExtNext := rdataPtrExt
    } .otherwise {
      // remove this unaligned store from sq
      allocated(deqPtr) := false.B
      committed(deqPtr) := true.B
      cmtPtrExt := cmtPtrExt.map(_ + 1.U)
      deqPtrExtNext := deqPtrExt.map(_ + 1.U)
      rdataPtrExtNext := rdataPtrExt.map(_ + 1.U)
    }
  }

  deqPtrExt := deqPtrExtNext
  rdataPtrExt := rdataPtrExtNext

  // val dequeueCount = Mux(io.sbuffer(1).fire, 2.U, Mux(io.sbuffer(0).fire || io.mmioStout.fire, 1.U, 0.U))

  // If redirect at T0, sqCancelCnt is at T2
  io.sqCancelCnt := redirectCancelCount
  val ForceWriteUpper = Wire(UInt(log2Up(StoreQueueSize + 1).W))
  ForceWriteUpper := Constantin.createRecord(s"ForceWriteUpper_${p(XSCoreParamsKey).HartId}", initValue = 60)
  val ForceWriteLower = Wire(UInt(log2Up(StoreQueueSize + 1).W))
  ForceWriteLower := Constantin.createRecord(s"ForceWriteLower_${p(XSCoreParamsKey).HartId}", initValue = 55)

  val valid_cnt = PopCount(allocated)
  io.force_write := RegNext(Mux(valid_cnt >= ForceWriteUpper, true.B, valid_cnt >= ForceWriteLower && io.force_write), init = false.B)

  // io.sqEmpty will be used by sbuffer
  // We delay it for 1 cycle for better timing
  // When sbuffer needs to check if it is empty, the pipeline is blocked, so delaying
  // io.sqEmpty for 1 cycle still promises that sq is empty in that cycle
  io.sqEmpty := RegNext(
    enqPtrExt(0).value === deqPtrExt(0).value &&
    enqPtrExt(0).flag === deqPtrExt(0).flag
  )
  // perf counter
  QueuePerf(StoreQueueSize, validCount, !allowEnqueue)
  val vecValidVec = WireInit(VecInit((0 until StoreQueueSize).map(i => allocated(i) && isVec(i))))
  QueuePerf(StoreQueueSize, PopCount(vecValidVec), !allowEnqueue)
  io.sqFull := !allowEnqueue
1204  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req
1205  XSPerfAccumulate("mmioCnt", io.uncache.req.fire)
1206  XSPerfAccumulate("mmio_wb_success", io.mmioStout.fire || io.vecmmioStout.fire)
1207  XSPerfAccumulate("mmio_wb_blocked", (io.mmioStout.valid && !io.mmioStout.ready) || (io.vecmmioStout.valid && !io.vecmmioStout.ready))
1208  XSPerfAccumulate("validEntryCnt", distanceBetween(enqPtrExt(0), deqPtrExt(0)))
1209  XSPerfAccumulate("cmtEntryCnt", distanceBetween(cmtPtrExt(0), deqPtrExt(0)))
1210  XSPerfAccumulate("nCmtEntryCnt", distanceBetween(enqPtrExt(0), cmtPtrExt(0)))
1211
  val perfValidCount = distanceBetween(enqPtrExt(0), deqPtrExt(0))
  val perfEvents = Seq(
    ("mmioCycle      ", uncacheState =/= s_idle),
    ("mmioCnt        ", io.uncache.req.fire),
    ("mmio_wb_success", io.mmioStout.fire || io.vecmmioStout.fire),
    ("mmio_wb_blocked", (io.mmioStout.valid && !io.mmioStout.ready) || (io.vecmmioStout.valid && !io.vecmmioStout.ready)),
    ("stq_1_4_valid  ", (perfValidCount < (StoreQueueSize.U/4.U))),
    ("stq_2_4_valid  ", (perfValidCount > (StoreQueueSize.U/4.U)) & (perfValidCount <= (StoreQueueSize.U/2.U))),
    ("stq_3_4_valid  ", (perfValidCount > (StoreQueueSize.U/2.U)) & (perfValidCount <= (StoreQueueSize.U*3.U/4.U))),
    ("stq_4_4_valid  ", (perfValidCount > (StoreQueueSize.U*3.U/4.U)))
  )
  generatePerfEvent()

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt(0).flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until StoreQueueSize) {
    XSDebug(s"$i: pc %x va %x pa %x data %x ",
      uop(i).pc,
      debug_vaddr(i),
      debug_paddr(i),
      debug_data(i)
    )
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && addrvalid(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "d")
    PrintFlag(allocated(i) && committed(i), "c")
    PrintFlag(allocated(i) && pending(i), "p")
    PrintFlag(allocated(i) && mmio(i), "m")
    XSDebug(false, true.B, "\n")
  }

}