xref: /XiangShan/src/main/scala/xiangshan/backend/rob/Rob.scala (revision 00c6a8aac7d4c20605fe93ba33304a90b4b5126a)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
*
*
* Acknowledgement
*
* This implementation is inspired by several key papers:
* [1] James E. Smith, and Andrew R. Pleszkun. "[Implementation of precise interrupts in pipelined processors.]
* (https://dl.acm.org/doi/10.5555/327010.327125)" 12th Annual International Symposium on Computer Architecture (ISCA).
* 1985.
***************************************************************************************/

package xiangshan.backend.rob

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import chisel3.experimental.BundleLiterals._
import difftest._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import utility._
import utils._
import xiangshan._
import xiangshan.backend.GPAMemEntry
import xiangshan.backend.{BackendParams, RatToVecExcpMod, RegWriteFromRab, VecExcpInfo}
import xiangshan.backend.Bundles.{DynInst, ExceptionInfo, ExuOutput}
import xiangshan.backend.decode.isa.bitfield.XSInstBitFields
import xiangshan.backend.fu.{FuConfig, FuType}
import xiangshan.frontend.FtqPtr
import xiangshan.mem.{LqPtr, LsqEnqIO, SqPtr}
import xiangshan.backend.ctrlblock.{DebugLSIO, DebugLsInfo, LsTopdownInfo}
import xiangshan.backend.fu.vector.Bundles.VType
import xiangshan.backend.rename.SnapshotGenerator
import yunsuan.VfaluType
import xiangshan.backend.rob.RobBundles._
import xiangshan.backend.trace._

class Rob(params: BackendParams)(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false

  lazy val module = new RobImp(this)(p, params)
}

class RobImp(override val wrapper: Rob)(implicit p: Parameters, params: BackendParams) extends LazyModuleImp(wrapper)
  with HasXSParameter with HasCircularQueuePtrHelper with HasPerfEvents with HasCriticalErrors {

  private val LduCnt = params.LduCnt
  private val StaCnt = params.StaCnt
  private val HyuCnt = params.HyuCnt

  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val redirect = Input(Valid(new Redirect))
    val enq = new RobEnqIO
    val flushOut = ValidIO(new Redirect)
    val exception = ValidIO(new ExceptionInfo)
    // exu + brq
    val writeback: MixedVec[ValidIO[ExuOutput]] = Flipped(params.genWrite2CtrlBundles)
    val exuWriteback: MixedVec[ValidIO[ExuOutput]] = Flipped(params.genWrite2CtrlBundles)
    val writebackNums = Flipped(Vec(writeback.size - params.StdCnt, ValidIO(UInt(writeback.size.U.getWidth.W))))
    val writebackNeedFlush = Input(Vec(params.allExuParams.filter(_.needExceptionGen).length, Bool()))
    val commits = Output(new RobCommitIO)
    val trace = new Bundle {
      val blockCommit = Input(Bool())
      val traceCommitInfo = new TraceBundle(hasIaddr = false, CommitWidth, IretireWidthInPipe)
    }
    val rabCommits = Output(new RabCommitIO)
    val diffCommits = if (backendParams.basicDebugEn) Some(Output(new DiffCommitIO)) else None
    val isVsetFlushPipe = Output(Bool())
    val lsq = new RobLsqIO
    val robDeqPtr = Output(new RobPtr)
    val csr = new RobCSRIO
    val snpt = Input(new SnapshotPort)
    val robFull = Output(Bool())
    val headNotReady = Output(Bool())
    val cpu_halt = Output(Bool())
    val wfi_enable = Input(Bool())
    val toDecode = new Bundle {
      val isResumeVType = Output(Bool())
      val walkToArchVType = Output(Bool())
      val walkVType = ValidIO(VType())
      val commitVType = new Bundle {
        val vtype = ValidIO(VType())
        val hasVsetvl = Output(Bool())
      }
    }
    val fromVecExcpMod = Input(new Bundle {
      val busy = Bool()
    })
    val readGPAMemAddr = ValidIO(new Bundle {
      val ftqPtr = new FtqPtr()
      val ftqOffset = UInt(log2Up(PredictWidth).W)
    })
    val readGPAMemData = Input(new GPAMemEntry)
    val vstartIsZero = Input(Bool())

    val toVecExcpMod = Output(new Bundle {
      val logicPhyRegMap = Vec(RabCommitWidth, ValidIO(new RegWriteFromRab))
      val excpInfo = ValidIO(new VecExcpInfo)
    })
    val debug_ls = Flipped(new DebugLSIO)
    val debugRobHead = Output(new DynInst)
    val debugEnqLsq = Input(new LsqEnqIO)
    val debugHeadLsIssue = Input(Bool())
    val lsTopdownInfo = Vec(LduCnt + HyuCnt, Input(new LsTopdownInfo))
    val debugTopDown = new Bundle {
      val toCore = new RobCoreTopDownIO
      val toDispatch = new RobDispatchTopDownIO
      val robHeadLqIdx = Valid(new LqPtr)
    }
    val debugRolling = new RobDebugRollingIO

    // store event difftest information
    val storeDebugInfo = Vec(EnsbufferWidth, new Bundle {
      val robidx = Input(new RobPtr)
      val pc     = Output(UInt(VAddrBits.W))
    })
  })

  val exuWBs: Seq[ValidIO[ExuOutput]] = io.exuWriteback.filter(!_.bits.params.hasStdFu).toSeq
  val stdWBs: Seq[ValidIO[ExuOutput]] = io.exuWriteback.filter(_.bits.params.hasStdFu).toSeq
  val vldWBs: Seq[ValidIO[ExuOutput]] = io.exuWriteback.filter(_.bits.params.hasVLoadFu).toSeq
  val fflagsWBs = io.exuWriteback.filter(x => x.bits.fflags.nonEmpty).toSeq
  val exceptionWBs = io.writeback.filter(x => x.bits.exceptionVec.nonEmpty).toSeq
  val redirectWBs = io.writeback.filter(x => x.bits.redirect.nonEmpty).toSeq
  val vxsatWBs = io.exuWriteback.filter(x => x.bits.vxsat.nonEmpty).toSeq
  val branchWBs = io.exuWriteback.filter(_.bits.params.hasBrhFu).toSeq
  val jmpWBs = io.exuWriteback.filter(_.bits.params.hasJmpFu).toSeq
  val csrWBs = io.exuWriteback.filter(x => x.bits.params.hasCSR).toSeq

  PerfCCT.tick(clock, reset)

  io.exuWriteback.zipWithIndex.foreach{ case (wb, i) =>
    PerfCCT.updateInstPos(wb.bits.debug_seqNum, PerfCCT.InstPos.AtWriteVal.id.U, wb.valid, clock, reset)
  }

  val numExuWbPorts = exuWBs.length
  val numStdWbPorts = stdWBs.length
  val bankAddrWidth = log2Up(CommitWidth)

  println(s"Rob: RobSize: $RobSize, numExuWbPorts: $numExuWbPorts, numStdWbPorts: $numStdWbPorts, CommitWidth: $CommitWidth")

  val rab = Module(new RenameBuffer(RabSize))
  val vtypeBuffer = Module(new VTypeBuffer(VTypeBufferSize))
  val bankNum = 8
  assert(RobSize % bankNum == 0, "RobSize % bankNum must be 0")
  val robEntries = RegInit(VecInit.fill(RobSize)((new RobEntryBundle).Lit(_.valid -> false.B)))
  // pointers
  // For the enqueue ptr, we don't duplicate it since only enqueue needs it.
  val enqPtrVec = Wire(Vec(RenameWidth, new RobPtr))
  val deqPtrVec = Wire(Vec(CommitWidth, new RobPtr))
  val deqPtrVec_next = Wire(Vec(CommitWidth, Output(new RobPtr)))
  val walkPtrVec = Reg(Vec(CommitWidth, new RobPtr))
  val walkPtrTrue = Reg(new RobPtr)
  val lastWalkPtr = Reg(new RobPtr)
  val allowEnqueue = RegInit(true.B)
  val allowEnqueueForDispatch = RegInit(true.B)
  val vecExcpInfo = RegInit(ValidIO(new VecExcpInfo).Lit(
    _.valid -> false.B,
  ))

  /**
   * Enqueue (from dispatch)
   */
  // special cases
  val hasBlockBackward = RegInit(false.B)
  val hasWaitForward = RegInit(false.B)
  val enqPtr = enqPtrVec(0)
  val deqPtr = deqPtrVec(0)
  val walkPtr = walkPtrVec(0)
  val allocatePtrVec = VecInit((0 until RenameWidth).map(i => enqPtrVec(PopCount(io.enq.req.take(i).map(req => req.valid && req.bits.firstUop)))))
  io.enq.canAccept := allowEnqueue && !hasBlockBackward && rab.io.canEnq && vtypeBuffer.io.canEnq && !io.fromVecExcpMod.busy
  io.enq.canAcceptForDispatch := allowEnqueueForDispatch && !hasBlockBackward && rab.io.canEnqForDispatch && vtypeBuffer.io.canEnqForDispatch && !io.fromVecExcpMod.busy
  io.enq.resp := allocatePtrVec
  val canEnqueue = VecInit(io.enq.req.map(req => req.valid && req.bits.firstUop && io.enq.canAccept))
  val timer = GTimer()
  // robEntries enqueue
  for (i <- 0 until RobSize) {
    val enqOH = VecInit(canEnqueue.zip(allocatePtrVec.map(_.value === i.U)).map(x => x._1 && x._2))
    assert(PopCount(enqOH) < 2.U, s"robEntries$i enqOH is not one-hot")
    when(enqOH.asUInt.orR && !io.redirect.valid){
      connectEnq(robEntries(i), Mux1H(enqOH, io.enq.req.map(_.bits)))
    }
  }
  // robBanks(0) contains the entries with robIdx 0, 8, 16, 24, 32, ...
  val robBanks = VecInit((0 until bankNum).map(i => VecInit(robEntries.zipWithIndex.filter(_._2 % bankNum == i).map(_._1))))
  // Each bank has RobSize / bankNum entries (20 in the default config); the read address is one-hot.
  // All banks share the same raddr.
  val eachBankEntryNum = robBanks(0).length
  val robBanksRaddrThisLine = RegInit(1.U(eachBankEntryNum.W))
  val robBanksRaddrNextLine = Wire(UInt(eachBankEntryNum.W))
  robBanksRaddrThisLine := robBanksRaddrNextLine
  val bankNumWidth = log2Up(bankNum)
  val deqPtrWidth = deqPtr.value.getWidth
  val robIdxThisLine = VecInit((0 until bankNum).map(i => Cat(deqPtr.value(deqPtrWidth - 1, bankNumWidth), i.U(bankNumWidth.W))))
  val robIdxNextLine = VecInit((0 until bankNum).map(i => Cat(deqPtr.value(deqPtrWidth - 1, bankNumWidth) + 1.U, i.U(bankNumWidth.W))))
  // robBanks read
  val robBanksRdataThisLine = VecInit(robBanks.map{ case bank =>
    Mux1H(robBanksRaddrThisLine, bank)
  })
  val robBanksRdataNextLine = VecInit(robBanks.map{ case bank =>
    val shiftBank = bank.drop(1) :+ bank(0)
    Mux1H(robBanksRaddrThisLine, shiftBank)
  })
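
  // The pair of reads above is easier to see in isolation: with a one-hot raddr,
  // Mux1H picks one entry per bank, and rotating the bank by one entry lets the
  // same raddr also fetch the following line. A minimal self-contained sketch
  // (illustrative only; names and widths are made up, not part of this module):
  //
  //   import chisel3._
  //   import chisel3.util._
  //   class OneHotBankRead extends Module {
  //     val io = IO(new Bundle {
  //       val raddrOH  = Input(UInt(4.W))        // one-hot, 4 entries per bank
  //       val bank     = Input(Vec(4, UInt(8.W)))
  //       val thisLine = Output(UInt(8.W))
  //       val nextLine = Output(UInt(8.W))
  //     })
  //     io.thisLine := Mux1H(io.raddrOH, io.bank)
  //     // rotate by one: a raddr selecting entry k now reads entry (k + 1) % 4
  //     val shiftBank = io.bank.drop(1) :+ io.bank(0)
  //     io.nextLine := Mux1H(io.raddrOH, shiftBank)
  //   }
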
  val robBanksRdataThisLineUpdate = Wire(Vec(CommitWidth, new RobEntryBundle))
  val robBanksRdataNextLineUpdate = Wire(Vec(CommitWidth, new RobEntryBundle))
  val commitValidThisLine = Wire(Vec(CommitWidth, Bool()))
  val hasCommitted = RegInit(VecInit(Seq.fill(CommitWidth)(false.B)))
  val donotNeedWalk = RegInit(VecInit(Seq.fill(CommitWidth)(false.B)))
  val allCommitted = Wire(Bool())

  when(allCommitted) {
    hasCommitted := 0.U.asTypeOf(hasCommitted)
  }.elsewhen(io.commits.isCommit){
    for (i <- 0 until CommitWidth){
      hasCommitted(i) := commitValidThisLine(i) || hasCommitted(i)
    }
  }
  allCommitted := io.commits.isCommit && commitValidThisLine.last
  val walkPtrHead = Wire(new RobPtr)
  val changeBankAddrToDeqPtr = (walkPtrVec.head + CommitWidth.U) > lastWalkPtr
  when(io.redirect.valid){
    robBanksRaddrNextLine := UIntToOH(walkPtrHead.value(walkPtrHead.value.getWidth-1, bankAddrWidth))
  }.elsewhen(allCommitted || io.commits.isWalk && !changeBankAddrToDeqPtr){
    robBanksRaddrNextLine := Mux(robBanksRaddrThisLine.head(1) === 1.U, 1.U, robBanksRaddrThisLine << 1)
  }.elsewhen(io.commits.isWalk && changeBankAddrToDeqPtr){
    robBanksRaddrNextLine := UIntToOH(deqPtr.value(deqPtr.value.getWidth-1, bankAddrWidth))
  }.otherwise(
    robBanksRaddrNextLine := robBanksRaddrThisLine
  )
  val robDeqGroup = Reg(Vec(bankNum, new RobCommitEntryBundle))
  val rawInfo = VecInit((0 until CommitWidth).map(i => robDeqGroup(deqPtrVec(i).value(bankAddrWidth-1, 0)))).toSeq
  val commitInfo = VecInit((0 until CommitWidth).map(i => robDeqGroup(deqPtrVec(i).value(bankAddrWidth-1, 0)))).toSeq
  val walkInfo = VecInit((0 until CommitWidth).map(i => robDeqGroup(walkPtrVec(i).value(bankAddrWidth-1, 0)))).toSeq
  for (i <- 0 until CommitWidth) {
    connectCommitEntry(robDeqGroup(i), robBanksRdataThisLineUpdate(i))
    when(allCommitted){
      connectCommitEntry(robDeqGroup(i), robBanksRdataNextLineUpdate(i))
    }
  }

  // In each ROB entry, ftqIdx and ftqOffset belong to the first instruction of the
  // compressed group, which is necessary when exceptions happen.
  // Update ftqOffset so the frontend is correctly notified of which instructions have committed.
  // Instructions from multiple Ftq entries are never compressed into one ROB entry.
  for (i <- 0 until CommitWidth) {
    val lastOffset = (rawInfo(i).traceBlockInPipe.iretire - (1.U << rawInfo(i).traceBlockInPipe.ilastsize.asUInt).asUInt) + rawInfo(i).ftqOffset
    commitInfo(i).ftqOffset := Mux(CommitType.isFused(rawInfo(i).commitType), rawInfo(i).ftqOffset, lastOffset)
  }
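
  // Worked example for lastOffset above (numbers illustrative): if a compressed
  // entry retires iretire = 4 halfwords and its last instruction is 4 bytes
  // (ilastsize = 1, so 1 << 1 = 2 halfwords), then
  //   lastOffset = (4 - 2) + ftqOffset = ftqOffset + 2
  // which points at the last instruction of the group, while fused commits keep
  // the first instruction's ftqOffset.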

  // data for debug
  // Warn: the debug_* prefix should not appear in the generated Verilog.
  val debug_microOp = DebugMem(RobSize, new DynInst)
  val debug_exuData = Reg(Vec(RobSize, UInt(XLEN.W))) // for debug
  val debug_exuDebug = Reg(Vec(RobSize, new DebugBundle)) // for debug
  val debug_lsInfo = RegInit(VecInit(Seq.fill(RobSize)(DebugLsInfo.init)))
  val debug_lsTopdownInfo = RegInit(VecInit(Seq.fill(RobSize)(LsTopdownInfo.init)))
  val debug_lqIdxValid = RegInit(VecInit.fill(RobSize)(false.B))
  val debug_lsIssued = RegInit(VecInit.fill(RobSize)(false.B))

  val isEmpty = enqPtr === deqPtr
  val snptEnq = io.enq.canAccept && io.enq.req.map(x => x.valid && x.bits.snapshot).reduce(_ || _)
  val snapshotPtrVec = Wire(Vec(CommitWidth, new RobPtr))
  snapshotPtrVec(0) := io.enq.req(0).bits.robIdx
  for (i <- 1 until CommitWidth) {
    snapshotPtrVec(i) := snapshotPtrVec(0) + i.U
  }
  val snapshots = SnapshotGenerator(snapshotPtrVec, snptEnq, io.snpt.snptDeq, io.redirect.valid, io.snpt.flushVec)
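  // Reading the SnapshotGenerator call above by argument position (inferred from
  // the names at this call site): snapshotPtrVec is the data captured when snptEnq
  // fires, io.snpt.snptDeq releases the oldest snapshot, and io.redirect.valid
  // together with io.snpt.flushVec drops snapshots that left the committed path.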
  val debug_lsIssue = WireDefault(debug_lsIssued)
  debug_lsIssue(deqPtr.value) := io.debugHeadLsIssue

  /**
   * states of Rob
   */
  val s_idle :: s_walk :: Nil = Enum(2)
  val state = RegInit(s_idle)
  val state_next = Wire(chiselTypeOf(state))

  val tip_computing :: tip_stalled :: tip_walk :: tip_drained :: Nil = Enum(4)
  val tip_state = WireInit(0.U(4.W))
  when(!isEmpty) { // one or more insts in the ROB
    when(state === s_walk || io.redirect.valid) {
      tip_state := tip_walk
    }.elsewhen(io.commits.isCommit && PopCount(io.commits.commitValid) =/= 0.U) {
      tip_state := tip_computing
    }.otherwise {
      tip_state := tip_stalled
    }
  }.otherwise {
    tip_state := tip_drained
  }
  class TipEntry()(implicit p: Parameters) extends XSBundle {
    val state = UInt(4.W)
    val commits = new RobCommitIO()      // info of commit
    val redirect = Valid(new Redirect)   // info of redirect
    val redirect_pc = UInt(VAddrBits.W)  // PC of the redirect uop
    val debugLsInfo = new DebugLsInfo()
  }
  val tip_table = ChiselDB.createTable("Tip_" + p(XSCoreParamsKey).HartId.toString, new TipEntry)
  val tip_data = Wire(new TipEntry())
  tip_data.state := tip_state
  tip_data.commits := io.commits
  tip_data.redirect := io.redirect
  tip_data.redirect_pc := debug_microOp(io.redirect.bits.robIdx.value).pc
  tip_data.debugLsInfo := debug_lsInfo(io.commits.robIdx(0).value)
  tip_table.log(tip_data, true.B, "", clock, reset)

  val exceptionGen = Module(new ExceptionGen(params))
  val exceptionDataRead = exceptionGen.io.state
  val fflagsDataRead = Wire(Vec(CommitWidth, UInt(5.W)))
  val vxsatDataRead = Wire(Vec(CommitWidth, Bool()))
  io.robDeqPtr := deqPtr
  io.debugRobHead := debug_microOp(deqPtr.value)

  /**
   * connection of [[rab]]
   */
  rab.io.redirect.valid := io.redirect.valid

  rab.io.req.zip(io.enq.req).foreach { case (dest, src) =>
    dest.bits := src.bits
    dest.valid := src.valid && io.enq.canAccept
  }

  val walkDestSizeDeqGroup = RegInit(VecInit(Seq.fill(CommitWidth)(0.U(log2Up(MaxUopSize + 1).W))))
  val realDestSizeSeq = VecInit(robDeqGroup.zip(hasCommitted).map{case (r, h) => Mux(h, 0.U, r.realDestSize)})
  val walkDestSizeSeq = VecInit(robDeqGroup.zip(donotNeedWalk).map{case (r, d) => Mux(d, 0.U, r.realDestSize)})
  val commitSizeSumSeq = VecInit((0 until CommitWidth).map(i => realDestSizeSeq.take(i + 1).reduce(_ +& _)))
  val walkSizeSumSeq   = VecInit((0 until CommitWidth).map(i => walkDestSizeSeq.take(i + 1).reduce(_ +& _)))
  val commitSizeSumCond = VecInit(commitValidThisLine.zip(hasCommitted).map{case (c, h) => (c || h) && io.commits.isCommit})
  val walkSizeSumCond   = VecInit(io.commits.walkValid.zip(donotNeedWalk).map{case (w, d) => (w || d) && io.commits.isWalk})
  val commitSizeSum = PriorityMuxDefault(commitSizeSumCond.reverse.zip(commitSizeSumSeq.reverse), 0.U)
  val walkSizeSum   = PriorityMuxDefault(walkSizeSumCond.reverse.zip(walkSizeSumSeq.reverse), 0.U)
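  // The .reverse pairing above makes PriorityMuxDefault a "youngest valid wins"
  // select: commitSizeSumSeq(i) is the prefix sum of realDestSize over slots 0..i,
  // so picking the sum at the youngest qualifying slot yields the total number of
  // rename-buffer entries to release this cycle (0.U when no slot qualifies).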

  val deqVlsExceptionNeedCommit = RegInit(false.B)
  val deqVlsExceptionCommitSize = RegInit(0.U(log2Up(MaxUopSize + 1).W))
  val deqVlsCanCommit = RegInit(false.B)
  rab.io.fromRob.commitSize := Mux(deqVlsExceptionNeedCommit, deqVlsExceptionCommitSize, commitSizeSum)
  rab.io.fromRob.walkSize := walkSizeSum
  rab.io.fromRob.vecLoadExcp.valid := RegNext(exceptionDataRead.valid && exceptionDataRead.bits.isVecLoad)
  rab.io.fromRob.vecLoadExcp.bits.isStrided := RegEnable(exceptionDataRead.bits.isStrided, exceptionDataRead.valid)
  rab.io.fromRob.vecLoadExcp.bits.isVlm := RegEnable(exceptionDataRead.bits.isVlm, exceptionDataRead.valid)
  rab.io.snpt := io.snpt
  rab.io.snpt.snptEnq := snptEnq

  // pipe rab commits for better timing and area
  io.rabCommits := RegNext(rab.io.commits)
  io.diffCommits.foreach(_ := rab.io.diffCommits.get)

  /**
   * connection of [[vtypeBuffer]]
   */

  vtypeBuffer.io.redirect.valid := io.redirect.valid

  vtypeBuffer.io.req.zip(io.enq.req).foreach { case (sink, source) =>
    sink.valid := source.valid && io.enq.canAccept
    sink.bits := source.bits
  }

  private val commitIsVTypeVec = VecInit(io.commits.commitValid.zip(io.commits.info).map { case (valid, info) => io.commits.isCommit && valid && info.isVset })
  private val walkIsVTypeVec = VecInit(io.commits.walkValid.zip(walkInfo).map { case (valid, info) => io.commits.isWalk && valid && info.isVset })
  vtypeBuffer.io.fromRob.commitSize := PopCount(commitIsVTypeVec)
  vtypeBuffer.io.fromRob.walkSize := PopCount(walkIsVTypeVec)
  vtypeBuffer.io.snpt := io.snpt
  vtypeBuffer.io.snpt.snptEnq := snptEnq
  io.toDecode.walkToArchVType := vtypeBuffer.io.toDecode.walkToArchVType
  io.toDecode.commitVType := vtypeBuffer.io.toDecode.commitVType
  io.toDecode.walkVType := vtypeBuffer.io.toDecode.walkVType

  // When a blockBackward instruction leaves the ROB (commit or walk), hasBlockBackward should be cleared.
  // To reduce register usage, for hasBlockBackward cases we simply allow enqueue again once the ROB is empty.
  when(isEmpty) {
    hasBlockBackward := false.B
  }
  // When any instruction commits or walks, hasWaitForward should be cleared.
  when(io.commits.hasWalkInstr || io.commits.hasCommitInstr) {
    hasWaitForward := false.B
  }

  // The wait-for-interrupt (WFI) instruction waits in the ROB until an interrupt might need servicing.
  // io.csr.wfiEvent is asserted when the WFI can resume execution, and hasWFI is then cleared.
  // This does not affect how interrupts are serviced. Note that WFI is noSpecExec and it does not trigger interrupts.
  val hasWFI = RegInit(false.B)
  io.cpu_halt := hasWFI
  // WFI timeout: 2^20 = 1M cycles
  val wfi_cycles = RegInit(0.U(20.W))
  if (wfiResume) {
    when(hasWFI) {
      wfi_cycles := wfi_cycles + 1.U
    }.elsewhen(RegNext(hasWFI)) {
      wfi_cycles := 0.U
    }
  }
  val wfi_timeout = wfi_cycles.andR
  when(RegNext(RegNext(io.csr.wfiEvent)) || io.flushOut.valid || wfi_timeout) {
    hasWFI := false.B
  }

  for (i <- 0 until RenameWidth) {
    // we don't check whether io.redirect is valid here since redirect has higher priority
    when(canEnqueue(i)) {
      val enqUop = io.enq.req(i).bits
      val enqIndex = allocatePtrVec(i).value
      // store uop in data module and debug_microOp Vec
      debug_microOp(enqIndex) := enqUop
      debug_microOp(enqIndex).debugInfo.dispatchTime := timer
      debug_microOp(enqIndex).debugInfo.enqRsTime := timer
      debug_microOp(enqIndex).debugInfo.selectTime := timer
      debug_microOp(enqIndex).debugInfo.issueTime := timer
      debug_microOp(enqIndex).debugInfo.writebackTime := timer
      debug_microOp(enqIndex).debugInfo.tlbFirstReqTime := timer
      debug_microOp(enqIndex).debugInfo.tlbRespTime := timer
      debug_lsInfo(enqIndex) := DebugLsInfo.init
      debug_lsTopdownInfo(enqIndex) := LsTopdownInfo.init
      debug_lqIdxValid(enqIndex) := false.B
      debug_lsIssued(enqIndex) := false.B
      when(enqUop.waitForward) {
        hasWaitForward := true.B
      }
      val enqTriggerActionIsDebugMode = TriggerAction.isDmode(io.enq.req(i).bits.trigger)
      val enqHasException = ExceptionNO.selectFrontend(enqUop.exceptionVec).asUInt.orR
      when(enqUop.isWFI && !enqHasException && !enqTriggerActionIsDebugMode) {
        hasWFI := true.B
      }

      robEntries(enqIndex).mmio := false.B
      robEntries(enqIndex).vls := enqUop.vlsInstr
    }
  }

  for (i <- 0 until RenameWidth) {
    val enqUop = io.enq.req(i)
    when(enqUop.valid && enqUop.bits.blockBackward && io.enq.canAccept) {
      hasBlockBackward := true.B
    }
  }

  val dispatchNum = Mux(io.enq.canAccept, PopCount(io.enq.req.map(req => req.valid && req.bits.firstUop)), 0.U)
  io.enq.isEmpty := RegNext(isEmpty && !VecInit(io.enq.req.map(_.valid)).asUInt.orR)

  when(!io.wfi_enable) {
    hasWFI := false.B
  }
  // select vsetvl's flush position
  val vs_idle :: vs_waitVinstr :: vs_waitFlush :: Nil = Enum(3)
  val vsetvlState = RegInit(vs_idle)

  val firstVInstrFtqPtr = RegInit(0.U.asTypeOf(new FtqPtr))
  val firstVInstrFtqOffset = RegInit(0.U.asTypeOf(UInt(log2Up(PredictWidth).W)))
  val firstVInstrRobIdx = RegInit(0.U.asTypeOf(new RobPtr))

  val enq0 = io.enq.req(0)
  val enq0IsVset = enq0.bits.isVset && enq0.bits.lastUop && canEnqueue(0)
  val enq0IsVsetFlush = enq0IsVset && enq0.bits.flushPipe
  val enqIsVInstrVec = io.enq.req.zip(canEnqueue).map { case (req, fire) => FuType.isVArith(req.bits.fuType) && fire }
  // for vs_idle
  val firstVInstrIdle = PriorityMux(enqIsVInstrVec.zip(io.enq.req).drop(1) :+ (true.B, 0.U.asTypeOf(io.enq.req(0).cloneType)))
  // for vs_waitVinstr
  val enqIsVInstrOrVset = (enqIsVInstrVec(0) || enq0IsVset) +: enqIsVInstrVec.drop(1)
  val firstVInstrWait = PriorityMux(enqIsVInstrOrVset, io.enq.req)
  when(vsetvlState === vs_idle) {
    firstVInstrFtqPtr := firstVInstrIdle.bits.ftqPtr
    firstVInstrFtqOffset := firstVInstrIdle.bits.ftqOffset
    firstVInstrRobIdx := firstVInstrIdle.bits.robIdx
  }.elsewhen(vsetvlState === vs_waitVinstr) {
    when(Cat(enqIsVInstrOrVset).orR) {
      firstVInstrFtqPtr := firstVInstrWait.bits.ftqPtr
      firstVInstrFtqOffset := firstVInstrWait.bits.ftqOffset
      firstVInstrRobIdx := firstVInstrWait.bits.robIdx
    }
  }

  val hasVInstrAfterI = Cat(enqIsVInstrVec(0)).orR
  when(vsetvlState === vs_idle && !io.redirect.valid) {
    when(enq0IsVsetFlush) {
      vsetvlState := Mux(hasVInstrAfterI, vs_waitFlush, vs_waitVinstr)
    }
  }.elsewhen(vsetvlState === vs_waitVinstr) {
    when(io.redirect.valid) {
      vsetvlState := vs_idle
    }.elsewhen(Cat(enqIsVInstrOrVset).orR) {
      vsetvlState := vs_waitFlush
    }
  }.elsewhen(vsetvlState === vs_waitFlush) {
    when(io.redirect.valid) {
      vsetvlState := vs_idle
    }
  }
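
  // vsetvlState transition summary (as implemented above):
  //   vs_idle       --enq0IsVsetFlush--> vs_waitFlush when a vector instruction
  //                                      enqueues in the same group, else vs_waitVinstr
  //   vs_waitVinstr --vector inst or vset enqueues--> vs_waitFlush
  //   vs_waitVinstr / vs_waitFlush --io.redirect.valid--> vs_idle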

  // lqEnq
  io.debugEnqLsq.needAlloc.map(_(0)).zip(io.debugEnqLsq.req).foreach { case (alloc, req) =>
    when(io.debugEnqLsq.canAccept && alloc && req.valid) {
      debug_microOp(req.bits.robIdx.value).lqIdx := req.bits.lqIdx
      debug_lqIdxValid(req.bits.robIdx.value) := true.B
    }
  }

  // lsIssue
  when(io.debugHeadLsIssue) {
    debug_lsIssued(deqPtr.value) := true.B
  }

  /**
   * Writeback (from execution units)
   */
  for (wb <- exuWBs) {
    val wbIdx = wb.bits.robIdx.value
    val debug_Uop = debug_microOp(wbIdx)
    when(wb.valid) {
      debug_exuData(wbIdx) := wb.bits.data(0)
      debug_exuDebug(wbIdx) := wb.bits.debug
      debug_microOp(wbIdx).debugInfo.enqRsTime := wb.bits.debugInfo.enqRsTime
      debug_microOp(wbIdx).debugInfo.selectTime := wb.bits.debugInfo.selectTime
      debug_microOp(wbIdx).debugInfo.issueTime := wb.bits.debugInfo.issueTime
      debug_microOp(wbIdx).debugInfo.writebackTime := wb.bits.debugInfo.writebackTime

      // debug for lqIdx and sqIdx
      debug_microOp(wbIdx).lqIdx := wb.bits.lqIdx.getOrElse(0.U.asTypeOf(new LqPtr))
      debug_microOp(wbIdx).sqIdx := wb.bits.sqIdx.getOrElse(0.U.asTypeOf(new SqPtr))
    }
    XSInfo(wb.valid,
      p"writebacked pc 0x${Hexadecimal(debug_Uop.pc)} wen ${debug_Uop.rfWen} " +
        p"data 0x${Hexadecimal(wb.bits.data(0))} ldst ${debug_Uop.ldest} pdst ${debug_Uop.pdest} " +
        p"skip ${wb.bits.debug.isSkipDiff} robIdx: ${wb.bits.robIdx}\n"
    )
  }

  val writebackNum = PopCount(exuWBs.map(_.valid))
  XSInfo(writebackNum =/= 0.U, "writebacked %d insts\n", writebackNum)

  for (i <- 0 until LoadPipelineWidth) {
    when(RegNext(io.lsq.mmio(i))) {
      robEntries(RegEnable(io.lsq.uop(i).robIdx, io.lsq.mmio(i)).value).mmio := true.B
    }
  }


  /**
   * RedirectOut: Interrupt and Exceptions
   */
  val debug_deqUop = debug_microOp(deqPtr.value)

  val deqPtrEntry = rawInfo(0)
  val deqPtrEntryValid = deqPtrEntry.commit_v
  val deqHasFlushed = RegInit(false.B)
  val intrBitSetReg = RegNext(io.csr.intrBitSet)
  val intrEnable = intrBitSetReg && !hasWaitForward && deqPtrEntry.interrupt_safe && !deqHasFlushed
  val deqNeedFlush = deqPtrEntry.needFlush && deqPtrEntry.commit_v && deqPtrEntry.commit_w
  val deqHitExceptionGenState = exceptionDataRead.valid && exceptionDataRead.bits.robIdx === deqPtr
  val deqNeedFlushAndHitExceptionGenState = deqNeedFlush && deqHitExceptionGenState
  val exceptionGenStateIsException = exceptionDataRead.bits.exceptionVec.asUInt.orR || exceptionDataRead.bits.singleStep || TriggerAction.isDmode(exceptionDataRead.bits.trigger)
  val deqHasException = deqNeedFlushAndHitExceptionGenState && exceptionGenStateIsException && (!deqPtrEntry.isVls || RegNext(RegNext(deqPtrEntry.commit_w)))
  val deqHasFlushPipe = deqNeedFlushAndHitExceptionGenState && exceptionDataRead.bits.flushPipe && !deqHasException && (!deqPtrEntry.isVls || RegNext(RegNext(deqPtrEntry.commit_w)))
  val deqHasReplayInst = deqNeedFlushAndHitExceptionGenState && exceptionDataRead.bits.replayInst
  val deqIsVlsException = deqHasException && deqPtrEntry.isVls && !exceptionDataRead.bits.isEnqExcp
  // delay 2 cycles to wait for the exceptionGen output
  // a vls exception can be committed only after the RAB has committed all of its register pairs
  deqVlsCanCommit := RegNext(RegNext(deqIsVlsException && deqPtrEntry.commit_w)) && rab.io.status.commitEnd

  // lock on the assertion of deqVlsExceptionNeedCommit until the condition deasserts
  val deqVlsExcpLock = RegInit(false.B)
  val handleVlsExcp = deqIsVlsException && deqVlsCanCommit && !deqVlsExcpLock && state === s_idle
  when(handleVlsExcp) {
    deqVlsExcpLock := true.B
  }.elsewhen(deqPtrVec.head =/= deqPtrVec_next.head) {
    deqVlsExcpLock := false.B
  }

  // Assert only once per vls exception, until the condition deasserts, to avoid passing multiple messages to the RAB
  when(deqVlsExceptionNeedCommit) {
    deqVlsExceptionNeedCommit := false.B
  }.elsewhen(handleVlsExcp){
    deqVlsExceptionCommitSize := deqPtrEntry.realDestSize
    deqVlsExceptionNeedCommit := true.B
  }

  XSDebug(deqHasException && exceptionDataRead.bits.singleStep, "Debug Mode: Deq has singlestep exception\n")
  XSDebug(deqHasException && TriggerAction.isDmode(exceptionDataRead.bits.trigger), "Debug Mode: Deq has trigger entry debug Mode\n")

  val isFlushPipe = deqPtrEntry.commit_w && (deqHasFlushPipe || deqHasReplayInst)

  // a vsetvl instruction needs one more cycle to write to vtype gen
  val isVsetFlushPipe = deqPtrEntry.commit_w && deqHasFlushed && exceptionDataRead.bits.isVset
  val isVsetFlushPipeReg = RegNext(isVsetFlushPipe)
  //  val needModifyFtqIdxOffset = isVsetFlushPipe && (vsetvlState === vs_waitFlush)
  val needModifyFtqIdxOffset = false.B
  io.isVsetFlushPipe := isVsetFlushPipe
  io.toDecode.isResumeVType := vtypeBuffer.io.toDecode.isResumeVType || isVsetFlushPipeReg
  // io.flushOut will trigger a redirect at the next cycle.
  // Block any redirect or commit at the next cycle.
  val lastCycleFlush = RegNext(io.flushOut.valid)

  io.flushOut.valid := (state === s_idle) && deqPtrEntryValid && (intrEnable || deqHasException && (!deqIsVlsException || deqVlsCanCommit) || isFlushPipe) && !lastCycleFlush
  io.flushOut.bits := DontCare
  io.flushOut.bits.isRVC := deqPtrEntry.isRVC
  io.flushOut.bits.robIdx := Mux(needModifyFtqIdxOffset, firstVInstrRobIdx, deqPtr)
  io.flushOut.bits.ftqIdx := Mux(needModifyFtqIdxOffset, firstVInstrFtqPtr, deqPtrEntry.ftqIdx)
  io.flushOut.bits.ftqOffset := Mux(needModifyFtqIdxOffset, firstVInstrFtqOffset, deqPtrEntry.ftqOffset)
  io.flushOut.bits.level := Mux(deqHasReplayInst || intrEnable || deqHasException || needModifyFtqIdxOffset, RedirectLevel.flush, RedirectLevel.flushAfter) // TODO: use this to implement "exception next"
  io.flushOut.bits.interrupt := true.B
  XSPerfAccumulate("flush_num", io.flushOut.valid)
  XSPerfAccumulate("interrupt_num", io.flushOut.valid && intrEnable)
  XSPerfAccumulate("exception_num", io.flushOut.valid && deqHasException)
  XSPerfAccumulate("flush_pipe_num", io.flushOut.valid && isFlushPipe)
  XSPerfAccumulate("replay_inst_num", io.flushOut.valid && isFlushPipe && deqHasReplayInst)

  val exceptionHappen = (state === s_idle) && deqPtrEntryValid && (intrEnable || deqHasException && (!deqIsVlsException || deqVlsCanCommit)) && !lastCycleFlush
  io.exception.valid := RegNext(exceptionHappen)
  io.exception.bits.pc := RegEnable(debug_deqUop.pc, exceptionHappen)
  io.exception.bits.gpaddr := io.readGPAMemData.gpaddr
  io.exception.bits.isForVSnonLeafPTE := io.readGPAMemData.isForVSnonLeafPTE
  io.exception.bits.instr := RegEnable(debug_deqUop.instr, exceptionHappen)
  io.exception.bits.commitType := RegEnable(deqPtrEntry.commitType, exceptionHappen)
  io.exception.bits.exceptionVec := RegEnable(exceptionDataRead.bits.exceptionVec, exceptionHappen)
  // fetch trigger fire or execute ebreak
  io.exception.bits.isPcBkpt := RegEnable(
    exceptionDataRead.bits.exceptionVec(ExceptionNO.EX_BP) && (
      exceptionDataRead.bits.isEnqExcp ||
      exceptionDataRead.bits.trigger === TriggerAction.None
    ),
    exceptionHappen,
  )
  io.exception.bits.isFetchMalAddr := RegEnable(exceptionDataRead.bits.isFetchMalAddr && deqHasException, exceptionHappen)
  io.exception.bits.singleStep := RegEnable(exceptionDataRead.bits.singleStep, exceptionHappen)
  io.exception.bits.crossPageIPFFix := RegEnable(exceptionDataRead.bits.crossPageIPFFix, exceptionHappen)
  io.exception.bits.isInterrupt := RegEnable(intrEnable, exceptionHappen)
  io.exception.bits.isHls := RegEnable(deqPtrEntry.isHls, exceptionHappen)
  io.exception.bits.vls := RegEnable(deqPtrEntry.vls, exceptionHappen)
  io.exception.bits.trigger := RegEnable(exceptionDataRead.bits.trigger, exceptionHappen)

  // the GPA mem data arrives one cycle after the valid
  io.readGPAMemAddr.valid := exceptionHappen
  io.readGPAMemAddr.bits.ftqPtr := exceptionDataRead.bits.ftqPtr
  io.readGPAMemAddr.bits.ftqOffset := exceptionDataRead.bits.ftqOffset

  XSDebug(io.flushOut.valid,
    p"generate redirect: pc 0x${Hexadecimal(io.exception.bits.pc)} intr $intrEnable " +
      p"excp $deqHasException flushPipe $isFlushPipe " +
      p"Trap_target 0x${Hexadecimal(io.csr.trapTarget.pc)} exceptionVec ${Binary(exceptionDataRead.bits.exceptionVec.asUInt)}\n")


  /**
   * Commits (and walk)
   * They share the same width.
   */
  // At T redirect.valid; at T+1 walkPtrVec is used to read robEntries; at T+2 the walk starts and shouldWalkVec is used
  val shouldWalkVec = Wire(Vec(CommitWidth, Bool()))
  val walkingPtrVec = RegNext(walkPtrVec)
  when(io.redirect.valid || RegNext(io.redirect.valid)){
    shouldWalkVec := 0.U.asTypeOf(shouldWalkVec)
  }.elsewhen(state === s_walk){
    shouldWalkVec := VecInit(walkingPtrVec.map(_ <= lastWalkPtr).zip(donotNeedWalk).map(x => x._1 && !x._2))
  }.otherwise(
    shouldWalkVec := 0.U.asTypeOf(shouldWalkVec)
  )
  val walkFinished = walkPtrTrue > lastWalkPtr
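  // The walkPtrTrue > lastWalkPtr comparison relies on the circular-queue pointer
  // ordering from HasCircularQueuePtrHelper: each RobPtr carries a wrap flag next
  // to its value, so ordering stays correct across wrap-around. A minimal sketch
  // of the idea (illustrative; not this codebase's exact implementation):
  //
  //   // a > b for circular pointers (aFlag, aVal) and (bFlag, bVal):
  //   // same flag -> plain value compare; flags differ -> a has wrapped past b,
  //   // so the comparison inverts (equal values then mean a full queue ahead)
  //   def gt(aFlag: Bool, aVal: UInt, bFlag: Bool, bVal: UInt): Bool =
  //     (aFlag =/= bFlag) ^ (aVal > bVal)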
  rab.io.fromRob.walkEnd := state === s_walk && walkFinished
  vtypeBuffer.io.fromRob.walkEnd := state === s_walk && walkFinished

  require(RenameWidth <= CommitWidth)

  // wiring to csr
  val (wflags, dirtyFs) = (0 until CommitWidth).map(i => {
    val v = io.commits.commitValid(i)
    val info = io.commits.info(i)
    (v & info.wflags, v & info.dirtyFs)
  }).unzip
  val fflags = Wire(Valid(UInt(5.W)))
  fflags.valid := io.commits.isCommit && VecInit(wflags).asUInt.orR
  fflags.bits := wflags.zip(fflagsDataRead).map({
    case (w, f) => Mux(w, f, 0.U)
  }).reduce(_ | _)
  val dirtyVs = (0 until CommitWidth).map(i => {
    val v = io.commits.commitValid(i)
    val info = io.commits.info(i)
    v & info.dirtyVs
  })
  val dirty_fs = io.commits.isCommit && VecInit(dirtyFs).asUInt.orR
  val dirty_vs = io.commits.isCommit && VecInit(dirtyVs).asUInt.orR

  val resetVstart = dirty_vs && !io.vstartIsZero

  vecExcpInfo.valid := exceptionHappen && !intrEnable && exceptionDataRead.bits.vstartEn && exceptionDataRead.bits.isVecLoad && !exceptionDataRead.bits.isEnqExcp
  when(exceptionHappen) {
    vecExcpInfo.bits.nf := exceptionDataRead.bits.nf
    vecExcpInfo.bits.vsew := exceptionDataRead.bits.vsew
    vecExcpInfo.bits.veew := exceptionDataRead.bits.veew
    vecExcpInfo.bits.vlmul := exceptionDataRead.bits.vlmul
    vecExcpInfo.bits.isStride := exceptionDataRead.bits.isStrided
    vecExcpInfo.bits.isIndexed := exceptionDataRead.bits.isIndexed
    vecExcpInfo.bits.isWhole := exceptionDataRead.bits.isWhole
    vecExcpInfo.bits.isVlm := exceptionDataRead.bits.isVlm
    vecExcpInfo.bits.vstart := exceptionDataRead.bits.vstart
  }

  io.csr.vstart.valid := RegNext(Mux(exceptionHappen && deqHasException, exceptionDataRead.bits.vstartEn, resetVstart))
  io.csr.vstart.bits := RegNext(Mux(exceptionHappen && deqHasException, exceptionDataRead.bits.vstart, 0.U))

  val vxsat = Wire(Valid(Bool()))
  vxsat.valid := io.commits.isCommit && vxsat.bits
  vxsat.bits := io.commits.commitValid.zip(vxsatDataRead).map {
    case (valid, vxsat) => valid & vxsat
  }.reduce(_ | _)

  // when a mispredicted branch writes back, block commit for the next 3 cycles
  // TODO: don't check all exu writebacks
  val misPredWb = Cat(VecInit(redirectWBs.map(wb =>
    wb.bits.redirect.get.bits.cfiUpdate.isMisPred && wb.bits.redirect.get.valid && wb.valid
  ).toSeq)).orR
  val misPredBlockCounter = Reg(UInt(3.W))
  misPredBlockCounter := Mux(misPredWb,
    "b111".U,
    misPredBlockCounter >> 1.U
  )
  val misPredBlock = misPredBlockCounter(0)
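  // Timing sketch for the window above (illustrative): if misPredWb fires at cycle
  // T, the counter is written to b111 at T+1 and then shifts right each cycle, so
  // misPredBlock (bit 0) blocks commit at T+1, T+2 and T+3, a three-cycle window
  // after the mispredicted branch writes back.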
  val deqFlushBlockCounter = Reg(UInt(3.W))
  val deqFlushBlock = deqFlushBlockCounter(0)
  val deqHasCommitted = io.commits.isCommit && io.commits.commitValid(0)
  // TODO *** WARNING ***
  // Blocking commit. Don't change this before we fully understand the logic.
  val deqHitRedirectReg = RegNext(io.redirect.valid && io.redirect.bits.robIdx === deqPtr) || RegNext(RegNext(io.redirect.valid && io.redirect.bits.robIdx === deqPtr))
  val criticalErrorState = io.csr.criticalErrorState
  when(deqNeedFlush && deqHitRedirectReg){
    deqFlushBlockCounter := "b111".U
  }.otherwise{
    deqFlushBlockCounter := deqFlushBlockCounter >> 1.U
  }
  when(deqHasCommitted){
    deqHasFlushed := false.B
  }.elsewhen(deqNeedFlush && io.flushOut.valid && !io.flushOut.bits.flushItself()){
    deqHasFlushed := true.B
  }
  val traceBlock = io.trace.blockCommit
  val blockCommit = misPredBlock || lastCycleFlush || hasWFI || io.redirect.valid ||
    (deqNeedFlush && !deqHasFlushed) || deqFlushBlock || criticalErrorState || traceBlock

  io.commits.isWalk := state === s_walk
  io.commits.isCommit := state === s_idle && !blockCommit

  val walk_v = VecInit(walkingPtrVec.map(ptr => robEntries(ptr.value).valid))
  val commit_vDeqGroup = VecInit(robDeqGroup.map(_.commit_v))
  val commit_wDeqGroup = VecInit(robDeqGroup.map(_.commit_w))
  val realCommitLast = deqPtrVec(0).lineHeadPtr + Fill(bankAddrWidth, 1.U)
  val commit_block = VecInit((0 until CommitWidth).map(i => !commit_wDeqGroup(i) && !hasCommitted(i)))
  val allowOnlyOneCommit = VecInit(robDeqGroup.map(x => x.commit_v && x.needFlush)).asUInt.orR || intrBitSetReg
  // for instructions that may block others, we don't allow them to commit
  io.commits.commitValid := PriorityMux(commitValidThisLine, (0 until CommitWidth).map(i => (commitValidThisLine.asUInt >> i).asUInt.asTypeOf(io.commits.commitValid)))

  for (i <- 0 until CommitWidth) {
    // defaults: state === s_idle and instructions commit
    // when intrBitSetReg, allow only one instruction to commit at each clock cycle
    val isBlocked = intrEnable || (deqNeedFlush && !deqHasFlushed)
    val isBlockedByOlder = if (i != 0) commit_block.asUInt(i, 0).orR || allowOnlyOneCommit && !hasCommitted.asUInt(i - 1, 0).andR else false.B
    commitValidThisLine(i) := commit_vDeqGroup(i) && commit_wDeqGroup(i) && !isBlocked && !isBlockedByOlder && !hasCommitted(i)
    io.commits.info(i) := commitInfo(i)
    io.commits.robIdx(i) := deqPtrVec(i)
    val deqDebugInst = debug_microOp(deqPtrVec(i).value)
    PerfCCT.commitInstMeta(i.U, deqDebugInst.debug_seqNum, deqDebugInst.instrSize, io.commits.isCommit && io.commits.commitValid(i), clock, reset)

    io.commits.walkValid(i) := shouldWalkVec(i)
    XSError(
      state === s_walk && io.commits.isWalk && shouldWalkVec(i) && !walk_v(i),
      s"The walking entry($i) should be valid\n")

    XSInfo(io.commits.isCommit && io.commits.commitValid(i),
      "retired pc %x wen %d ldest %d pdest %x data %x fflags: %b vxsat: %b\n",
      debug_microOp(deqPtrVec(i).value).pc,
      io.commits.info(i).rfWen,
      io.commits.info(i).debug_ldest.getOrElse(0.U),
      io.commits.info(i).debug_pdest.getOrElse(0.U),
      debug_exuData(deqPtrVec(i).value),
      fflagsDataRead(i),
      vxsatDataRead(i)
    )
    XSInfo(state === s_walk && io.commits.walkValid(i), "walked pc %x wen %d ldst %d data %x\n",
      debug_microOp(walkPtrVec(i).value).pc,
      io.commits.info(i).rfWen,
      io.commits.info(i).debug_ldest.getOrElse(0.U),
      debug_exuData(walkPtrVec(i).value)
    )
  }

  // sync fflags/dirty_fs/vxsat to csr
  io.csr.fflags   := RegNextWithEnable(fflags)
  io.csr.dirty_fs := GatedValidRegNext(dirty_fs)
  io.csr.dirty_vs := GatedValidRegNext(dirty_vs)
  io.csr.vxsat    := RegNextWithEnable(vxsat)

  // commit load/store to lsq
  val ldCommitVec = VecInit((0 until CommitWidth).map(i => io.commits.commitValid(i) && io.commits.info(i).commitType === CommitType.LOAD))
  // TODO: check that scommit is only set when a scalar store uop commits
  val stCommitVec = VecInit((0 until CommitWidth).map(i => io.commits.commitValid(i) && io.commits.info(i).commitType === CommitType.STORE && !robEntries(deqPtrVec(i).value).vls))
  io.lsq.lcommit := RegNext(Mux(io.commits.isCommit, PopCount(ldCommitVec), 0.U))
  io.lsq.scommit := RegNext(Mux(io.commits.isCommit, PopCount(stCommitVec), 0.U))
  // indicate a pending load or store
  io.lsq.pendingMMIOld := RegNext(io.commits.isCommit && io.commits.info(0).commitType === CommitType.LOAD && deqPtrEntryValid && deqPtrEntry.mmio)
  io.lsq.pendingld := RegNext(io.commits.isCommit && io.commits.info(0).commitType === CommitType.LOAD && deqPtrEntryValid)
  // TODO: check whether pendingst needs to be deasserted when it is a vector store
  io.lsq.pendingst := RegNext(io.commits.isCommit && io.commits.info(0).commitType === CommitType.STORE && deqPtrEntryValid)
  // TODO: check that this is set correctly when a vector store is at the head of the ROB
  io.lsq.pendingVst := RegNext(io.commits.isCommit && io.commits.info(0).commitType === CommitType.STORE && deqPtrEntryValid && deqPtrEntry.vls)
  io.lsq.commit := RegNext(io.commits.isCommit && io.commits.commitValid(0))
  io.lsq.pendingPtr := RegNext(deqPtr)
  io.lsq.pendingPtrNext := RegNext(deqPtrVec_next.head)

  /**
   * state changes
   * (1) redirect: switch to s_walk
   * (2) walk: when walking comes to the end, switch to s_idle
   */
  state_next := Mux(
    io.redirect.valid || RegNext(io.redirect.valid), s_walk,
    Mux(
      state === s_walk && walkFinished && rab.io.status.walkEnd && vtypeBuffer.io.status.walkEnd, s_idle,
      state
    )
  )
  XSPerfAccumulate("s_idle_to_idle", state === s_idle && state_next === s_idle)
  XSPerfAccumulate("s_idle_to_walk", state === s_idle && state_next === s_walk)
  XSPerfAccumulate("s_walk_to_idle", state === s_walk && state_next === s_idle)
  XSPerfAccumulate("s_walk_to_walk", state === s_walk && state_next === s_walk)
  state := state_next

  /**
   * pointers and counters
   */
  val deqPtrGenModule = Module(new NewRobDeqPtrWrapper)
  deqPtrGenModule.io.state := state
  deqPtrGenModule.io.deq_v := commit_vDeqGroup
  deqPtrGenModule.io.deq_w := commit_wDeqGroup
  deqPtrGenModule.io.exception_state := exceptionDataRead
  deqPtrGenModule.io.intrBitSetReg := intrBitSetReg
  deqPtrGenModule.io.hasNoSpecExec := hasWaitForward
  deqPtrGenModule.io.allowOnlyOneCommit := allowOnlyOneCommit
  deqPtrGenModule.io.interrupt_safe := robDeqGroup(deqPtr.value(bankAddrWidth-1, 0)).interrupt_safe
  deqPtrGenModule.io.blockCommit := blockCommit
  deqPtrGenModule.io.hasCommitted := hasCommitted
  deqPtrGenModule.io.allCommitted := allCommitted
  deqPtrVec := deqPtrGenModule.io.out
  deqPtrVec_next := deqPtrGenModule.io.next_out

  val enqPtrGenModule = Module(new RobEnqPtrWrapper)
  enqPtrGenModule.io.redirect := io.redirect
  enqPtrGenModule.io.allowEnqueue := allowEnqueue && rab.io.canEnq && !io.fromVecExcpMod.busy
  enqPtrGenModule.io.hasBlockBackward := hasBlockBackward
  enqPtrGenModule.io.enq := VecInit(io.enq.req.map(req => req.valid && req.bits.firstUop))
  enqPtrVec := enqPtrGenModule.io.out

  // next walkPtrVec:
  // (1) redirect occurs: update according to state
  // (2) walk: move forwards
  val deqPtrReadBank = deqPtrVec_next(0).lineHeadPtr
  val deqPtrVecForWalk = VecInit((0 until CommitWidth).map(i => deqPtrReadBank + i.U))
  val snapPtrReadBank = snapshots(io.snpt.snptSelect)(0).lineHeadPtr
  val snapPtrVecForWalk = VecInit((0 until CommitWidth).map(i => snapPtrReadBank + i.U))
  val walkPtrVec_next: Vec[RobPtr] = Mux(io.redirect.valid,
    Mux(io.snpt.useSnpt, snapPtrVecForWalk, deqPtrVecForWalk),
    Mux((state === s_walk) && !walkFinished, VecInit(walkPtrVec.map(_ + CommitWidth.U)), walkPtrVec)
  )
  val walkPtrTrue_next: RobPtr = Mux(io.redirect.valid,
    Mux(io.snpt.useSnpt, snapshots(io.snpt.snptSelect)(0), deqPtrVec_next(0)),
    Mux((state === s_walk) && !walkFinished, walkPtrVec_next.head, walkPtrTrue)
  )
  walkPtrHead := walkPtrVec_next.head
  walkPtrVec := walkPtrVec_next
  walkPtrTrue := walkPtrTrue_next
  // At T io.redirect.valid; at T+1 walkPtrLowBits updates; at T+2 donotNeedWalk updates
  val walkPtrLowBits = Reg(UInt(bankAddrWidth.W))
  when(io.redirect.valid){
    walkPtrLowBits := Mux(io.snpt.useSnpt, snapshots(io.snpt.snptSelect)(0).value(bankAddrWidth-1, 0), deqPtrVec_next(0).value(bankAddrWidth-1, 0))
  }
  when(io.redirect.valid) {
    donotNeedWalk := Fill(donotNeedWalk.length, true.B).asTypeOf(donotNeedWalk)
  }.elsewhen(RegNext(io.redirect.valid)){
    donotNeedWalk := (0 until CommitWidth).map(i => (i.U < walkPtrLowBits))
  }.otherwise{
    donotNeedWalk := 0.U.asTypeOf(donotNeedWalk)
  }
  walkDestSizeDeqGroup.zip(walkPtrVec_next).foreach {
    case (reg, ptrNext) => reg := robEntries(ptrNext.value).realDestSize
  }
  val numValidEntries = distanceBetween(enqPtr, deqPtr)
  val commitCnt = PopCount(io.commits.commitValid)

  allowEnqueue := numValidEntries + dispatchNum <= (RobSize - RenameWidth).U
  allowEnqueueForDispatch := numValidEntries + dispatchNum <= (RobSize - 2 * RenameWidth).U
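  // The two thresholds above reserve headroom so in-flight dispatch requests can
  // never overrun the ROB: allowEnqueue keeps one rename group (RenameWidth) of
  // slack, and allowEnqueueForDispatch keeps two groups so dispatch can be
  // throttled one cycle earlier (both decisions are registered).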

  val redirectWalkDistance = distanceBetween(io.redirect.bits.robIdx, deqPtrVec_next(0))
  when(io.redirect.valid) {
    lastWalkPtr := Mux(io.redirect.bits.flushItself(), io.redirect.bits.robIdx - 1.U, io.redirect.bits.robIdx)
  }


  /**
   * States
   * We put all the stage bits changes here.
   *
   * All events: (1) enqueue (dispatch); (2) writeback; (3) cancel; (4) dequeue (commit);
   * All states: (1) valid; (2) writebacked; (3) flagBkup
   */

  val deqPtrGroup = Wire(Vec(2 * CommitWidth, new RobPtr))
  deqPtrGroup.zipWithIndex.foreach { case (deq, i) => deq := deqPtrVec(0) + i.U }
  val commitReadAddr = Mux(state === s_idle, VecInit(deqPtrVec.map(_.value)), VecInit(walkPtrVec.map(_.value)))

  val redirectValidReg = RegNext(io.redirect.valid)
  val redirectBegin = Reg(UInt(log2Up(RobSize).W))
  val redirectEnd = Reg(UInt(log2Up(RobSize).W))
  val redirectAll = RegInit(false.B)
  when(io.redirect.valid){
    redirectBegin := Mux(io.redirect.bits.flushItself(), io.redirect.bits.robIdx.value - 1.U, io.redirect.bits.robIdx.value)
    redirectEnd := enqPtr.value
    redirectAll := io.redirect.bits.flushItself() && (io.redirect.bits.robIdx.value === enqPtr.value) && (io.redirect.bits.robIdx.flag ^ enqPtr.flag)
  }

  // update robEntries valid
  for (i <- 0 until RobSize) {
    val enqOH = VecInit(canEnqueue.zip(allocatePtrVec.map(_.value === i.U)).map(x => x._1 && x._2))
    val commitCond = io.commits.isCommit && io.commits.commitValid.zip(deqPtrVec.map(_.value === i.U)).map(x => x._1 && x._2).reduce(_ || _)
    assert(PopCount(enqOH) < 2.U, s"robEntries$i enqOH is not one-hot")
    val needFlush = redirectValidReg && (Mux(
      redirectEnd > redirectBegin,
      (i.U > redirectBegin) && (i.U < redirectEnd),
      (i.U > redirectBegin) || (i.U < redirectEnd)
    ) || redirectAll)
    when(commitCond) {
      robEntries(i).valid := false.B
    }.elsewhen(enqOH.asUInt.orR && !io.redirect.valid) {
      robEntries(i).valid := true.B
    }.elsewhen(needFlush){
      robEntries(i).valid := false.B
    }
  }
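
  // Worked example for the wrap-aware flush range above (values illustrative):
  // with RobSize = 160, redirectBegin = 150 and redirectEnd = 10, the range wraps,
  // so entries 151..159 and 0..9 are flushed via the (i > begin) || (i < end) arm;
  // redirectAll additionally covers a flush-itself redirect of a completely full
  // ROB, where robIdx equals enqPtr with opposite wrap flags and the begin/end
  // window alone would flush nothing.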

  // debug_inst update
  for (i <- 0 until (LduCnt + StaCnt)) {
    debug_lsInfo(io.debug_ls.debugLsInfo(i).s1_robIdx).s1SignalEnable(io.debug_ls.debugLsInfo(i))
    debug_lsInfo(io.debug_ls.debugLsInfo(i).s2_robIdx).s2SignalEnable(io.debug_ls.debugLsInfo(i))
    debug_lsInfo(io.debug_ls.debugLsInfo(i).s3_robIdx).s3SignalEnable(io.debug_ls.debugLsInfo(i))
  }
  for (i <- 0 until LduCnt) {
    debug_lsTopdownInfo(io.lsTopdownInfo(i).s1.robIdx).s1SignalEnable(io.lsTopdownInfo(i))
    debug_lsTopdownInfo(io.lsTopdownInfo(i).s2.robIdx).s2SignalEnable(io.lsTopdownInfo(i))
  }

  // status field: writebacked
  // enqueue logic sets writebacked to false for the enqueued entries
  // writeback logic sets the numWbPorts writebacked flags to true
  // if the first uop of an instruction is valid, write the writebacked counter
  val uopEnqValidSeq = io.enq.req.map(req => io.enq.canAccept && req.valid)
  val instEnqValidSeq = io.enq.req.map(req => io.enq.canAccept && req.valid && req.bits.firstUop)
  val enqNeedWriteRFSeq = io.enq.req.map(_.bits.needWriteRf)
  val enqHasExcpSeq = io.enq.req.map(_.bits.hasException)
  val enqRobIdxSeq = io.enq.req.map(req => req.bits.robIdx.value)
  val enqUopNumVec = VecInit(io.enq.req.map(req => req.bits.numUops))
  val enqWBNumVec = VecInit(io.enq.req.map(req => req.bits.numWB))

  private val enqWriteStdVec: Vec[Bool] = VecInit(io.enq.req.map {
    req => FuType.isStore(req.bits.fuType)
  })
  val fflags_wb = fflagsWBs
  val vxsat_wb = vxsatWBs
  for (i <- 0 until RobSize) {

    val robIdxMatchSeq = io.enq.req.map(_.bits.robIdx.value === i.U)
    val uopCanEnqSeq = uopEnqValidSeq.zip(robIdxMatchSeq).map { case (valid, isMatch) => valid && isMatch }
    val instCanEnqSeq = instEnqValidSeq.zip(robIdxMatchSeq).map { case (valid, isMatch) => valid && isMatch }
    val instCanEnqFlag = Cat(instCanEnqSeq).orR
    val hasExcpSeq = enqHasExcpSeq.lazyZip(robIdxMatchSeq).lazyZip(uopEnqValidSeq).map { case (excp, isMatch, valid) => excp && isMatch && valid }
    val hasExcpFlag = Cat(hasExcpSeq).orR
    val isFirstEnq = !robEntries(i).valid && instCanEnqFlag
    val realDestEnqNum = PopCount(enqNeedWriteRFSeq.zip(uopCanEnqSeq).map { case (writeFlag, valid) => writeFlag && valid })
    when(isFirstEnq){
      robEntries(i).realDestSize := realDestEnqNum // Mux(hasExcpFlag, 0.U, realDestEnqNum)
    }.elsewhen(robEntries(i).valid && Cat(uopCanEnqSeq).orR){
      robEntries(i).realDestSize := robEntries(i).realDestSize + realDestEnqNum
    }
    val enqUopNum = PriorityMux(instCanEnqSeq, enqUopNumVec)
    val enqWBNum = PriorityMux(instCanEnqSeq, enqWBNumVec)
    val enqWriteStd = PriorityMux(instCanEnqSeq, enqWriteStdVec)

    val canWbSeq = exuWBs.map(writeback => writeback.valid && writeback.bits.robIdx.value === i.U)
    val canStdWbSeq = VecInit(stdWBs.map(writeback => writeback.valid && writeback.bits.robIdx.value === i.U))
    val wbCnt = Mux1H(canWbSeq, io.writebackNums.map(_.bits))

    val canWbExceptionSeq = exceptionWBs.map(writeback => writeback.valid && writeback.bits.robIdx.value === i.U)
    val needFlush = robEntries(i).needFlush
    val needFlushWriteBack = Wire(Bool())
    needFlushWriteBack := Mux1H(canWbExceptionSeq, io.writebackNeedFlush)
    when(robEntries(i).valid){
      needFlush := needFlush || needFlushWriteBack
    }

    when(robEntries(i).valid && (needFlush || needFlushWriteBack)) {
      // exception flush
      robEntries(i).uopNum := robEntries(i).uopNum - wbCnt
      robEntries(i).stdWritebacked := true.B
    }.elsewhen(!robEntries(i).valid && instCanEnqFlag) {
      // enq set num of uops
      robEntries(i).uopNum := enqWBNum
      robEntries(i).stdWritebacked := Mux(enqWriteStd, false.B, true.B)
    }.elsewhen(robEntries(i).valid) {
      // update by writing back
      robEntries(i).uopNum := robEntries(i).uopNum - wbCnt
      assert(!(robEntries(i).uopNum - wbCnt > robEntries(i).uopNum), s"robEntries $i uopNum underflows!")
      when(canStdWbSeq.asUInt.orR) {
        robEntries(i).stdWritebacked := true.B
      }
    }

    val fflagsCanWbSeq = fflags_wb.map(writeback => writeback.valid && writeback.bits.robIdx.value === i.U && writeback.bits.wflags.getOrElse(false.B))
    val fflagsRes = fflagsCanWbSeq.zip(fflags_wb).map { case (canWb, wb) => Mux(canWb, wb.bits.fflags.get, 0.U) }.fold(false.B)(_ | _)
    when(isFirstEnq) {
      robEntries(i).fflags := 0.U
    }.elsewhen(fflagsRes.orR) {
      robEntries(i).fflags := robEntries(i).fflags | fflagsRes
    }

    val vxsatCanWbSeq = vxsat_wb.map(writeback => writeback.valid && writeback.bits.robIdx.value === i.U)
    val vxsatRes = vxsatCanWbSeq.zip(vxsat_wb).map { case (canWb, wb) => Mux(canWb, wb.bits.vxsat.get, 0.U) }.fold(false.B)(_ | _)
    when(isFirstEnq) {
      robEntries(i).vxsat := 0.U
    }.elsewhen(vxsatRes.orR) {
      robEntries(i).vxsat := robEntries(i).vxsat | vxsatRes
    }

    // trace
    val taken = branchWBs.map(writeback => writeback.valid && writeback.bits.robIdx.value === i.U && writeback.bits.redirect.get.bits.cfiUpdate.taken).reduce(_ || _)
    when(robEntries(i).valid && Itype.isBranchType(robEntries(i).traceBlockInPipe.itype) && taken){
      // the not-taken BranchType code (itype = 4) must be replaced with Taken here
      robEntries(i).traceBlockInPipe.itype := Itype.Taken
    }
  }
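
  // Lifecycle of the per-entry counters updated in the loop above: on first
  // enqueue, uopNum is set to the expected writeback count (numWB) and
  // stdWritebacked is primed false only for stores; each writeback decrements
  // uopNum by wbCnt, and a store-data writeback sets stdWritebacked; an exception
  // flush short-circuits this by forcing stdWritebacked. An entry is considered
  // fully written back when uopNum reaches zero and stdWritebacked is set
  // (that check is derived outside this loop).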
1055
1056  // begin update robBanksRdata
1057  val robBanksRdata = VecInit(robBanksRdataThisLine ++ robBanksRdataNextLine)
1058  val needUpdate = Wire(Vec(2 * CommitWidth, new RobEntryBundle))
1059  needUpdate := VecInit(robBanksRdataThisLine ++ robBanksRdataNextLine)
1060  val needUpdateRobIdx = robIdxThisLine ++ robIdxNextLine
1061  for (i <- 0 until 2 * CommitWidth) {
1062    val robIdxMatchSeq = io.enq.req.map(_.bits.robIdx.value === needUpdateRobIdx(i))
1063    val uopCanEnqSeq = uopEnqValidSeq.zip(robIdxMatchSeq).map { case (valid, isMatch) => valid && isMatch }
1064    val instCanEnqSeq = instEnqValidSeq.zip(robIdxMatchSeq).map { case (valid, isMatch) => valid && isMatch }
1065    val instCanEnqFlag = Cat(instCanEnqSeq).orR
1066    val realDestEnqNum = PopCount(enqNeedWriteRFSeq.zip(uopCanEnqSeq).map { case (writeFlag, valid) => writeFlag && valid })
1067    when(!needUpdate(i).valid && instCanEnqFlag) {
1068      needUpdate(i).realDestSize := realDestEnqNum
1069    }.elsewhen(needUpdate(i).valid && instCanEnqFlag) {
1070      needUpdate(i).realDestSize := robBanksRdata(i).realDestSize + realDestEnqNum
1071    }
1072    val enqUopNum = PriorityMux(instCanEnqSeq, enqUopNumVec)
1073    val enqWBNum = PriorityMux(instCanEnqSeq, enqWBNumVec)
1074    val enqWriteStd = PriorityMux(instCanEnqSeq, enqWriteStdVec)
1075
1076    val canWbSeq = exuWBs.map(writeback => writeback.valid && writeback.bits.robIdx.value === needUpdateRobIdx(i))
1077    val canStdWbSeq = VecInit(stdWBs.map(writeback => writeback.valid && writeback.bits.robIdx.value === needUpdateRobIdx(i)))
1078    val wbCnt = Mux1H(canWbSeq, io.writebackNums.map(_.bits))
1079
1080    val canWbExceptionSeq = exceptionWBs.map(writeback => writeback.valid && (writeback.bits.robIdx.value === needUpdateRobIdx(i)))
1081    val needFlush = robBanksRdata(i).needFlush
1082    val needFlushWriteBack = Wire(Bool())
1083    needFlushWriteBack := Mux1H(canWbExceptionSeq, io.writebackNeedFlush)
1084    when(needUpdate(i).valid) {
1085      needUpdate(i).needFlush := needFlush || needFlushWriteBack
1086    }
1087
1088    when(needUpdate(i).valid && (needFlush || needFlushWriteBack)) {
1089      // exception flush
1090      needUpdate(i).uopNum := robBanksRdata(i).uopNum - wbCnt
1091      needUpdate(i).stdWritebacked := true.B
1092    }.elsewhen(!needUpdate(i).valid && instCanEnqFlag) {
1093      // enq set num of uops
1094      needUpdate(i).uopNum := enqWBNum
1095      needUpdate(i).stdWritebacked := Mux(enqWriteStd, false.B, true.B)
1096    }.elsewhen(needUpdate(i).valid) {
1097      // update by writing back
1098      needUpdate(i).uopNum := robBanksRdata(i).uopNum - wbCnt
1099      when(canStdWbSeq.asUInt.orR) {
1100        needUpdate(i).stdWritebacked := true.B
1101      }
1102    }
1103
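    // fflags and vxsat are sticky: they are cleared when the entry is (re)allocated and
    // otherwise OR-accumulate the flags of every matching writeback, so a multi-uop
    // instruction gathers the flags of all of its uops.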
1104    val fflagsCanWbSeq = fflags_wb.map(writeback => writeback.valid && writeback.bits.robIdx.value === needUpdateRobIdx(i) && writeback.bits.wflags.getOrElse(false.B))
1105    val fflagsRes = fflagsCanWbSeq.zip(fflags_wb).map { case (canWb, wb) => Mux(canWb, wb.bits.fflags.get, 0.U) }.fold(0.U)(_ | _)
1106    needUpdate(i).fflags := Mux(!robBanksRdata(i).valid && instCanEnqFlag, 0.U, robBanksRdata(i).fflags | fflagsRes)
1107
1108    val vxsatCanWbSeq = vxsat_wb.map(writeback => writeback.valid && writeback.bits.robIdx.value === needUpdateRobIdx(i))
1109    val vxsatRes = vxsatCanWbSeq.zip(vxsat_wb).map { case (canWb, wb) => Mux(canWb, wb.bits.vxsat.get, 0.U) }.fold(0.U)(_ | _)
1110    needUpdate(i).vxsat := Mux(!robBanksRdata(i).valid && instCanEnqFlag, 0.U, robBanksRdata(i).vxsat | vxsatRes)
1111
1112    // trace
1113    val taken = branchWBs.map(writeback => writeback.valid && writeback.bits.robIdx.value === needUpdateRobIdx(i) && writeback.bits.redirect.get.bits.cfiUpdate.taken).reduce(_ || _)
1114    when(robBanksRdata(i).valid && Itype.isBranchType(robBanksRdata(i).traceBlockInPipe.itype) && taken){
1115      // replace the not-taken branch itype (encoding 4) with Itype.Taken once the branch resolves as taken
1116      needUpdate(i).traceBlockInPipe.itype := Itype.Taken
1117    }
1118  }
1119  robBanksRdataThisLineUpdate := VecInit(needUpdate.take(8))
1120  robBanksRdataNextLineUpdate := VecInit(needUpdate.drop(8))
1121  // end update robBanksRdata
1122
1123  // interrupt_safe
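  // Example: an ALU op is interrupt_safe because it can be flushed and re-executed
  // precisely, while a store that might target MMIO is not, since the access could
  // already be visible to the device before the instruction writes back.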
1124  for (i <- 0 until RenameWidth) {
1125    when(canEnqueue(i)) {
1126      // For now, we allow non-load/store instructions to trigger interrupts.
1127      // MMIO instructions must not trigger interrupts, since they may already have been
1128      // sent to a lower level before they write back.
1129      // However, we cannot tell at enqueue whether a load/store instruction is MMIO,
1130      // so we conservatively do not allow load/store instructions to trigger an interrupt.
1131      // TODO: support non-MMIO load-store instructions to trigger interrupts
1132      val allow_interrupts = !CommitType.isLoadStore(io.enq.req(i).bits.commitType) && !FuType.isFence(io.enq.req(i).bits.fuType) && !FuType.isCsr(io.enq.req(i).bits.fuType) && !FuType.isVset(io.enq.req(i).bits.fuType)
1133      robEntries(allocatePtrVec(i).value).interrupt_safe := allow_interrupts
1134    }
1135  }
1136
1137  /**
1138   * read and write of data modules
1139   */
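  // Read addresses are derived from the *_next pointer vectors so the data-module read
  // can be issued one cycle ahead of use: commit reads follow the dequeue pointers when
  // the next state is idle, otherwise walk reads follow the walk pointers.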
1140  val commitReadAddr_next = Mux(state_next === s_idle,
1141    VecInit(deqPtrVec_next.map(_.value)),
1142    VecInit(walkPtrVec_next.map(_.value))
1143  )
1144
1145  exceptionGen.io.redirect <> io.redirect
1146  exceptionGen.io.flush := io.flushOut.valid
1147
1148  val canEnqueueEG = VecInit(io.enq.req.map(req => req.valid && io.enq.canAccept))
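  // At enqueue only frontend exceptions are known, so selectFrontend filters the
  // exception vector and all vector-specific fields (vstart, vuopIdx, access shape, ...)
  // are zero-initialized; vector exception info arrives later through the writeback ports.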
1149  for (i <- 0 until RenameWidth) {
1150    exceptionGen.io.enq(i).valid := canEnqueueEG(i)
1151    exceptionGen.io.enq(i).bits.robIdx := io.enq.req(i).bits.robIdx
1152    exceptionGen.io.enq(i).bits.ftqPtr := io.enq.req(i).bits.ftqPtr
1153    exceptionGen.io.enq(i).bits.ftqOffset := io.enq.req(i).bits.ftqOffset
1154    exceptionGen.io.enq(i).bits.exceptionVec := ExceptionNO.selectFrontend(io.enq.req(i).bits.exceptionVec)
1155    exceptionGen.io.enq(i).bits.hasException := io.enq.req(i).bits.hasException
1156    exceptionGen.io.enq(i).bits.isEnqExcp := io.enq.req(i).bits.hasException
1157    exceptionGen.io.enq(i).bits.isFetchMalAddr := io.enq.req(i).bits.isFetchMalAddr
1158    exceptionGen.io.enq(i).bits.flushPipe := io.enq.req(i).bits.flushPipe
1159    exceptionGen.io.enq(i).bits.isVset := io.enq.req(i).bits.isVset
1160    exceptionGen.io.enq(i).bits.replayInst := false.B
1161    XSError(canEnqueue(i) && io.enq.req(i).bits.replayInst, "enq should not set replayInst")
1162    exceptionGen.io.enq(i).bits.singleStep := io.enq.req(i).bits.singleStep
1163    exceptionGen.io.enq(i).bits.crossPageIPFFix := io.enq.req(i).bits.crossPageIPFFix
1164    exceptionGen.io.enq(i).bits.trigger := io.enq.req(i).bits.trigger
1165    exceptionGen.io.enq(i).bits.vstartEn := false.B // DontCare
1166    exceptionGen.io.enq(i).bits.vstart := 0.U // DontCare
1167    exceptionGen.io.enq(i).bits.vuopIdx := 0.U
1168    exceptionGen.io.enq(i).bits.isVecLoad := false.B
1169    exceptionGen.io.enq(i).bits.isVlm := false.B
1170    exceptionGen.io.enq(i).bits.isStrided := false.B
1171    exceptionGen.io.enq(i).bits.isIndexed := false.B
1172    exceptionGen.io.enq(i).bits.isWhole := false.B
1173    exceptionGen.io.enq(i).bits.nf := 0.U
1174    exceptionGen.io.enq(i).bits.vsew := 0.U
1175    exceptionGen.io.enq(i).bits.veew := 0.U
1176    exceptionGen.io.enq(i).bits.vlmul := 0.U
1177  }
1178
1179  println("ExceptionGen:")
1180  println(s"num of exceptions: ${params.numException}")
1181  require(exceptionWBs.length == exceptionGen.io.wb.length,
1182    f"exceptionWBs.length: ${exceptionWBs.length}, " +
1183      f"exceptionGen.io.wb.length: ${exceptionGen.io.wb.length}")
1184  for (((wb, exc_wb), i) <- exceptionWBs.zip(exceptionGen.io.wb).zipWithIndex) {
1185    exc_wb.valid       := wb.valid
1186    exc_wb.bits.robIdx := wb.bits.robIdx
1187    // only instructions at enqueue use ftqPtr to read the gpa, so writeback-side entries zero it
1188    exc_wb.bits.ftqPtr          := 0.U.asTypeOf(exc_wb.bits.ftqPtr)
1189    exc_wb.bits.ftqOffset       := 0.U.asTypeOf(exc_wb.bits.ftqOffset)
1190    exc_wb.bits.exceptionVec    := wb.bits.exceptionVec.get
1191    exc_wb.bits.hasException    := wb.bits.exceptionVec.get.asUInt.orR // Todo: use io.writebackNeedFlush(i) instead
1192    exc_wb.bits.isEnqExcp       := false.B
1193    exc_wb.bits.isFetchMalAddr  := false.B
1194    exc_wb.bits.flushPipe       := wb.bits.flushPipe.getOrElse(false.B)
1195    exc_wb.bits.isVset          := false.B
1196    exc_wb.bits.replayInst      := wb.bits.replay.getOrElse(false.B)
1197    exc_wb.bits.singleStep      := false.B
1198    exc_wb.bits.crossPageIPFFix := false.B
1199    val trigger = wb.bits.trigger.getOrElse(TriggerAction.None).asTypeOf(exc_wb.bits.trigger)
1200    exc_wb.bits.trigger := trigger
1201    exc_wb.bits.vstartEn := (if (wb.bits.vls.nonEmpty) wb.bits.exceptionVec.get.asUInt.orR || TriggerAction.isDmode(trigger) else false.B)
1202    exc_wb.bits.vstart := (if (wb.bits.vls.nonEmpty) wb.bits.vls.get.vpu.vstart else 0.U)
1203    exc_wb.bits.vuopIdx := (if (wb.bits.vls.nonEmpty) wb.bits.vls.get.vpu.vuopIdx else 0.U)
1204    exc_wb.bits.isVecLoad := wb.bits.vls.map(_.isVecLoad).getOrElse(false.B)
1205    exc_wb.bits.isVlm := wb.bits.vls.map(_.isVlm).getOrElse(false.B)
1206    exc_wb.bits.isStrided := wb.bits.vls.map(_.isStrided).getOrElse(false.B) // strided accesses need a two-mode temporary vreg
1207    exc_wb.bits.isIndexed := wb.bits.vls.map(_.isIndexed).getOrElse(false.B) // indexed and nf=0 need non-sequential uopidx -> vdidx
1208    exc_wb.bits.isWhole := wb.bits.vls.map(_.isWhole).getOrElse(false.B) // whole-register load/store
1209    exc_wb.bits.nf := wb.bits.vls.map(_.vpu.nf).getOrElse(0.U)
1210    exc_wb.bits.vsew := wb.bits.vls.map(_.vpu.vsew).getOrElse(0.U)
1211    exc_wb.bits.veew := wb.bits.vls.map(_.vpu.veew).getOrElse(0.U)
1212    exc_wb.bits.vlmul := wb.bits.vls.map(_.vpu.vlmul).getOrElse(0.U)
1213  }
1214
1215  fflagsDataRead := (0 until CommitWidth).map(i => robEntries(deqPtrVec(i).value).fflags)
1216  vxsatDataRead := (0 until CommitWidth).map(i => robEntries(deqPtrVec(i).value).vxsat)
1217
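  // Architectural instruction counting: commitValid/info are sampled with RegEnable on
  // isCommit, so trueCommitCnt becomes valid one cycle after the commit decision
  // (tracked by isCommitReg). Each entry contributes instrSize (the number of compressed
  // instructions folded into it) and each fused pair contributes one extra instruction
  // via fuseCommitCnt; +& widens the sums to avoid overflow. E.g. an entry with
  // instrSize = 2 plus a fused entry retires 2 + (1 + 1) = 4 instructions.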
1218  val isCommit = io.commits.isCommit
1219  val isCommitReg = GatedValidRegNext(io.commits.isCommit)
1220  val instrCntReg = RegInit(0.U(64.W))
1221  val fuseCommitCnt = PopCount(io.commits.commitValid.zip(io.commits.info).map { case (v, i) => RegEnable(v && CommitType.isFused(i.commitType), isCommit) })
1222  val trueCommitCnt = RegEnable(io.commits.commitValid.zip(io.commits.info).map { case (v, i) => Mux(v, i.instrSize, 0.U) }.reduce(_ +& _), isCommit) +& fuseCommitCnt
1223  val retireCounter = Mux(isCommitReg, trueCommitCnt, 0.U)
1224  val instrCnt = instrCntReg + retireCounter
1225  when(isCommitReg){
1226    instrCntReg := instrCnt
1227  }
1228  io.csr.perfinfo.retiredInstr := retireCounter
1229  io.robFull := !allowEnqueue
1230  io.headNotReady := commit_vDeqGroup(deqPtr.value(bankNumWidth-1, 0)) && !commit_wDeqGroup(deqPtr.value(bankNumWidth-1, 0))
1231
1232  io.toVecExcpMod.logicPhyRegMap := rab.io.toVecExcpMod.logicPhyRegMap
1233  io.toVecExcpMod.excpInfo := vecExcpInfo
1234
1235  /**
1236   * trace
1237   */
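  // Each commit slot is exported as one trace block; itype classifies the retired
  // instruction for the downstream trace encoder (taken/not-taken branch, exception,
  // interrupt, exception/interrupt return), and block(0) is reserved for exception,
  // interrupt and xret events, as handled below.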
1238
1239  // trace output
1240  val traceValids = io.trace.traceCommitInfo.blocks.map(_.valid)
1241  val traceBlocks = io.trace.traceCommitInfo.blocks
1242  val traceBlockInPipe = io.trace.traceCommitInfo.blocks.map(_.bits.tracePipe)
1243
1244  // The reg 'isTraceXret' is used only for tracing xret instructions; xret can only occur in block(0).
1245  val isTraceXret = RegInit(false.B)
1246  when(io.csr.isXRet){
1247    isTraceXret := true.B
1248  }.elsewhen(isTraceXret && io.commits.isCommit && io.commits.commitValid(0)){
1249    isTraceXret := false.B
1250  }
1251
1252  for (i <- 0 until CommitWidth) {
1253    traceBlocks(i).bits.ftqIdx.foreach(_ := rawInfo(i).ftqIdx)
1254    traceBlocks(i).bits.ftqOffset.foreach(_ := rawInfo(i).ftqOffset)
1255    traceBlockInPipe(i).itype := rawInfo(i).traceBlockInPipe.itype
1256    traceBlockInPipe(i).iretire := rawInfo(i).traceBlockInPipe.iretire
1257    traceBlockInPipe(i).ilastsize := rawInfo(i).traceBlockInPipe.ilastsize
1258    traceValids(i) := io.commits.isCommit && io.commits.commitValid(i)
1259    // exception/xret only occur in block(0).
1260    if(i == 0) {
1261      when(isTraceXret && io.commits.isCommit && io.commits.commitValid(0)){ // trace xret
1262        traceBlocks(i).bits.tracePipe.itype := Itype.ExpIntReturn
1263      }.elsewhen(io.exception.valid){ // trace exception
1264        traceBlocks(i).bits.tracePipe.itype := Mux(io.exception.bits.isInterrupt,
1265          Itype.Interrupt,
1266          Itype.Exception
1267        )
1268        traceValids(i) := true.B
1269        traceBlockInPipe(i).iretire := 0.U
1270      }
1271    }
1272  }
1273
1274  /**
1275   * debug info
1276   */
1277  XSDebug(p"enqPtr ${enqPtr} deqPtr ${deqPtr}\n")
1278  XSDebug("")
1279  XSError(isBefore(enqPtr, deqPtr) && !isFull(enqPtr, deqPtr), "\ndeqPtr is older than enqPtr!\n")
1280  for (i <- 0 until RobSize) {
1281    XSDebug(false, !robEntries(i).valid, "-")
1282    XSDebug(false, robEntries(i).valid && robEntries(i).isWritebacked, "w")
1283    XSDebug(false, robEntries(i).valid && !robEntries(i).isWritebacked, "v")
1284  }
1285  XSDebug(false, true.B, "\n")
1286
1287  for (i <- 0 until RobSize) {
1288    if (i % 4 == 0) XSDebug("")
1289    XSDebug(false, true.B, "%x ", debug_microOp(i).pc)
1290    XSDebug(false, !robEntries(i).valid, "- ")
1291    XSDebug(false, robEntries(i).valid && robEntries(i).isWritebacked, "w ")
1292    XSDebug(false, robEntries(i).valid && !robEntries(i).isWritebacked, "v ")
1293    if (i % 4 == 3) XSDebug(false, true.B, "\n")
1294  }
1295
1296  def ifCommit(counter: UInt): UInt = Mux(isCommit, counter, 0.U)
1297
1298  def ifCommitReg(counter: UInt): UInt = Mux(isCommitReg, counter, 0.U)
1299
1300  val commitDebugUop = deqPtrVec.map(_.value).map(debug_microOp(_))
1301  XSPerfAccumulate("clock_cycle", 1.U, XSPerfLevel.CRITICAL)
1302  QueuePerf(RobSize, numValidEntries, numValidEntries === RobSize.U)
1303  XSPerfAccumulate("commitUop", ifCommit(commitCnt))
1304  XSPerfAccumulate("commitInstr", ifCommitReg(trueCommitCnt), XSPerfLevel.CRITICAL)
1305  XSPerfRolling("ipc", ifCommitReg(trueCommitCnt), 1000, clock, reset)
1306  XSPerfRolling("cpi", perfCnt = 1.U /*Cycle*/ , eventTrigger = ifCommitReg(trueCommitCnt), granularity = 1000, clock, reset)
1307  XSPerfAccumulate("commitInstrFused", ifCommitReg(fuseCommitCnt))
1308  val commitIsLoad = io.commits.info.map(_.commitType).map(_ === CommitType.LOAD)
1309  val commitLoadValid = io.commits.commitValid.zip(commitIsLoad).map { case (v, t) => v && t }
1310  XSPerfAccumulate("commitInstrLoad", ifCommit(PopCount(commitLoadValid)))
1311  val commitIsBranch = io.commits.info.map(_.commitType).map(_ === CommitType.BRANCH)
1312  val commitBranchValid = io.commits.commitValid.zip(commitIsBranch).map { case (v, t) => v && t }
1313  XSPerfAccumulate("commitInstrBranch", ifCommit(PopCount(commitBranchValid)))
1314  val commitIsStore = io.commits.info.map(_.commitType).map(_ === CommitType.STORE)
1315  XSPerfAccumulate("commitInstrStore", ifCommit(PopCount(io.commits.commitValid.zip(commitIsStore).map { case (v, t) => v && t })))
1316  XSPerfAccumulate("writeback", PopCount((0 until RobSize).map(i => robEntries(i).valid && robEntries(i).isWritebacked)))
1317  // XSPerfAccumulate("enqInstr", PopCount(io.dp1Req.map(_.fire)))
1318  // XSPerfAccumulate("d2rVnR", PopCount(io.dp1Req.map(p => p.valid && !p.ready)))
1319  XSPerfAccumulate("walkInstr", Mux(io.commits.isWalk, PopCount(io.commits.walkValid), 0.U))
1320  XSPerfAccumulate("walkCycleTotal", state === s_walk)
1321  XSPerfAccumulate("waitRabWalkEnd", state === s_walk && walkFinished && !rab.io.status.walkEnd)
1322  private val walkCycle = RegInit(0.U(8.W))
1323  private val waitRabWalkCycle = RegInit(0.U(8.W))
1324  walkCycle := Mux(io.redirect.valid, 0.U, Mux(state === s_walk, walkCycle + 1.U, 0.U))
1325  waitRabWalkCycle := Mux(state === s_walk && walkFinished && !rab.io.status.walkEnd, waitRabWalkCycle + 1.U, 0.U)
1326
1327  XSPerfHistogram("walkRobCycleHist", walkCycle, state === s_walk && walkFinished, 0, 32)
1328  XSPerfHistogram("walkRabExtraCycleHist", waitRabWalkCycle, state === s_walk && walkFinished && rab.io.status.walkEnd, 0, 32)
1329  XSPerfHistogram("walkTotalCycleHist", walkCycle, state === s_walk && state_next === s_idle, 0, 32)
1330
1331  private val deqNotWritebacked = robEntries(deqPtr.value).valid && !robEntries(deqPtr.value).isWritebacked
1332  private val deqStdNotWritebacked = robEntries(deqPtr.value).valid && !robEntries(deqPtr.value).stdWritebacked
1333  private val deqUopNotWritebacked = robEntries(deqPtr.value).valid && !robEntries(deqPtr.value).isUopWritebacked
1334  private val deqHeadInfo = debug_microOp(deqPtr.value)
1335  val deqUopCommitType = debug_microOp(deqPtr.value).commitType
1336
1337  XSPerfAccumulate("waitAluCycle", deqNotWritebacked && deqHeadInfo.fuType === FuType.alu.U)
1338  XSPerfAccumulate("waitMulCycle", deqNotWritebacked && deqHeadInfo.fuType === FuType.mul.U)
1339  XSPerfAccumulate("waitDivCycle", deqNotWritebacked && deqHeadInfo.fuType === FuType.div.U)
1340  XSPerfAccumulate("waitBrhCycle", deqNotWritebacked && deqHeadInfo.fuType === FuType.brh.U)
1341  XSPerfAccumulate("waitJmpCycle", deqNotWritebacked && deqHeadInfo.fuType === FuType.jmp.U)
1342  XSPerfAccumulate("waitCsrCycle", deqNotWritebacked && deqHeadInfo.fuType === FuType.csr.U)
1343  XSPerfAccumulate("waitFenCycle", deqNotWritebacked && deqHeadInfo.fuType === FuType.fence.U)
1344  XSPerfAccumulate("waitBkuCycle", deqNotWritebacked && deqHeadInfo.fuType === FuType.bku.U)
1345  XSPerfAccumulate("waitLduCycle", deqNotWritebacked && deqHeadInfo.fuType === FuType.ldu.U)
1346  XSPerfAccumulate("waitStuCycle", deqNotWritebacked && deqHeadInfo.fuType === FuType.stu.U)
1347  XSPerfAccumulate("waitStaCycle", deqUopNotWritebacked && deqHeadInfo.fuType === FuType.stu.U)
1348  XSPerfAccumulate("waitStdCycle", deqStdNotWritebacked && deqHeadInfo.fuType === FuType.stu.U)
1349  XSPerfAccumulate("waitAtmCycle", deqStdNotWritebacked && deqHeadInfo.fuType === FuType.mou.U)
1350
1351  XSPerfAccumulate("waitVfaluCycle", deqStdNotWritebacked && deqHeadInfo.fuType === FuType.vfalu.U)
1352  XSPerfAccumulate("waitVfmaCycle", deqStdNotWritebacked && deqHeadInfo.fuType === FuType.vfma.U)
1353  XSPerfAccumulate("waitVfdivCycle", deqStdNotWritebacked && deqHeadInfo.fuType === FuType.vfdiv.U)
1354
1355  val vfalufuop = Seq(VfaluType.vfadd, VfaluType.vfwadd, VfaluType.vfwadd_w, VfaluType.vfsub, VfaluType.vfwsub, VfaluType.vfwsub_w, VfaluType.vfmin, VfaluType.vfmax,
1356    VfaluType.vfmerge, VfaluType.vfmv, VfaluType.vfsgnj, VfaluType.vfsgnjn, VfaluType.vfsgnjx, VfaluType.vfeq, VfaluType.vfne, VfaluType.vflt, VfaluType.vfle, VfaluType.vfgt,
1357    VfaluType.vfge, VfaluType.vfclass, VfaluType.vfmv_f_s, VfaluType.vfmv_s_f, VfaluType.vfredusum, VfaluType.vfredmax, VfaluType.vfredmin, VfaluType.vfredosum, VfaluType.vfwredosum)
1358
1359  vfalufuop.zipWithIndex.foreach {
1360    case (fuoptype, i) => XSPerfAccumulate(s"waitVfalu_${i}Cycle", deqStdNotWritebacked && deqHeadInfo.fuOpType === fuoptype && deqHeadInfo.fuType === FuType.vfalu.U)
1361  }
1362
1365  XSPerfAccumulate("waitNormalCycle", deqNotWritebacked && deqUopCommitType === CommitType.NORMAL)
1366  XSPerfAccumulate("waitBranchCycle", deqNotWritebacked && deqUopCommitType === CommitType.BRANCH)
1367  XSPerfAccumulate("waitLoadCycle", deqNotWritebacked && deqUopCommitType === CommitType.LOAD)
1368  XSPerfAccumulate("waitStoreCycle", deqNotWritebacked && deqUopCommitType === CommitType.STORE)
1369  XSPerfAccumulate("robHeadPC", io.commits.info(0).debug_pc.getOrElse(0.U))
1370  XSPerfAccumulate("commitCompressCntAll", PopCount(io.commits.commitValid.zip(io.commits.info).map { case (valid, info) => io.commits.isCommit && valid && info.instrSize > 1.U }))
1371  (2 to RenameWidth).foreach(i =>
1372    XSPerfAccumulate(s"commitCompressCnt${i}", PopCount(io.commits.commitValid.zip(io.commits.info).map { case (valid, info) => io.commits.isCommit && valid && info.instrSize === i.U }))
1373  )
1374  XSPerfAccumulate("compressSize", io.commits.commitValid.zip(io.commits.info).map { case (valid, info) => Mux(io.commits.isCommit && valid && info.instrSize > 1.U, info.instrSize, 0.U) }.reduce(_ +& _))
1375  val dispatchLatency = commitDebugUop.map(uop => uop.debugInfo.dispatchTime - uop.debugInfo.renameTime)
1376  val enqRsLatency = commitDebugUop.map(uop => uop.debugInfo.enqRsTime - uop.debugInfo.dispatchTime)
1377  val selectLatency = commitDebugUop.map(uop => uop.debugInfo.selectTime - uop.debugInfo.enqRsTime)
1378  val issueLatency = commitDebugUop.map(uop => uop.debugInfo.issueTime - uop.debugInfo.selectTime)
1379  val executeLatency = commitDebugUop.map(uop => uop.debugInfo.writebackTime - uop.debugInfo.issueTime)
1380  val rsFuLatency = commitDebugUop.map(uop => uop.debugInfo.writebackTime - uop.debugInfo.enqRsTime)
1381  val commitLatency = commitDebugUop.map(uop => timer - uop.debugInfo.writebackTime)
1382
1383  def latencySum(cond: Seq[Bool], latency: Seq[UInt]): UInt = {
1384    cond.zip(latency).map(x => Mux(x._1, x._2, 0.U)).reduce(_ +& _)
1385  }
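  // Example: latencySum(commitIsFuType, executeLatency) sums the issue-to-writeback
  // latency over this cycle's committing uops of one FU type, as used in the loop below.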
1386
1387  for (fuType <- FuType.functionNameMap.keys) {
1388    val fuName = FuType.functionNameMap(fuType)
1389    val commitIsFuType = io.commits.commitValid.zip(commitDebugUop).map(x => x._1 && x._2.fuType === fuType.U)
1390    XSPerfRolling(s"ipc_futype_${fuName}", ifCommit(PopCount(commitIsFuType)), 1000, clock, reset)
1391    XSPerfAccumulate(s"${fuName}_instr_cnt", ifCommit(PopCount(commitIsFuType)))
1392    XSPerfAccumulate(s"${fuName}_latency_dispatch", ifCommit(latencySum(commitIsFuType, dispatchLatency)))
1393    XSPerfAccumulate(s"${fuName}_latency_enq_rs", ifCommit(latencySum(commitIsFuType, enqRsLatency)))
1394    XSPerfAccumulate(s"${fuName}_latency_select", ifCommit(latencySum(commitIsFuType, selectLatency)))
1395    XSPerfAccumulate(s"${fuName}_latency_issue", ifCommit(latencySum(commitIsFuType, issueLatency)))
1396    XSPerfAccumulate(s"${fuName}_latency_execute", ifCommit(latencySum(commitIsFuType, executeLatency)))
1397    XSPerfAccumulate(s"${fuName}_latency_enq_rs_execute", ifCommit(latencySum(commitIsFuType, rsFuLatency)))
1398    XSPerfAccumulate(s"${fuName}_latency_commit", ifCommit(latencySum(commitIsFuType, commitLatency)))
1399  }
1400  XSPerfAccumulate(s"redirect_use_snapshot", io.redirect.valid && io.snpt.useSnpt)
1401
1402  // top-down info
1403  io.debugTopDown.toCore.robHeadVaddr.valid := debug_lsTopdownInfo(deqPtr.value).s1.vaddr_valid
1404  io.debugTopDown.toCore.robHeadVaddr.bits := debug_lsTopdownInfo(deqPtr.value).s1.vaddr_bits
1405  io.debugTopDown.toCore.robHeadPaddr.valid := debug_lsTopdownInfo(deqPtr.value).s2.paddr_valid
1406  io.debugTopDown.toCore.robHeadPaddr.bits := debug_lsTopdownInfo(deqPtr.value).s2.paddr_bits
1407  io.debugTopDown.toDispatch.robTrueCommit := ifCommitReg(trueCommitCnt)
1408  io.debugTopDown.toDispatch.robHeadLsIssue := debug_lsIssue(deqPtr.value)
1409  io.debugTopDown.robHeadLqIdx.valid := debug_lqIdxValid(deqPtr.value)
1410  io.debugTopDown.robHeadLqIdx.bits := debug_microOp(deqPtr.value).lqIdx
1411
1412  // rolling
1413  io.debugRolling.robTrueCommit := ifCommitReg(trueCommitCnt)
1414
1415  /**
1416   * Database info:
1417   * logging is triggered when a writeback is valid
1418   */
1419  if (!env.FPGAPlatform) {
1420    val instTableName = "InstTable" + p(XSCoreParamsKey).HartId.toString
1421    val instSiteName = "Rob" + p(XSCoreParamsKey).HartId.toString
1422    val debug_instTable = ChiselDB.createTable(instTableName, new InstInfoEntry)
1423    for (wb <- exuWBs) {
1424      when(wb.valid) {
1425        val debug_instData = Wire(new InstInfoEntry)
1426        val idx = wb.bits.robIdx.value
1427        debug_instData.robIdx := idx
1428        debug_instData.dvaddr := wb.bits.debug.vaddr
1429        debug_instData.dpaddr := wb.bits.debug.paddr
1430        debug_instData.issueTime := wb.bits.debugInfo.issueTime
1431        debug_instData.writebackTime := wb.bits.debugInfo.writebackTime
1432        debug_instData.dispatchLatency := wb.bits.debugInfo.dispatchTime - wb.bits.debugInfo.renameTime
1433        debug_instData.enqRsLatency := wb.bits.debugInfo.enqRsTime - wb.bits.debugInfo.dispatchTime
1434        debug_instData.selectLatency := wb.bits.debugInfo.selectTime - wb.bits.debugInfo.enqRsTime
1435        debug_instData.issueLatency := wb.bits.debugInfo.issueTime - wb.bits.debugInfo.selectTime
1436        debug_instData.executeLatency := wb.bits.debugInfo.writebackTime - wb.bits.debugInfo.issueTime
1437        debug_instData.rsFuLatency := wb.bits.debugInfo.writebackTime - wb.bits.debugInfo.enqRsTime
1438        debug_instData.tlbLatency := wb.bits.debugInfo.tlbRespTime - wb.bits.debugInfo.tlbFirstReqTime
1439        debug_instData.exceptType := Cat(wb.bits.exceptionVec.getOrElse(ExceptionVec(false.B)))
1440        debug_instData.lsInfo := debug_lsInfo(idx)
1441        // debug_instData.globalID := wb.bits.uop.ctrl.debug_globalID
1442        // debug_instData.instType := wb.bits.uop.ctrl.fuType
1443        // debug_instData.ivaddr := wb.bits.uop.cf.pc
1444        // debug_instData.mdpInfo.ssid := wb.bits.uop.cf.ssid
1445        // debug_instData.mdpInfo.waitAllStore := wb.bits.uop.cf.loadWaitStrict && wb.bits.uop.cf.loadWaitBit
1446        debug_instTable.log(
1447          data = debug_instData,
1448          en = wb.valid,
1449          site = instSiteName,
1450          clock = clock,
1451          reset = reset
1452        )
1453      }
1454    }
1455  }
1456
1457  val debug_VecOtherPdest = RegInit(VecInit.fill(RobSize)(VecInit.fill(8)(0.U(PhyRegIdxWidth.W))))
1458
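  // Record the physical destination of every vector-load uop, indexed by vdIdx, so that
  // difftest can report all destination registers of a multi-uop vector load through
  // otherwpdest; the depth of 8 presumably covers the largest register group (LMUL = 8).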
1459  vldWBs.foreach { vldWb =>
1460    val vldWbPdest  = vldWb.bits.pdest
1461    val vldWbRobIdx = vldWb.bits.robIdx.value
1462    val vldWbvdIdx  = vldWb.bits.vls.get.vdIdx
1463    when (vldWb.fire && robEntries(vldWbRobIdx).valid && (vldWb.bits.vecWen.get || vldWb.bits.v0Wen.get)) {
1464      debug_VecOtherPdest(vldWbRobIdx)(vldWbvdIdx) := vldWbPdest
1465    }
1466  }
1467
1468  // difftest signals
1469  val firstValidCommit = (deqPtr + PriorityMux(io.commits.commitValid, VecInit(List.tabulate(CommitWidth)(_.U(log2Up(CommitWidth).W))))).value
1470
1471  val wdata = Wire(Vec(CommitWidth, UInt(XLEN.W)))
1472  val wpc = Wire(Vec(CommitWidth, UInt(XLEN.W)))
1473
1474  for (i <- 0 until CommitWidth) {
1475    val idx = deqPtrVec(i).value
1476    wdata(i) := debug_exuData(idx)
1477    wpc(i) := SignExt(commitDebugUop(i).pc, XLEN)
1478  }
1479
1480  if (env.EnableDifftest || env.AlwaysBasicDiff) {
1481    // These structures are used only by difftest and are expected to be optimized out during synthesis.
1482    val dt_eliminatedMove = Mem(RobSize, Bool())
1483    val dt_isRVC = Mem(RobSize, Bool())
1484    val dt_exuDebug = Reg(Vec(RobSize, new DebugBundle))
1485    for (i <- 0 until RenameWidth) {
1486      when(canEnqueue(i)) {
1487        dt_eliminatedMove(allocatePtrVec(i).value) := io.enq.req(i).bits.eliminatedMove
1488        dt_isRVC(allocatePtrVec(i).value) := io.enq.req(i).bits.preDecodeInfo.isRVC
1489      }
1490    }
1491    for (wb <- exuWBs) {
1492      when(wb.valid) {
1493        val wbIdx = wb.bits.robIdx.value
1494        dt_exuDebug(wbIdx) := wb.bits.debug
1495      }
1496    }
1497    // Always instantiate basic difftest modules.
1498    for (i <- 0 until CommitWidth) {
1499      val uop = commitDebugUop(i)
1500      val commitInfo = io.commits.info(i)
1501      val ptr = deqPtrVec(i).value
1502      val exuOut = dt_exuDebug(ptr)
1503      val eliminatedMove = dt_eliminatedMove(ptr)
1504      val isRVC = dt_isRVC(ptr)
1505      val instr = uop.instr.asTypeOf(new XSInstBitFields)
1506      val isVLoad = instr.isVecLoad
1507
1508      val difftest = DifftestModule(new DiffInstrCommit(MaxPhyRegs), delay = 3, dontCare = true)
1509      val dt_skip = Mux(eliminatedMove, false.B, exuOut.isSkipDiff)
1510      difftest.coreid := io.hartId
1511      difftest.index := i.U
1512      difftest.valid := io.commits.commitValid(i) && io.commits.isCommit
1513      difftest.skip := dt_skip
1514      difftest.isRVC := isRVC
1515      difftest.rfwen := io.commits.commitValid(i) && commitInfo.rfWen && commitInfo.debug_ldest.get =/= 0.U
1516      difftest.fpwen := io.commits.commitValid(i) && uop.fpWen
1517      difftest.vecwen := io.commits.commitValid(i) && uop.vecWen
1518      difftest.v0wen := io.commits.commitValid(i) && (uop.v0Wen || isVLoad && instr.VD === 0.U)
1519      difftest.wpdest := commitInfo.debug_pdest.get
1520      difftest.wdest := Mux(isVLoad, instr.VD, commitInfo.debug_ldest.get)
1521      difftest.otherwpdest := debug_VecOtherPdest(ptr)
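      // nFused counts the retired instructions beyond the first in this entry:
      // (isFused ? 1 : 0) + instrSize - 1, so a plain single-instruction entry yields 0
      // and the assertion below guards against underflow.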
1522      difftest.nFused := CommitType.isFused(commitInfo.commitType).asUInt + commitInfo.instrSize - 1.U
1523      when(difftest.valid) {
1524        assert(CommitType.isFused(commitInfo.commitType).asUInt + commitInfo.instrSize >= 1.U)
1525      }
1526      if (env.EnableDifftest) {
1528        difftest.pc := SignExt(uop.pc, XLEN)
1529        difftest.instr := uop.instr
1530        difftest.robIdx := ZeroExt(ptr, 10)
1531        difftest.lqIdx := ZeroExt(uop.lqIdx.value, 7)
1532        difftest.sqIdx := ZeroExt(uop.sqIdx.value, 7)
1533        difftest.isLoad := io.commits.info(i).commitType === CommitType.LOAD
1534        difftest.isStore := io.commits.info(i).commitType === CommitType.STORE
1535        // Check LoadEvent only for AMO, scalar-load, or vector-load uops, skipping entries marked as diff-skip (e.g. MMIO)
1536        val difftestLoadEvent = DifftestModule(new DiffLoadEvent, delay = 3)
1537        difftestLoadEvent.coreid := io.hartId
1538        difftestLoadEvent.index := i.U
1539        val loadCheck = (FuType.isAMO(uop.fuType) || FuType.isLoad(uop.fuType) || isVLoad) && !dt_skip
1540        difftestLoadEvent.valid    := io.commits.commitValid(i) && io.commits.isCommit && loadCheck
1541        difftestLoadEvent.paddr    := exuOut.paddr
1542        difftestLoadEvent.opType   := uop.fuOpType
1543        difftestLoadEvent.isAtomic := FuType.isAMO(uop.fuType)
1544        difftestLoadEvent.isLoad   := FuType.isLoad(uop.fuType)
1545        difftestLoadEvent.isVLoad  := isVLoad
1546      }
1547    }
1548  }
1549
1550  if (env.EnableDifftest || env.AlwaysBasicDiff) {
1551    val dt_isXSTrap = Mem(RobSize, Bool())
1552    for (i <- 0 until RenameWidth) {
1553      when(canEnqueue(i)) {
1554        dt_isXSTrap(allocatePtrVec(i).value) := io.enq.req(i).bits.isXSTrap
1555      }
1556    }
1557    val trapVec = io.commits.commitValid.zip(deqPtrVec).map { case (v, d) =>
1558      io.commits.isCommit && v && dt_isXSTrap(d.value)
1559    }
1560    val hitTrap = trapVec.reduce(_ || _)
1561    val difftest = DifftestModule(new DiffTrapEvent, dontCare = true)
1562    difftest.coreid := io.hartId
1563    difftest.hasTrap := hitTrap
1564    difftest.cycleCnt := timer
1565    difftest.instrCnt := instrCnt
1566    difftest.hasWFI := hasWFI
1567
1568    if (env.EnableDifftest) {
1569      val trapCode = PriorityMux(wdata.zip(trapVec).map(x => x._2 -> x._1))
1570      val trapPC = SignExt(PriorityMux(wpc.zip(trapVec).map(x => x._2 -> x._1)), XLEN)
1571      difftest.code := trapCode
1572      difftest.pc := trapPC
1573    }
1574  }
1575
1576  // store event difftest information
1577  io.storeDebugInfo := DontCare
1578  if (env.EnableDifftest) {
1579    io.storeDebugInfo.foreach { port =>
1580      port.pc := debug_microOp(port.robidx.value).pc
1581    }
1582  }
1583
1584  val brhMispred = PopCount(branchWBs.map(wb => wb.valid && wb.bits.redirect.get.valid))
1585  val jmpMispred = PopCount(jmpWBs.map(wb => wb.valid && wb.bits.redirect.get.valid))
1586  val misPred = brhMispred +& jmpMispred
1587
1588  XSPerfAccumulate("br_mis_pred", misPred)
1589
1590  val commitLoadVec = VecInit(commitLoadValid)
1591  val commitBranchVec = VecInit(commitBranchValid)
1592  val commitStoreVec = VecInit(io.commits.commitValid.zip(commitIsStore).map { case (v, t) => v && t })
1593  val perfEvents = Seq(
1594    ("rob_interrupt_num      ", io.flushOut.valid && intrEnable),
1595    ("rob_exception_num      ", io.flushOut.valid && deqHasException),
1596    ("rob_flush_pipe_num     ", io.flushOut.valid && isFlushPipe),
1597    ("rob_replay_inst_num    ", io.flushOut.valid && isFlushPipe && deqHasReplayInst),
1598    ("rob_commitUop          ", ifCommit(commitCnt)),
1599    ("rob_commitInstr        ", ifCommitReg(trueCommitCnt)),
1600    ("rob_commitInstrFused   ", ifCommitReg(fuseCommitCnt)),
1601    ("rob_commitInstrLoad    ", ifCommitReg(PopCount(RegEnable(commitLoadVec, isCommit)))),
1602    ("rob_commitInstrBranch  ", ifCommitReg(PopCount(RegEnable(commitBranchVec, isCommit)))),
1603    ("rob_commitInstrStore   ", ifCommitReg(PopCount(RegEnable(commitStoreVec, isCommit)))),
1604    ("rob_walkInstr          ", Mux(io.commits.isWalk, PopCount(io.commits.walkValid), 0.U)),
1605    ("rob_walkCycle          ", (state === s_walk)),
1606    ("rob_1_4_valid          ", numValidEntries <= (RobSize / 4).U),
1607    ("rob_2_4_valid          ", numValidEntries > (RobSize / 4).U && numValidEntries <= (RobSize / 2).U),
1608    ("rob_3_4_valid          ", numValidEntries > (RobSize / 2).U && numValidEntries <= (RobSize * 3 / 4).U),
1609    ("rob_4_4_valid          ", numValidEntries > (RobSize * 3 / 4).U),
1610    ("BR_MIS_PRED            ", misPred),
1611    ("TOTAL_FLUSH            ", io.flushOut.valid)
1612  )
1613  generatePerfEvent()
1614
1615  // max commit-stuck cycle
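  // Saturation sketch: commitStuckCycle has width w = log2Up(maxCommitStuck), so
  // commitStuckCycle.andR is equivalent to commitStuckCycle === ((1 << w) - 1).U,
  // i.e. commit has been stuck for the counter's maximum number of cycles.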
1616  val deqIsMmio = Mux(robEntries(deqPtr.value).valid, robEntries(deqPtr.value).mmio, false.B)
1617  val commitStuck = (!io.commits.commitValid.reduce(_ || _) || !io.commits.isCommit) && !deqIsMmio
1618  val commitStuckCycle = RegInit(0.U(log2Up(maxCommitStuck).W))
1619  when(commitStuck) {
1620    commitStuckCycle := commitStuckCycle + 1.U
1621  }.elsewhen(RegNext(commitStuck)) {
1622    commitStuckCycle := 0.U
1623  }
1624  // check whether commit has been stuck for 2^log2Up(maxCommitStuck) - 1 cycles (counter all ones)
1625  val commitStuck_overflow = commitStuckCycle.andR && (if (wfiResume) true.B else (!hasWFI))
1626  val criticalErrors = Seq(
1627    ("rob_commit_stuck  ", commitStuck_overflow),
1628  )
1629  generateCriticalErrors()
1630
1631
1632  // dontTouch for debug
1633  if (backendParams.debugEn) {
1634    dontTouch(enqPtrVec)
1635    dontTouch(deqPtrVec)
1636    dontTouch(robEntries)
1637    dontTouch(robDeqGroup)
1638    dontTouch(robBanks)
1639    dontTouch(robBanksRaddrThisLine)
1640    dontTouch(robBanksRaddrNextLine)
1641    dontTouch(robBanksRdataThisLine)
1642    dontTouch(robBanksRdataNextLine)
1643    dontTouch(robBanksRdataThisLineUpdate)
1644    dontTouch(robBanksRdataNextLineUpdate)
1645    dontTouch(needUpdate)
1646    val exceptionWBsVec = MixedVecInit(exceptionWBs)
1647    dontTouch(exceptionWBsVec)
1648    dontTouch(commit_wDeqGroup)
1649    dontTouch(commit_vDeqGroup)
1650    dontTouch(commitSizeSumSeq)
1651    dontTouch(walkSizeSumSeq)
1652    dontTouch(commitSizeSumCond)
1653    dontTouch(walkSizeSumCond)
1654    dontTouch(commitSizeSum)
1655    dontTouch(walkSizeSum)
1656    dontTouch(realDestSizeSeq)
1657    dontTouch(walkDestSizeSeq)
1658    dontTouch(io.commits)
1659    dontTouch(commitIsVTypeVec)
1660    dontTouch(walkIsVTypeVec)
1661    dontTouch(commitValidThisLine)
1662    dontTouch(commitReadAddr_next)
1663    dontTouch(donotNeedWalk)
1664    dontTouch(walkPtrVec_next)
1665    dontTouch(walkPtrVec)
1666    dontTouch(deqPtrVec_next)
1667    dontTouch(deqPtrVecForWalk)
1668    dontTouch(snapPtrReadBank)
1669    dontTouch(snapPtrVecForWalk)
1670    dontTouch(shouldWalkVec)
1671    dontTouch(walkFinished)
1672    dontTouch(changeBankAddrToDeqPtr)
1673  }
1674  if (env.EnableDifftest) {
1675    io.commits.info.foreach(info => dontTouch(info.debug_pc.get))
1676  }
1677}
1678