xref: /XiangShan/src/main/scala/xiangshan/backend/rename/Rename.scala (revision 51ad03b0f625dd72200fc44bfef6a42a0563df21)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.backend.rename
18
19import org.chipsalliance.cde.config.Parameters
20import chisel3._
21import chisel3.util._
22import utility._
23import utils._
24import xiangshan._
25import xiangshan.backend.Bundles.{DecodedInst, DynInst}
26import xiangshan.backend.decode.{FusionDecodeInfo, ImmUnion, Imm_I, Imm_LUI_LOAD, Imm_U}
27import xiangshan.backend.fu.FuType
28import xiangshan.backend.rename.freelist._
29import xiangshan.backend.rob.{RobEnqIO, RobPtr}
30import xiangshan.mem.mdp._
31import xiangshan.ExceptionNO._
32import xiangshan.backend.fu.FuType._
33import xiangshan.mem.{EewLog2, GenUSWholeEmul}
34import xiangshan.mem.GenRealFlowNum
35import xiangshan.backend.trace._
36import xiangshan.backend.decode.isa.bitfield.{OPCODE5Bit, XSInstBitFields}
37import xiangshan.backend.fu.NewCSR.CSROoORead
38import yunsuan.{VfaluType, VipuType}
39
40class Rename(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper with HasPerfEvents {
41
42  // params alias
43  private val numRegSrc = backendParams.numRegSrc
44  private val numVecRegSrc = backendParams.numVecRegSrc
45  private val numVecRatPorts = numVecRegSrc
46
47  println(s"[Rename] numRegSrc: $numRegSrc")
48
49  val io = IO(new Bundle() {
50    val redirect = Flipped(ValidIO(new Redirect))
51    val rabCommits = Input(new RabCommitIO)
52    // from csr
53    val singleStep = Input(Bool())
54    // from decode
55    val in = Vec(RenameWidth, Flipped(DecoupledIO(new DecodedInst)))
56    val fusionInfo = Vec(DecodeWidth - 1, Flipped(new FusionDecodeInfo))
57    // ssit read result
58    val ssit = Flipped(Vec(RenameWidth, Output(new SSITEntry)))
59    // waittable read result
60    val waittable = Flipped(Vec(RenameWidth, Output(Bool())))
61    // to rename table
62    val intReadPorts = Vec(RenameWidth, Vec(2, Input(UInt(PhyRegIdxWidth.W))))
63    val fpReadPorts = Vec(RenameWidth, Vec(3, Input(UInt(PhyRegIdxWidth.W))))
64    val vecReadPorts = Vec(RenameWidth, Vec(numVecRatPorts, Input(UInt(PhyRegIdxWidth.W))))
65    val v0ReadPorts = Vec(RenameWidth, Vec(1, Input(UInt(PhyRegIdxWidth.W))))
66    val vlReadPorts = Vec(RenameWidth, Vec(1, Input(UInt(PhyRegIdxWidth.W))))
67    val intRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(IntLogicRegs))))
68    val fpRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(FpLogicRegs))))
69    val vecRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(VecLogicRegs))))
70    val v0RenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(V0LogicRegs))))
71    val vlRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(VlLogicRegs))))
72    // from rename table
73    val int_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
74    val fp_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
75    val vec_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
76    val v0_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
77    val vl_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
78    val int_need_free = Vec(RabCommitWidth, Input(Bool()))
79    // to dispatch1
80    val out = Vec(RenameWidth, DecoupledIO(new DynInst))
81    // for snapshots
82    val snpt = Input(new SnapshotPort)
83    val snptLastEnq = Flipped(ValidIO(new RobPtr))
84    val snptIsFull = Input(Bool())
85    // debug arch ports
86    val debug_int_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None
87    val debug_fp_rat  = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None
88    val debug_vec_rat = if (backendParams.debugEn) Some(Vec(31, Input(UInt(PhyRegIdxWidth.W)))) else None
89    val debug_v0_rat  = if (backendParams.debugEn) Some(Vec(1, Input(UInt(PhyRegIdxWidth.W)))) else None
90    val debug_vl_rat  = if (backendParams.debugEn) Some(Vec(1, Input(UInt(PhyRegIdxWidth.W)))) else None
91    // perf only
92    val stallReason = new Bundle {
93      val in = Flipped(new StallReasonIO(RenameWidth))
94      val out = new StallReasonIO(RenameWidth)
95    }
96  })
97
98  io.in.foreach { in =>
99    PerfCCT.updateInstPos(in.bits.debug_seqNum, PerfCCT.InstPos.AtRename.id.U, in.valid, clock, reset)
100  }
101
102  // io alias
103  private val dispatchCanAcc = io.out.head.ready
104
105  val compressUnit = Module(new CompressUnit())
106  // create the free lists (one per register type)
107  val intFreeList = Module(new MEFreeList(IntPhyRegs))
108  val fpFreeList = Module(new StdFreeList(FpPhyRegs - FpLogicRegs, FpLogicRegs, Reg_F))
109  val vecFreeList = Module(new StdFreeList(VfPhyRegs - VecLogicRegs, VecLogicRegs, Reg_V, 31))
110  val v0FreeList = Module(new StdFreeList(V0PhyRegs - V0LogicRegs, V0LogicRegs, Reg_V0, 1))
111  val vlFreeList = Module(new StdFreeList(VlPhyRegs - VlLogicRegs, VlLogicRegs, Reg_Vl, 1))
112
113
114  intFreeList.io.commit    <> io.rabCommits
115  intFreeList.io.debug_rat.foreach(_ <> io.debug_int_rat.get)
116  fpFreeList.io.commit     <> io.rabCommits
117  fpFreeList.io.debug_rat.foreach(_ <> io.debug_fp_rat.get)
118  vecFreeList.io.commit    <> io.rabCommits
119  vecFreeList.io.debug_rat.foreach(_ <> io.debug_vec_rat.get)
120  v0FreeList.io.commit <> io.rabCommits
121  v0FreeList.io.debug_rat.foreach(_ <> io.debug_v0_rat.get)
122  vlFreeList.io.commit <> io.rabCommits
123  vlFreeList.io.debug_rat.foreach(_ <> io.debug_vl_rat.get)
124
125  // decide whether the given instruction needs to allocate a new physical register (DecodedInst: from decode; RabCommitInfo: from rob)
126  def needDestReg[T <: DecodedInst](reg_t: RegType, x: T): Bool = reg_t match {
127    case Reg_I => x.rfWen
128    case Reg_F => x.fpWen
129    case Reg_V => x.vecWen
130    case Reg_V0 => x.v0Wen
131    case Reg_Vl => x.vlWen
132  }
133  def needDestRegCommit[T <: RabCommitInfo](reg_t: RegType, x: T): Bool = {
134    reg_t match {
135      case Reg_I => x.rfWen
136      case Reg_F => x.fpWen
137      case Reg_V => x.vecWen
138      case Reg_V0 => x.v0Wen
139      case Reg_Vl => x.vlWen
140    }
141  }
142  def needDestRegWalk[T <: RabCommitInfo](reg_t: RegType, x: T): Bool = {
143    reg_t match {
144      case Reg_I => x.rfWen
145      case Reg_F => x.fpWen
146      case Reg_V => x.vecWen
147      case Reg_V0 => x.v0Wen
148      case Reg_Vl => x.vlWen
149    }
150  }
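  // e.g. needDestReg(Reg_I, io.in(0).bits) is true only when the decoded instruction writes an
  // integer architectural register (rfWen); the Commit/Walk variants apply the same check to
  // RabCommitInfo entries on the commit/walk path.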
151
152  // connect [redirect + walk] ports for all free lists (int, fp, vec, v0, vl)
153  Seq(fpFreeList, vecFreeList, intFreeList, v0FreeList, vlFreeList).foreach { case fl =>
154    fl.io.redirect := io.redirect.valid
155    fl.io.walk := io.rabCommits.isWalk
156  }
157  // only when all free lists and dispatch1 have enough space can we do allocation
158  // when isWalk, the free lists can always allocate
159  intFreeList.io.doAllocate := fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
160  fpFreeList.io.doAllocate := intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
161  vecFreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
162  v0FreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
163  vlFreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
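  // note: each free list only performs an allocation when all the other free lists and dispatch1
  // can also accept, so allocation is all-or-nothing across register types (or unconditional
  // while walking).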
164
165  // dispatch1 ready && int/fp/vec/v0/vl free lists can allocate && not walking
166  val canOut = dispatchCanAcc && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !io.rabCommits.isWalk
167
168  compressUnit.io.in.zip(io.in).foreach{ case(sink, source) =>
169    sink.valid := source.valid && !io.singleStep
170    sink.bits := source.bits
171  }
172  val needRobFlags = compressUnit.io.out.needRobFlags
173  val instrSizesVec = compressUnit.io.out.instrSizes
174  val compressMasksVec = compressUnit.io.out.masks
175
176  // speculatively assign a robIdx to each instruction
177  val validCount = PopCount(io.in.zip(needRobFlags).map{ case(in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag}) // number of instructions waiting to enter rob (from decode)
178  val robIdxHead = RegInit(0.U.asTypeOf(new RobPtr))
179  val lastCycleMisprediction = GatedValidRegNext(io.redirect.valid && !io.redirect.bits.flushItself())
180  val robIdxHeadNext = Mux(io.redirect.valid, io.redirect.bits.robIdx, // redirect: move ptr to given rob index
181         Mux(lastCycleMisprediction, robIdxHead + 1.U, // mis-predict: not flush robIdx itself
182           Mux(canOut, robIdxHead + validCount, // instructions successfully entered next stage: increase robIdx
183                      /* default */  robIdxHead))) // no instructions passed by this cycle: stick to old value
184  robIdxHead := robIdxHeadNext
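  // e.g. if two uops with lastUop && needRobFlag are valid this cycle (two rob entries after
  // compression) and canOut holds, validCount = 2 and robIdxHead advances by 2.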
185
186  /**
187    * Rename: allocate free physical registers and update the rename table
188    */
189  val uops = Wire(Vec(RenameWidth, new DynInst))
190  uops.foreach( uop => {
191    uop.srcState      := DontCare
192    uop.debugInfo     := DontCare
193    uop.lqIdx         := DontCare
194    uop.sqIdx         := DontCare
195    uop.waitForRobIdx := DontCare
196    uop.singleStep    := DontCare
197    uop.snapshot      := DontCare
198    uop.srcLoadDependency := DontCare
199    uop.numLsElem       :=  DontCare
200    uop.hasException  :=  DontCare
201    uop.useRegCache   := DontCare
202    uop.regCacheIdx   := DontCare
203    uop.traceBlockInPipe := DontCare
204    uop.isDropAmocasSta := DontCare
205  })
206  private val inst         = Wire(Vec(RenameWidth, new XSInstBitFields))
207  private val isCsr        = Wire(Vec(RenameWidth, Bool()))
208  private val isCsrr       = Wire(Vec(RenameWidth, Bool()))
209  private val isNotWaitForwardCsrr = Wire(Vec(RenameWidth, Bool()))
210  private val isNotBlockBackwardCsrr = Wire(Vec(RenameWidth, Bool()))
211  private val fuType       = uops.map(_.fuType)
212  private val fuOpType     = uops.map(_.fuOpType)
213  private val vtype        = uops.map(_.vpu.vtype)
214  private val sew          = vtype.map(_.vsew)
215  private val lmul         = vtype.map(_.vlmul)
216  private val eew          = uops.map(_.vpu.veew)
217  private val mop          = fuOpType.map(fuOpTypeItem => LSUOpType.getVecLSMop(fuOpTypeItem))
218  private val isVlsType    = fuType.map(fuTypeItem => isVls(fuTypeItem))
219  private val isSegment    = fuType.map(fuTypeItem => isVsegls(fuTypeItem))
220  private val isUnitStride = fuOpType.map(fuOpTypeItem => LSUOpType.isAllUS(fuOpTypeItem))
221  private val nf           = fuOpType.zip(uops.map(_.vpu.nf)).map { case (fuOpTypeItem, nfItem) => Mux(LSUOpType.isWhole(fuOpTypeItem), 0.U, nfItem) }
222  private val mulBits      = 3 // dirty code
223  private val emul         = fuOpType.zipWithIndex.map { case (fuOpTypeItem, index) =>
224    Mux(
225      LSUOpType.isWhole(fuOpTypeItem),
226      GenUSWholeEmul(nf(index)),
227      Mux(
228        LSUOpType.isMasked(fuOpTypeItem),
229        0.U(mulBits.W),
230        EewLog2(eew(index)) - sew(index) + lmul(index)
231      )
232    )
233  }
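  // per the RVV spec, EMUL = (EEW / SEW) * LMUL for normal vector loads/stores, i.e. in log2 form
  // log2(EMUL) = log2(EEW) - log2(SEW) + log2(LMUL), which is what the last branch computes in the
  // hardware's encoded (log2) fields; whole-register and mask accesses use their own fixed EMUL.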
234  private val isVecUnitType = isVlsType.zip(isUnitStride).map { case (isVlsTypeItem, isUnitStrideItem) =>
235    isVlsTypeItem && isUnitStrideItem
236  }
237  private val isfofFixVlUop   = uops.map{x => x.vpu.isVleff && x.lastUop}
238  private val instType = isSegment.zip(mop).map { case (isSegmentItem, mopItem) => Cat(isSegmentItem, mopItem) }
239  // The exact number of 'flows' for a 'unit-stride' access cannot be computed here:
240  //  whether a 'unit-stride' access needs to be split is only known once its address is available.
241  // Scalar instructions are not handled here; their element count is assigned later as needed.
242  private val numLsElem = instType.zipWithIndex.map { case (instTypeItem, index) =>
243    Mux(
244      isVecUnitType(index),
245      VecMemUnitStrideMaxFlowNum.U,
246      GenRealFlowNum(instTypeItem, emul(index), lmul(index), eew(index), sew(index))
247    )
248  }
249  uops.zipWithIndex.foreach { case (u, i) =>
250    u.numLsElem := Mux(io.in(i).valid && isVlsType(i) && !isfofFixVlUop(i), numLsElem(i), 0.U)
251  }
252
253  val needVecDest    = Wire(Vec(RenameWidth, Bool()))
254  val needFpDest     = Wire(Vec(RenameWidth, Bool()))
255  val needIntDest    = Wire(Vec(RenameWidth, Bool()))
256  val needV0Dest     = Wire(Vec(RenameWidth, Bool()))
257  val needVlDest     = Wire(Vec(RenameWidth, Bool()))
258  private val inHeadValid = io.in.head.valid
259
260  val isMove = Wire(Vec(RenameWidth, Bool()))
261  isMove zip io.in.map(_.bits) foreach {
262    case (move, in) => move := Mux(in.exceptionVec.asUInt.orR, false.B, in.isMove)
263  }
264
265  val walkNeedIntDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
266  val walkNeedFpDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
267  val walkNeedVecDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
268  val walkNeedV0Dest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
269  val walkNeedVlDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
270  val walkIsMove = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
271
272  val intSpecWen = Wire(Vec(RenameWidth, Bool()))
273  val fpSpecWen  = Wire(Vec(RenameWidth, Bool()))
274  val vecSpecWen = Wire(Vec(RenameWidth, Bool()))
275  val v0SpecWen = Wire(Vec(RenameWidth, Bool()))
276  val vlSpecWen = Wire(Vec(RenameWidth, Bool()))
277
278  val walkIntSpecWen = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
279
280  val walkPdest = Wire(Vec(RenameWidth, UInt(PhyRegIdxWidth.W)))
281
282  io.out.zipWithIndex.foreach{ case (o, i) =>
283    o.bits.debug_seqNum := io.in(i).bits.debug_seqNum
284  }
285
286  // uop calculation
287  for (i <- 0 until RenameWidth) {
288    (uops(i): Data).waiveAll :<= (io.in(i).bits: Data).waiveAll
289
290    // read only CSRR instruction support: remove blockBackward and waitForward
291    inst(i) := uops(i).instr.asTypeOf(new XSInstBitFields)
292    isCsr(i) := inst(i).OPCODE5Bit === OPCODE5Bit.SYSTEM && inst(i).FUNCT3(1, 0) =/= 0.U
293    isCsrr(i) := isCsr(i) && inst(i).FUNCT3 === BitPat("b?1?") && inst(i).RS1 === 0.U
294    isNotWaitForwardCsrr(i) := isCsrr(i) && LookupTreeDefault(
295      inst(i).CSRIDX, true.B, CSROoORead.waitForwardInOrderCsrReadList.map(_.U -> false.B))
296    isNotBlockBackwardCsrr(i) := isCsrr(i) && LookupTreeDefault(
297      inst(i).CSRIDX, true.B, CSROoORead.blockBackwardInOrderCsrReadList.map(_.U -> false.B))
298
299    /*
300     * For most CSRs, a CSR read (csrr) does not need to wait for preceding (forward) instructions.
301     *
302     * For most CSRs, a CSR read (csrr) does not need to block subsequent (backward) instructions.
303     *
304     * The signal "isCsrr" covers not only the csrr pseudo-instruction, but any CSR instruction that does not write the CSR.
305     */
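    // e.g. "csrr rd, mcycle" is encoded as csrrs rd, mcycle, x0 (FUNCT3 = 010, RS1 = x0): it only
    // reads the CSR, so waitForward/blockBackward can be dropped unless the CSR appears in the
    // in-order read lists checked above.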
306    uops(i).waitForward := io.in(i).bits.waitForward && !isNotWaitForwardCsrr(i)
307    uops(i).blockBackward := io.in(i).bits.blockBackward && !isNotBlockBackwardCsrr(i)
308
309    // update uop fields according to the ssit read result
310    uops(i).storeSetHit := io.ssit(i).valid
311    uops(i).loadWaitStrict := io.ssit(i).strict && io.ssit(i).valid
312    uops(i).ssid := io.ssit(i).ssid
313
314    // update uop fields according to the waittable read result
315    uops(i).loadWaitBit := io.waittable(i)
316
317    uops(i).replayInst := false.B // set by IQ or MemQ
318    // alloc a new phy reg
319    needV0Dest(i) := io.in(i).valid && needDestReg(Reg_V0, io.in(i).bits)
320    needVlDest(i) := io.in(i).valid && needDestReg(Reg_Vl, io.in(i).bits)
321    needVecDest(i) := io.in(i).valid && needDestReg(Reg_V, io.in(i).bits)
322    needFpDest(i) := io.in(i).valid && needDestReg(Reg_F, io.in(i).bits)
323    needIntDest(i) := io.in(i).valid && needDestReg(Reg_I, io.in(i).bits)
324    if (i < RabCommitWidth) {
325      walkNeedIntDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_I, io.rabCommits.info(i))
326      walkNeedFpDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_F, io.rabCommits.info(i))
327      walkNeedVecDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_V, io.rabCommits.info(i))
328      walkNeedV0Dest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_V0, io.rabCommits.info(i))
329      walkNeedVlDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_Vl, io.rabCommits.info(i))
330      walkIsMove(i) := io.rabCommits.info(i).isMove
331    }
332    fpFreeList.io.allocateReq(i) := needFpDest(i)
333    fpFreeList.io.walkReq(i) := walkNeedFpDest(i)
334    vecFreeList.io.allocateReq(i) := needVecDest(i)
335    vecFreeList.io.walkReq(i) := walkNeedVecDest(i)
336    v0FreeList.io.allocateReq(i) := needV0Dest(i)
337    v0FreeList.io.walkReq(i) := walkNeedV0Dest(i)
338    vlFreeList.io.allocateReq(i) := needVlDest(i)
339    vlFreeList.io.walkReq(i) := walkNeedVlDest(i)
340    intFreeList.io.allocateReq(i) := needIntDest(i) && !isMove(i)
341    intFreeList.io.walkReq(i) := walkNeedIntDest(i) && !walkIsMove(i)
342
343    // either no valid instruction from the decode stage, or all resources (dispatch1 + all free lists) are ready
344    io.in(i).ready := !io.in(0).valid || canOut
345
346    uops(i).robIdx := robIdxHead + PopCount(io.in.zip(needRobFlags).take(i).map{ case(in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag})
347    uops(i).instrSize := instrSizesVec(i)
348    val hasExceptionExceptFlushPipe = Cat(selectFrontend(uops(i).exceptionVec) :+ uops(i).exceptionVec(illegalInstr) :+ uops(i).exceptionVec(virtualInstr)).orR || TriggerAction.isDmode(uops(i).trigger)
349    when(isMove(i) || hasExceptionExceptFlushPipe) {
350      uops(i).numUops := 0.U
351      uops(i).numWB := 0.U
352    }
353    if (i > 0) {
354      when(!needRobFlags(i - 1)) {
355        uops(i).firstUop := false.B
356        uops(i).ftqPtr := uops(i - 1).ftqPtr
357        uops(i).ftqOffset := uops(i - 1).ftqOffset
358        uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
359        uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
360      }
361    }
362    when(!needRobFlags(i)) {
363      uops(i).lastUop := false.B
364      uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
365      uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
366    }
367    uops(i).wfflags := (compressMasksVec(i) & Cat(io.in.map(_.bits.wfflags).reverse)).orR
368    uops(i).dirtyFs := (compressMasksVec(i) & Cat(io.in.map(_.bits.fpWen).reverse)).orR
369    uops(i).dirtyVs := (
370      compressMasksVec(i) & Cat(io.in.map(in =>
371        // vector instructions' uopSplitType cannot be UopSplitType.SCA_SIM
372        in.bits.uopSplitType =/= UopSplitType.SCA_SIM &&
373        !UopSplitType.isAMOCAS(in.bits.uopSplitType) &&
374        // vfmv.f.s, vcpop.m, vfirst.m and vmv.x.s don't change vector state
375        !Seq(
376          (FuType.vfalu, VfaluType.vfmv_f_s), // vfmv.f.s
377          (FuType.vipu, VipuType.vcpop_m),    // vcpop.m
378          (FuType.vipu, VipuType.vfirst_m),   // vfirst.m
379          (FuType.vipu, VipuType.vmv_x_s)     // vmv.x.s
380        ).map(x => FuTypeOrR(in.bits.fuType, x._1) && in.bits.fuOpType === x._2).reduce(_ || _)
381      ).reverse)
382    ).orR
383    // psrc(0)/psrc(1)/psrc(2) do not need v0ReadPorts: their srcType already distinguishes whether the source is V0
384    uops(i).psrc(0) := Mux1H(uops(i).srcType(0)(2, 0), Seq(io.intReadPorts(i)(0), io.fpReadPorts(i)(0), io.vecReadPorts(i)(0)))
385    uops(i).psrc(1) := Mux1H(uops(i).srcType(1)(2, 0), Seq(io.intReadPorts(i)(1), io.fpReadPorts(i)(1), io.vecReadPorts(i)(1)))
386    uops(i).psrc(2) := Mux1H(uops(i).srcType(2)(2, 1), Seq(io.fpReadPorts(i)(2), io.vecReadPorts(i)(2)))
387    uops(i).psrc(3) := io.v0ReadPorts(i)(0)
388    uops(i).psrc(4) := io.vlReadPorts(i)(0)
389
390    // for fused instructions, the int rs2 operand (psrc(1)) is bypassed from the next instruction
391    if (i < RenameWidth - 1) {
392      when (io.fusionInfo(i).rs2FromRs2 || io.fusionInfo(i).rs2FromRs1) {
393        uops(i).psrc(1) := Mux(io.fusionInfo(i).rs2FromRs2, io.intReadPorts(i + 1)(1), io.intReadPorts(i + 1)(0))
394      }.elsewhen(io.fusionInfo(i).rs2FromZero) {
395        uops(i).psrc(1) := 0.U
396      }
397    }
398    uops(i).eliminatedMove := isMove(i)
399
400    // update pdest
401    uops(i).pdest := MuxCase(0.U, Seq(
402      needIntDest(i)    ->  intFreeList.io.allocatePhyReg(i),
403      needFpDest(i)     ->  fpFreeList.io.allocatePhyReg(i),
404      needVecDest(i)    ->  vecFreeList.io.allocatePhyReg(i),
405      needV0Dest(i)     ->  v0FreeList.io.allocatePhyReg(i),
406      needVlDest(i)     ->  vlFreeList.io.allocatePhyReg(i),
407    ))
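    // MuxCase is priority ordered (int > fp > vec > v0 > vl); a uop is expected to set at most one
    // of the need*Dest flags, so this picks the register handed out by the corresponding free list
    // (and 0.U when the uop has no destination register).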
408
409    // Assign performance counters
410    uops(i).debugInfo.renameTime := GTimer()
411
412    io.out(i).valid := io.in(i).valid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !io.rabCommits.isWalk
413    io.out(i).bits := uops(i)
414    // dirty code: for LUI, psrc(0) reads from the register file and must be forced to zero
415    if (i == 0) {
416      io.out(i).bits.psrc(0) := Mux(io.out(i).bits.isLUI, 0.U, uops(i).psrc(0))
417    }
418    // Todo: move this logic into the decode stage
419    // dirty code for fence: the lsrc fields are passed through the imm field
420    when (io.out(i).bits.fuType === FuType.fence.U) {
421      io.out(i).bits.imm := Cat(io.in(i).bits.lsrc(1), io.in(i).bits.lsrc(0))
422    }
423
424    // dirty code for SoftPrefetch (prefetch.r/prefetch.w)
425//    when (io.in(i).bits.isSoftPrefetch) {
426//      io.out(i).bits.fuType := FuType.ldu.U
427//      io.out(i).bits.fuOpType := Mux(io.in(i).bits.lsrc(1) === 1.U, LSUOpType.prefetch_r, LSUOpType.prefetch_w)
428//      io.out(i).bits.selImm := SelImm.IMM_S
429//      io.out(i).bits.imm := Cat(io.in(i).bits.imm(io.in(i).bits.imm.getWidth - 1, 5), 0.U(5.W))
430//    }
431
432    // dirty code for lui+addi(w) fusion
433    if (i < RenameWidth - 1) {
434      val fused_lui32 = io.in(i).bits.selImm === SelImm.IMM_LUI32 && io.in(i).bits.fuType === FuType.alu.U
435      when (fused_lui32) {
436        val lui_imm = io.in(i).bits.imm(19, 0)
437        val add_imm = io.in(i + 1).bits.imm(11, 0)
438        require(io.out(i).bits.imm.getWidth >= lui_imm.getWidth + add_imm.getWidth)
439        io.out(i).bits.imm := Cat(lui_imm, add_imm)
440      }
441    }
442
443    // write the speculative rename table
444    // the rat write ports themselves are driven later, in the commit section below
445    intSpecWen(i) := needIntDest(i) && intFreeList.io.canAllocate && intFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
446    fpSpecWen(i)  := needFpDest(i)  && fpFreeList.io.canAllocate  && fpFreeList.io.doAllocate  && !io.rabCommits.isWalk && !io.redirect.valid
447    vecSpecWen(i) := needVecDest(i) && vecFreeList.io.canAllocate && vecFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
448    v0SpecWen(i) := needV0Dest(i) && v0FreeList.io.canAllocate && v0FreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
449    vlSpecWen(i) := needVlDest(i) && vlFreeList.io.canAllocate && vlFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
450
451
452    if (i < RabCommitWidth) {
453      walkIntSpecWen(i) := walkNeedIntDest(i) && !io.redirect.valid
454      walkPdest(i) := io.rabCommits.info(i).pdest
455    } else {
456      walkPdest(i) := io.out(i).bits.pdest
457    }
458  }
459
460  /**
461   * trace begin
462   */
463  // note: fused instructions cannot be rob-compressed
464  val inVec = io.in.map(_.bits)
465  val isRVCVec = inVec.map(_.preDecodeInfo.isRVC)
466  val isFusionVec = inVec.map(_.commitType).map(ctype => CommitType.isFused(ctype))
467
468  val canRobCompressVec = compressUnit.io.out.canCompressVec
469  val iLastSizeVec = isRVCVec.map(isRVC => Mux(isRVC, Ilastsize.HalfWord, Ilastsize.Word))
470  val halfWordNumVec = isRVCVec.map(isRVC => Mux(isRVC, 1.U, 2.U))
471  val halfWordNumMatrix = (0 until RenameWidth).map(
472    i => compressMasksVec(i).asBools.zipWithIndex.map{ case(mask, j) =>
473      Mux(mask, halfWordNumVec(j), 0.U)
474    }
475  )
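  // iretire is counted in halfwords: an RVC instruction contributes 1 and a 32-bit instruction 2;
  // for a rob-compressed group, the per-slot counts selected by compressMasksVec(i) are summed.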
476
477  for (i <- 0 until RenameWidth) {
478    // iretire
479    uops(i).traceBlockInPipe.iretire := Mux(canRobCompressVec(i),
480      halfWordNumMatrix(i).reduce(_ +& _),
481      (if(i < RenameWidth -1) Mux(isFusionVec(i), halfWordNumVec(i+1), 0.U) else 0.U) +& halfWordNumVec(i)
482    )
483
484    // ilastsize
485    val tmp = i
486    val lastIsRVC = WireInit(false.B)
487    (tmp until RenameWidth).map { j =>
488      when(compressMasksVec(i)(j)) {
489        lastIsRVC := io.in(j).bits.preDecodeInfo.isRVC
490      }
491    }
492    uops(i).traceBlockInPipe.ilastsize := Mux(canRobCompressVec(i),
493      Mux(lastIsRVC, Ilastsize.HalfWord, Ilastsize.Word),
494      (if(i < RenameWidth -1) Mux(isFusionVec(i), iLastSizeVec(i+1), iLastSizeVec(i)) else iLastSizeVec(i))
495    )
496
497    // itype
498    uops(i).traceBlockInPipe.itype := Itype.jumpTypeGen(inVec(i).preDecodeInfo.brType, inVec(i).ldest.asTypeOf(new OpRegType), inVec(i).lsrc(0).asTypeOf((new OpRegType)))
499  }
500  /**
501   * trace end
502   */
503
504  /**
505    * How to set psrc:
506    * - bypass the pdest to psrc if previous instructions write to the same ldest as lsrc
507    * - default: psrc from RAT
508    * How to set pdest:
509    * - Mux(isMove, psrc, pdest_from_freelist).
510    *
511    * The critical path of rename lies here:
512    * When move elimination is enabled, we need to update the rat with psrc.
513    * However, psrc may come from a previous instruction's pdest, which in turn comes from the freelist.
514    *
515    * If we expand these logic for pdest(N):
516    * pdest(N) = Mux(isMove(N), psrc(N), freelist_out(N))
517    *          = Mux(isMove(N), Mux(bypass(N, N - 1), pdest(N - 1),
518    *                           Mux(bypass(N, N - 2), pdest(N - 2),
519    *                           ...
520    *                           Mux(bypass(N, 0),     pdest(0),
521    *                                                 rat_out(N))...)),
522    *                           freelist_out(N))
523    */
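  // e.g. for "mv x2, x1; mv x3, x2" renamed in the same group, the second move's pdest is bypassed
  // from the first move's pdest, which is itself the RAT mapping of x1; neither move allocates a
  // new physical register.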
524  // a simple functional model for now
525  io.out(0).bits.pdest := Mux(isMove(0), uops(0).psrc.head, uops(0).pdest)
526
527  // bypass conditions: for each source operand, whether it should take the pdest of an earlier instruction in the same rename group
528  val bypassCond: Vec[MixedVec[UInt]] = Wire(Vec(numRegSrc, MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W)))))
529  require(io.in(0).bits.srcType.size == io.in(0).bits.numSrc)
530  private val pdestLoc = io.in.head.bits.srcType.size // srcType also covers the two extra vector sources: v0 and vl&vtype
531  println(s"[Rename] idx of pdest in bypassCond $pdestLoc")
532  for (i <- 1 until RenameWidth) {
533    val v0Cond = io.in(i).bits.srcType.zipWithIndex.map{ case (s, srcIdx) =>
534      if (srcIdx == 3) (s === SrcType.vp) || (s === SrcType.v0)
535      else false.B
536    }
537    val vlCond = io.in(i).bits.srcType.zipWithIndex.map{ case (s, srcIdx) =>
538      if (srcIdx == 4) s === SrcType.vp
539      else false.B
540    }
541    val vecCond = io.in(i).bits.srcType.map(_ === SrcType.vp)
542    val fpCond  = io.in(i).bits.srcType.map(_ === SrcType.fp)
543    val intCond = io.in(i).bits.srcType.map(_ === SrcType.xp)
544    val target = io.in(i).bits.lsrc
545    for ((((((cond1, (condV0, condVl)), cond2), cond3), t), j) <- vecCond.zip(v0Cond.zip(vlCond)).zip(fpCond).zip(intCond).zip(target).zipWithIndex) {
546      val destToSrc = io.in.take(i).zipWithIndex.map { case (in, k) =>
547        val indexMatch = in.bits.ldest === t
548        val writeMatch = cond3 && needIntDest(k) || cond2 && needFpDest(k) || cond1 && needVecDest(k)
549        val v0vlMatch = condV0 && needV0Dest(k) || condVl && needVlDest(k)
550        indexMatch && writeMatch || v0vlMatch
551      }
552      bypassCond(j)(i - 1) := VecInit(destToSrc).asUInt
553    }
554    // For the LUI instruction: psrc(0) is from register file and should always be zero.
555    io.out(i).bits.psrc(0) := Mux(io.out(i).bits.isLUI, 0.U, io.out.take(i).map(_.bits.pdest).zip(bypassCond(0)(i-1).asBools).foldLeft(uops(i).psrc(0)) {
556      (z, next) => Mux(next._2, next._1, z)
557    })
558    io.out(i).bits.psrc(1) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(1)(i-1).asBools).foldLeft(uops(i).psrc(1)) {
559      (z, next) => Mux(next._2, next._1, z)
560    }
561    io.out(i).bits.psrc(2) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(2)(i-1).asBools).foldLeft(uops(i).psrc(2)) {
562      (z, next) => Mux(next._2, next._1, z)
563    }
564    io.out(i).bits.psrc(3) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(3)(i-1).asBools).foldLeft(uops(i).psrc(3)) {
565      (z, next) => Mux(next._2, next._1, z)
566    }
567    io.out(i).bits.psrc(4) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(4)(i-1).asBools).foldLeft(uops(i).psrc(4)) {
568      (z, next) => Mux(next._2, next._1, z)
569    }
570    io.out(i).bits.pdest := Mux(isMove(i), io.out(i).bits.psrc(0), uops(i).pdest)
571
572    // Todo: better implementation for fields reuse
573    // For fused-lui-load, load.src(0) is replaced by the imm.
574    val last_is_lui = io.in(i - 1).bits.selImm === SelImm.IMM_U && io.in(i - 1).bits.srcType(0) =/= SrcType.pc
575    val this_is_load = io.in(i).bits.fuType === FuType.ldu.U
576    val lui_to_load = io.in(i - 1).valid && io.in(i - 1).bits.rfWen && io.in(i - 1).bits.ldest === io.in(i).bits.lsrc(0)
577    val fused_lui_load = last_is_lui && this_is_load && lui_to_load
578    when (fused_lui_load) {
579      // The first LOAD operand (base address) is replaced by LUI-imm and stored in imm
580      val lui_imm = io.in(i - 1).bits.imm(ImmUnion.U.len - 1, 0)
581      val ld_imm = io.in(i).bits.imm(ImmUnion.I.len - 1, 0)
582      require(io.out(i).bits.imm.getWidth >= lui_imm.getWidth + ld_imm.getWidth)
583      io.out(i).bits.srcType(0) := SrcType.imm
584      io.out(i).bits.imm := Cat(lui_imm, ld_imm)
585    }
586
587  }
588
589  val genSnapshot = Cat(io.out.map(out => out.fire && out.bits.snapshot)).orR
590  val lastCycleCreateSnpt = RegInit(false.B)
591  lastCycleCreateSnpt := genSnapshot && !io.snptIsFull
592  val sameSnptDistance = (RobCommitWidth * 4).U
593  // notInSameSnpt: (1) robIdxHead is at least sameSnptDistance entries past the last snapshot enqueue, or (2) no snapshot has been enqueued
594  val notInSameSnpt = GatedValidRegNext(distanceBetween(robIdxHeadNext, io.snptLastEnq.bits) >= sameSnptDistance || !io.snptLastEnq.valid)
595  val allowSnpt = if (EnableRenameSnapshot) notInSameSnpt && !lastCycleCreateSnpt && io.in.head.bits.firstUop else false.B
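  // a snapshot is only attached to control-flow instructions (CFIs or jumps), is never taken in
  // two consecutive cycles, and is spaced at least sameSnptDistance rob entries from the last one.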
596  io.out.zip(io.in).foreach{ case (out, in) => out.bits.snapshot := allowSnpt && (!in.bits.preDecodeInfo.notCFI || FuType.isJump(in.bits.fuType)) && in.fire }
597  io.out.map{ x =>
598    x.bits.hasException := Cat(selectFrontend(x.bits.exceptionVec) :+ x.bits.exceptionVec(illegalInstr) :+ x.bits.exceptionVec(virtualInstr)).orR || TriggerAction.isDmode(x.bits.trigger)
599  }
600  if(backendParams.debugEn){
601    dontTouch(robIdxHeadNext)
602    dontTouch(notInSameSnpt)
603    dontTouch(genSnapshot)
604  }
605  intFreeList.io.snpt := io.snpt
606  fpFreeList.io.snpt := io.snpt
607  vecFreeList.io.snpt := io.snpt
608  v0FreeList.io.snpt := io.snpt
609  vlFreeList.io.snpt := io.snpt
610  intFreeList.io.snpt.snptEnq := genSnapshot
611  fpFreeList.io.snpt.snptEnq := genSnapshot
612  vecFreeList.io.snpt.snptEnq := genSnapshot
613  v0FreeList.io.snpt.snptEnq := genSnapshot
614  vlFreeList.io.snpt.snptEnq := genSnapshot
615
616  /**
617    * Instructions commit: update freelist and rename table
618    */
619  for (i <- 0 until RabCommitWidth) {
620    val commitValid = io.rabCommits.isCommit && io.rabCommits.commitValid(i)
621    val walkValid = io.rabCommits.isWalk && io.rabCommits.walkValid(i)
622
623    // I. RAT Update
624    // When redirect happens (mis-prediction), don't update the rename table
625    io.intRenamePorts(i).wen  := intSpecWen(i)
626    io.intRenamePorts(i).addr := uops(i).ldest(log2Ceil(IntLogicRegs) - 1, 0)
627    io.intRenamePorts(i).data := io.out(i).bits.pdest
628
629    io.fpRenamePorts(i).wen  := fpSpecWen(i)
630    io.fpRenamePorts(i).addr := uops(i).ldest(log2Ceil(FpLogicRegs) - 1, 0)
631    io.fpRenamePorts(i).data := fpFreeList.io.allocatePhyReg(i)
632
633    io.vecRenamePorts(i).wen := vecSpecWen(i)
634    io.vecRenamePorts(i).addr := uops(i).ldest(log2Ceil(VecLogicRegs) - 1, 0)
635    io.vecRenamePorts(i).data := vecFreeList.io.allocatePhyReg(i)
636
637    io.v0RenamePorts(i).wen := v0SpecWen(i)
638    io.v0RenamePorts(i).addr := uops(i).ldest(log2Ceil(V0LogicRegs) - 1, 0)
639    io.v0RenamePorts(i).data := v0FreeList.io.allocatePhyReg(i)
640
641    io.vlRenamePorts(i).wen := vlSpecWen(i)
642    io.vlRenamePorts(i).addr := uops(i).ldest(log2Ceil(VlLogicRegs) - 1, 0)
643    io.vlRenamePorts(i).data := vlFreeList.io.allocatePhyReg(i)
644
645    // II. Free List Update
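    // note: the int free list (MEFreeList) frees based on io.int_need_free from the rename table
    // rather than directly on commit: with move elimination, several architectural registers may
    // map to the same physical register, so an old pdest can only be released once unreferenced.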
646    intFreeList.io.freeReq(i) := io.int_need_free(i)
647    intFreeList.io.freePhyReg(i) := RegNext(io.int_old_pdest(i))
648    fpFreeList.io.freeReq(i)  := GatedValidRegNext(commitValid && needDestRegCommit(Reg_F, io.rabCommits.info(i)))
649    fpFreeList.io.freePhyReg(i) := io.fp_old_pdest(i)
650    vecFreeList.io.freeReq(i)  := GatedValidRegNext(commitValid && needDestRegCommit(Reg_V, io.rabCommits.info(i)))
651    vecFreeList.io.freePhyReg(i) := io.vec_old_pdest(i)
652    v0FreeList.io.freeReq(i) := GatedValidRegNext(commitValid && needDestRegCommit(Reg_V0, io.rabCommits.info(i)))
653    v0FreeList.io.freePhyReg(i) := io.v0_old_pdest(i)
654    vlFreeList.io.freeReq(i) := GatedValidRegNext(commitValid && needDestRegCommit(Reg_Vl, io.rabCommits.info(i)))
655    vlFreeList.io.freePhyReg(i) := io.vl_old_pdest(i)
656  }
657
658  /*
659  Debug and performance counters
660   */
661  def printRenameInfo(in: DecoupledIO[DecodedInst], out: DecoupledIO[DynInst]) = {
662    XSInfo(out.fire, p"pc:${Hexadecimal(in.bits.pc)} in(${in.valid},${in.ready}) " +
663      p"lsrc(0):${in.bits.lsrc(0)} -> psrc(0):${out.bits.psrc(0)} " +
664      p"lsrc(1):${in.bits.lsrc(1)} -> psrc(1):${out.bits.psrc(1)} " +
665      p"lsrc(2):${in.bits.lsrc(2)} -> psrc(2):${out.bits.psrc(2)} " +
666      p"ldest:${in.bits.ldest} -> pdest:${out.bits.pdest}\n"
667    )
668  }
669
670  for ((x,y) <- io.in.zip(io.out)) {
671    printRenameInfo(x, y)
672  }
673
674  io.out.map { case x =>
675    when(x.valid && x.bits.rfWen){
676      assert(x.bits.ldest =/= 0.U, "rfWen cannot be 1 when Int regfile ldest is 0")
677    }
678  }
679  val debugRedirect = RegEnable(io.redirect.bits, io.redirect.valid)
680  // bad speculation
681  val recStall = io.redirect.valid || io.rabCommits.isWalk
682  val ctrlRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsCtrl, io.rabCommits.isWalk && debugRedirect.debugIsCtrl)
683  val mvioRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsMemVio, io.rabCommits.isWalk && debugRedirect.debugIsMemVio)
684  val otherRecStall = recStall && !(ctrlRecStall || mvioRecStall)
685  XSPerfAccumulate("recovery_stall", recStall)
686  XSPerfAccumulate("control_recovery_stall", ctrlRecStall)
687  XSPerfAccumulate("mem_violation_recovery_stall", mvioRecStall)
688  XSPerfAccumulate("other_recovery_stall", otherRecStall)
689  // freelist stall
690  val notRecStall = !io.out.head.valid && !recStall
691  val intFlStall = notRecStall && inHeadValid && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate
692  val fpFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate
693  val vecFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate
694  val v0FlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate
695  val vlFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate
696  val multiFlStall = notRecStall && inHeadValid && (PopCount(Cat(
697    !intFreeList.io.canAllocate,
698    !fpFreeList.io.canAllocate,
699    !vecFreeList.io.canAllocate,
700    !v0FreeList.io.canAllocate,
701    !vlFreeList.io.canAllocate,
702  )) > 1.U)
703  // other stall
704  val otherStall = notRecStall && !intFlStall && !fpFlStall && !vecFlStall && !v0FlStall && !vlFlStall && !multiFlStall
705
706  io.stallReason.in.backReason.valid := io.stallReason.out.backReason.valid || !io.in.head.ready
707  io.stallReason.in.backReason.bits := Mux(io.stallReason.out.backReason.valid, io.stallReason.out.backReason.bits,
708    MuxCase(TopDownCounters.OtherCoreStall.id.U, Seq(
709      ctrlRecStall  -> TopDownCounters.ControlRecoveryStall.id.U,
710      mvioRecStall  -> TopDownCounters.MemVioRecoveryStall.id.U,
711      otherRecStall -> TopDownCounters.OtherRecoveryStall.id.U,
712      intFlStall    -> TopDownCounters.IntFlStall.id.U,
713      fpFlStall     -> TopDownCounters.FpFlStall.id.U,
714      vecFlStall    -> TopDownCounters.VecFlStall.id.U,
715      v0FlStall     -> TopDownCounters.V0FlStall.id.U,
716      vlFlStall     -> TopDownCounters.VlFlStall.id.U,
717      multiFlStall  -> TopDownCounters.MultiFlStall.id.U,
718    )
719  ))
720  io.stallReason.out.reason.zip(io.stallReason.in.reason).zip(io.in.map(_.valid)).foreach { case ((out, in), valid) =>
721    out := Mux(io.stallReason.in.backReason.valid, io.stallReason.in.backReason.bits, in)
722  }
723
724  XSDebug(io.rabCommits.isWalk, p"Walk Recovery Enabled\n")
725  XSDebug(io.rabCommits.isWalk, p"validVec:${Binary(io.rabCommits.walkValid.asUInt)}\n")
726  for (i <- 0 until RabCommitWidth) {
727    val info = io.rabCommits.info(i)
728    XSDebug(io.rabCommits.isWalk && io.rabCommits.walkValid(i), p"[#$i walk info] " +
729      p"ldest:${info.ldest} rfWen:${info.rfWen} fpWen:${info.fpWen} vecWen:${info.vecWen} v0Wen:${info.v0Wen} vlWen:${info.vlWen}")
730  }
731
732  XSDebug(p"inValidVec: ${Binary(Cat(io.in.map(_.valid)))}\n")
733
734  XSPerfAccumulate("in_valid_count", PopCount(io.in.map(_.valid)))
735  XSPerfAccumulate("in_fire_count", PopCount(io.in.map(_.fire)))
736  XSPerfAccumulate("in_valid_not_ready_count", PopCount(io.in.map(x => x.valid && !x.ready)))
737  XSPerfAccumulate("wait_cycle", !io.in.head.valid && dispatchCanAcc)
738
739  // These stall reasons may overlap each other, but we configure the priority as follows.
740  // walk stall > dispatch stall > int freelist stall > fp freelist stall
741  private val inHeadStall = io.in.head match { case x => x.valid && !x.ready }
742  private val stallForWalk      = inHeadValid &&  io.rabCommits.isWalk
743  private val stallForDispatch  = inHeadValid && !io.rabCommits.isWalk && !dispatchCanAcc
744  private val stallForIntFL     = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate
745  private val stallForFpFL      = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate
746  private val stallForVecFL     = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate
747  private val stallForV0FL      = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate
748  private val stallForVlFL      = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate
749  XSPerfAccumulate("stall_cycle",          inHeadStall)
750  XSPerfAccumulate("stall_cycle_walk",     stallForWalk)
751  XSPerfAccumulate("stall_cycle_dispatch", stallForDispatch)
752  XSPerfAccumulate("stall_cycle_int",      stallForIntFL)
753  XSPerfAccumulate("stall_cycle_fp",       stallForFpFL)
754  XSPerfAccumulate("stall_cycle_vec",      stallForVecFL)
755  XSPerfAccumulate("stall_cycle_v0",       stallForV0FL)
756  XSPerfAccumulate("stall_cycle_vl",       stallForVlFL)
757
758  XSPerfHistogram("in_valid_range",  PopCount(io.in.map(_.valid)),  true.B, 0, DecodeWidth + 1, 1)
759  XSPerfHistogram("in_fire_range",   PopCount(io.in.map(_.fire)),   true.B, 0, DecodeWidth + 1, 1)
760  XSPerfHistogram("out_valid_range", PopCount(io.out.map(_.valid)), true.B, 0, DecodeWidth + 1, 1)
761  XSPerfHistogram("out_fire_range",  PopCount(io.out.map(_.fire)),  true.B, 0, DecodeWidth + 1, 1)
762
763  XSPerfAccumulate("move_instr_count", PopCount(io.out.map(out => out.fire && out.bits.isMove)))
764  val is_fused_lui_load = io.out.map(o => o.fire && o.bits.fuType === FuType.ldu.U && o.bits.srcType(0) === SrcType.imm)
765  XSPerfAccumulate("fused_lui_load_instr_count", PopCount(is_fused_lui_load))
766
767  val renamePerf = Seq(
768    ("rename_in                  ", PopCount(io.in.map(_.valid & io.in(0).ready ))),
769    ("rename_waitinstr           ", PopCount((0 until RenameWidth).map(i => io.in(i).valid && !io.in(i).ready))),
770    ("rename_stall               ", inHeadStall),
771    ("rename_stall_cycle_walk    ", inHeadValid &&  io.rabCommits.isWalk),
772    ("rename_stall_cycle_dispatch", inHeadValid && !io.rabCommits.isWalk && !dispatchCanAcc),
773    ("rename_stall_cycle_int     ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate),
774    ("rename_stall_cycle_fp      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate),
775    ("rename_stall_cycle_vec     ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate),
776    ("rename_stall_cycle_v0      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate),
777    ("rename_stall_cycle_vl      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate),
778  )
779  val intFlPerf = intFreeList.getPerfEvents
780  val fpFlPerf = fpFreeList.getPerfEvents
781  val vecFlPerf = vecFreeList.getPerfEvents
782  val v0FlPerf = v0FreeList.getPerfEvents
783  val vlFlPerf = vlFreeList.getPerfEvents
784  val perfEvents = renamePerf ++ intFlPerf ++ fpFlPerf ++ vecFlPerf ++ v0FlPerf ++ vlFlPerf
785  generatePerfEvent()
786}
787