/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend.rename

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utility._
import utils._
import xiangshan._
import xiangshan.backend.Bundles.{DecodedInst, DynInst}
import xiangshan.backend.decode.{FusionDecodeInfo, ImmUnion, Imm_I, Imm_LUI_LOAD, Imm_U}
import xiangshan.backend.fu.FuType
import xiangshan.backend.rename.freelist._
import xiangshan.backend.rob.{RobEnqIO, RobPtr}
import xiangshan.mem.mdp._
import xiangshan.ExceptionNO._
import xiangshan.backend.fu.FuType._
import xiangshan.mem.{EewLog2, GenUSWholeEmul}
import xiangshan.mem.GenRealFlowNum

class Rename(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper with HasPerfEvents {

  // params alias
  private val numRegSrc = backendParams.numRegSrc
  private val numVecRegSrc = backendParams.numVecRegSrc
  private val numVecRatPorts = numVecRegSrc

  println(s"[Rename] numRegSrc: $numRegSrc")

  val io = IO(new Bundle() {
    val redirect = Flipped(ValidIO(new Redirect))
    val rabCommits = Input(new RabCommitIO)
    // from decode
    val in = Vec(RenameWidth, Flipped(DecoupledIO(new DecodedInst)))
    val fusionInfo = Vec(DecodeWidth - 1, Flipped(new FusionDecodeInfo))
    // ssit read result
    val ssit = Flipped(Vec(RenameWidth, Output(new SSITEntry)))
    // waittable read result
    val waittable = Flipped(Vec(RenameWidth, Output(Bool())))
    // to rename table
    val intReadPorts = Vec(RenameWidth, Vec(2, Input(UInt(PhyRegIdxWidth.W))))
    val fpReadPorts = Vec(RenameWidth, Vec(3, Input(UInt(PhyRegIdxWidth.W))))
    val vecReadPorts = Vec(RenameWidth, Vec(numVecRatPorts, Input(UInt(PhyRegIdxWidth.W))))
    val v0ReadPorts = Vec(RenameWidth, Vec(1, Input(UInt(PhyRegIdxWidth.W))))
    val vlReadPorts = Vec(RenameWidth, Vec(1, Input(UInt(PhyRegIdxWidth.W))))
    val intRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(IntLogicRegs))))
    val fpRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(FpLogicRegs))))
    val vecRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(VecLogicRegs))))
    val v0RenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(V0LogicRegs))))
    val vlRenamePorts = Vec(RenameWidth, Output(new RatWritePort(log2Ceil(VlLogicRegs))))
    // from rename table
    val int_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val fp_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val vec_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val v0_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val vl_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val int_need_free = Vec(RabCommitWidth, Input(Bool()))
    // to dispatch1
    val out = Vec(RenameWidth, DecoupledIO(new DynInst))
    // for snapshots
    val snpt = Input(new SnapshotPort)
    val snptLastEnq = Flipped(ValidIO(new RobPtr))
    val snptIsFull = Input(Bool())
    // debug arch ports
    val debug_int_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_fp_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_vec_rat = if (backendParams.debugEn) Some(Vec(31, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_v0_rat = if (backendParams.debugEn) Some(Vec(1, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_vl_rat = if (backendParams.debugEn) Some(Vec(1, Input(UInt(PhyRegIdxWidth.W)))) else None
    // perf only
    val stallReason = new Bundle {
      val in = Flipped(new StallReasonIO(RenameWidth))
      val out = new StallReasonIO(RenameWidth)
    }
  })

  // io alias
  private val dispatchCanAcc = io.out.head.ready

  val compressUnit = Module(new CompressUnit())
  // create free list and rat
  val intFreeList = Module(new MEFreeList(IntPhyRegs))
  val fpFreeList = Module(new StdFreeList(FpPhyRegs - FpLogicRegs, FpLogicRegs, Reg_F))
  val vecFreeList = Module(new StdFreeList(VfPhyRegs - VecLogicRegs, VecLogicRegs, Reg_V, 31))
  val v0FreeList = Module(new StdFreeList(V0PhyRegs - V0LogicRegs, V0LogicRegs, Reg_V0, 1))
  val vlFreeList = Module(new StdFreeList(VlPhyRegs - VlLogicRegs, VlLogicRegs, Reg_Vl, 1))


  intFreeList.io.commit    <> io.rabCommits
  intFreeList.io.debug_rat.foreach(_ <> io.debug_int_rat.get)
  fpFreeList.io.commit     <> io.rabCommits
  fpFreeList.io.debug_rat.foreach(_ <> io.debug_fp_rat.get)
  vecFreeList.io.commit    <> io.rabCommits
  vecFreeList.io.debug_rat.foreach(_ <> io.debug_vec_rat.get)
  v0FreeList.io.commit     <> io.rabCommits
  v0FreeList.io.debug_rat.foreach(_ <> io.debug_v0_rat.get)
  vlFreeList.io.commit     <> io.rabCommits
  vlFreeList.io.debug_rat.foreach(_ <> io.debug_vl_rat.get)

  // decide whether a given instruction needs a new physical register allocated (DecodedInst: from decode; RabCommitInfo: from rob)
  def needDestReg[T <: DecodedInst](reg_t: RegType, x: T): Bool = reg_t match {
    case Reg_I => x.rfWen && x.ldest =/= 0.U
    case Reg_F => x.fpWen
    case Reg_V => x.vecWen
    case Reg_V0 => x.v0Wen
    case Reg_Vl => x.vlWen
  }
  def needDestRegCommit[T <: RabCommitInfo](reg_t: RegType, x: T): Bool = {
    reg_t match {
      case Reg_I => x.rfWen
      case Reg_F => x.fpWen
      case Reg_V => x.vecWen
      case Reg_V0 => x.v0Wen
      case Reg_Vl => x.vlWen
    }
  }
  def needDestRegWalk[T <: RabCommitInfo](reg_t: RegType, x: T): Bool = {
    reg_t match {
      case Reg_I => x.rfWen && x.ldest =/= 0.U
      case Reg_F => x.fpWen
      case Reg_V => x.vecWen
      case Reg_V0 => x.v0Wen
      case Reg_Vl => x.vlWen
    }
  }
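
  // Example: "addi x0, x1, 0" sets rfWen, but its ldest is x0, so needDestReg(Reg_I, _)
  // is false and no integer physical register is allocated; needDestRegWalk applies the
  // same x0 exclusion, so a walk releases exactly what rename allocated.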

  // connect [redirect + walk] ports for all free lists (int, fp, vec, v0, vl)
  Seq(fpFreeList, vecFreeList, intFreeList, v0FreeList, vlFreeList).foreach { fl =>
    fl.io.redirect := io.redirect.valid
    fl.io.walk := io.rabCommits.isWalk
  }
  // only when all free lists and dispatch1 have enough space can we do allocation
  // when isWalk, the free lists can always allocate
  intFreeList.io.doAllocate := fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  fpFreeList.io.doAllocate := intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  vecFreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  v0FreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  vlFreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
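
  // Note: each list's doAllocate depends on every *other* list (and dispatch1) being able
  // to allocate, so allocation across the five free lists is all-or-nothing: either every
  // list pops registers for this rename group, or none of them does.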

  // allocation can move forward only when dispatch1 and every free list (int, fp, vec, v0, vl) are ready, and we are not walking
  val canOut = dispatchCanAcc && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !io.rabCommits.isWalk

  compressUnit.io.in.zip(io.in).foreach { case (sink, source) =>
    sink.valid := source.valid
    sink.bits := source.bits
  }
  val needRobFlags = compressUnit.io.out.needRobFlags
  val instrSizesVec = compressUnit.io.out.instrSizes
  val compressMasksVec = compressUnit.io.out.masks

  // speculatively assign a robIdx to each instruction
  val validCount = PopCount(io.in.zip(needRobFlags).map { case (in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag }) // number of instructions waiting to enter rob (from decode)
  val robIdxHead = RegInit(0.U.asTypeOf(new RobPtr))
  val lastCycleMisprediction = GatedValidRegNext(io.redirect.valid && !io.redirect.bits.flushItself())
  val robIdxHeadNext = Mux(io.redirect.valid, io.redirect.bits.robIdx, // redirect: move ptr to given rob index
         Mux(lastCycleMisprediction, robIdxHead + 1.U, // mis-predict: not flush robIdx itself
           Mux(canOut, robIdxHead + validCount, // instructions successfully entered next stage: increase robIdx
                      /* default */  robIdxHead))) // no instructions passed by this cycle: stick to old value
  robIdxHead := robIdxHeadNext
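
  // Head-pointer update priority, highest first: redirect (jump to the redirecting
  // instruction's robIdx) > last-cycle misprediction (step just past the kept branch)
  // > normal flow (advance by the number of new ROB entries). For example, if the
  // compress unit marks 4 ROB entries this cycle and canOut holds, robIdxHead advances by 4.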

  /**
    * Rename: allocate free physical register and update rename table
    */
  val uops = Wire(Vec(RenameWidth, new DynInst))
  uops.foreach( uop => {
    uop.srcState      := DontCare
    uop.debugInfo     := DontCare
    uop.lqIdx         := DontCare
    uop.sqIdx         := DontCare
    uop.waitForRobIdx := DontCare
    uop.singleStep    := DontCare
    uop.snapshot      := DontCare
    uop.srcLoadDependency := DontCare
    uop.numLsElem     := DontCare
    uop.hasException  := DontCare
    uop.useRegCache   := DontCare
    uop.regCacheIdx   := DontCare
  })
  private val fuType       = uops.map(_.fuType)
  private val fuOpType     = uops.map(_.fuOpType)
  private val vtype        = uops.map(_.vpu.vtype)
  private val sew          = vtype.map(_.vsew)
  private val lmul         = vtype.map(_.vlmul)
  private val eew          = uops.map(_.vpu.veew)
  private val mop          = fuOpType.map(fuOpTypeItem => LSUOpType.getVecLSMop(fuOpTypeItem))
  private val isVlsType    = fuType.map(fuTypeItem => isVls(fuTypeItem))
  private val isSegment    = fuType.map(fuTypeItem => isVsegls(fuTypeItem))
  private val isUnitStride = fuOpType.map(fuOpTypeItem => LSUOpType.isAllUS(fuOpTypeItem))
  private val nf           = fuOpType.zip(uops.map(_.vpu.nf)).map { case (fuOpTypeItem, nfItem) => Mux(LSUOpType.isWhole(fuOpTypeItem), 0.U, nfItem) }
  private val mulBits      = 3 // dirty code
  private val emul         = fuOpType.zipWithIndex.map { case (fuOpTypeItem, index) =>
    Mux(
      LSUOpType.isWhole(fuOpTypeItem),
      GenUSWholeEmul(nf(index)),
      Mux(
        LSUOpType.isMasked(fuOpTypeItem),
        0.U(mulBits.W),
        EewLog2(eew(index)) - sew(index) + lmul(index)
      )
    )
  }
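
  // emul follows the RVV rule emul = (eew / sew) * lmul, computed in the log2 domain:
  // log2(emul) = log2(eew) - log2(sew) + log2(lmul). A worked example, assuming EewLog2
  // returns eew in the same log2 encoding as vsew: eew = 32b (2), sew = 8b (vsew = 0) and
  // lmul = 1 (vlmul = 0) yield log2(emul) = 2, i.e. the access spans 4 vector registers,
  // unless it is a whole-register or masked access, which take the branches above.
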
  private val isVecUnitType = isVlsType.zip(isUnitStride).map { case (isVlsTypeItem, isUnitStrideItem) =>
    isVlsTypeItem && isUnitStrideItem
  }
  private val instType = isSegment.zip(mop).map { case (isSegmentItem, mopItem) => Cat(isSegmentItem, mopItem) }
  // There is no way to calculate the number of 'flows' for a 'unit-stride' access exactly here:
  //  whether a 'unit-stride' access needs to be split is only known once its address is available.
  // Scalar instructions are not handled here; their numLsElem is assigned later as the situation requires.
  private val numLsElem = instType.zipWithIndex.map { case (instTypeItem, index) =>
    Mux(
      isVecUnitType(index),
      VecMemUnitStrideMaxFlowNum.U,
      GenRealFlowNum(instTypeItem, emul(index), lmul(index), eew(index), sew(index))
    )
  }
  uops.zipWithIndex.foreach { case (u, i) =>
    u.numLsElem := Mux(io.in(i).valid && isVlsType(i), numLsElem(i), 0.U)
  }

  val needVecDest    = Wire(Vec(RenameWidth, Bool()))
  val needFpDest     = Wire(Vec(RenameWidth, Bool()))
  val needIntDest    = Wire(Vec(RenameWidth, Bool()))
  val needV0Dest     = Wire(Vec(RenameWidth, Bool()))
  val needVlDest     = Wire(Vec(RenameWidth, Bool()))
  val hasValid = Cat(io.in.map(_.valid)).orR
  private val inHeadValid = io.in.head.valid

  val isMove = Wire(Vec(RenameWidth, Bool()))
  isMove zip io.in.map(_.bits) foreach {
    case (move, in) => move := Mux(in.exceptionVec.asUInt.orR, false.B, in.isMove)
  }

  val walkNeedIntDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedFpDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedVecDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedV0Dest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedVlDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkIsMove = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))

  val intSpecWen = Wire(Vec(RenameWidth, Bool()))
  val fpSpecWen  = Wire(Vec(RenameWidth, Bool()))
  val vecSpecWen = Wire(Vec(RenameWidth, Bool()))
  val v0SpecWen  = Wire(Vec(RenameWidth, Bool()))
  val vlSpecWen  = Wire(Vec(RenameWidth, Bool()))

  val walkIntSpecWen = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))

  val walkPdest = Wire(Vec(RenameWidth, UInt(PhyRegIdxWidth.W)))

  // uop calculation
  for (i <- 0 until RenameWidth) {
    (uops(i): Data).waiveAll :<= (io.in(i).bits: Data).waiveAll

    // update cf according to ssit result
    uops(i).storeSetHit := io.ssit(i).valid
    uops(i).loadWaitStrict := io.ssit(i).strict && io.ssit(i).valid
    uops(i).ssid := io.ssit(i).ssid

    // update cf according to waittable result
    uops(i).loadWaitBit := io.waittable(i)

    uops(i).replayInst := false.B // set by IQ or MemQ
    // alloc a new phy reg
    needV0Dest(i) := io.in(i).valid && needDestReg(Reg_V0, io.in(i).bits)
    needVlDest(i) := io.in(i).valid && needDestReg(Reg_Vl, io.in(i).bits)
    needVecDest(i) := io.in(i).valid && needDestReg(Reg_V, io.in(i).bits)
    needFpDest(i) := io.in(i).valid && needDestReg(Reg_F, io.in(i).bits)
    needIntDest(i) := io.in(i).valid && needDestReg(Reg_I, io.in(i).bits)
    if (i < RabCommitWidth) {
      walkNeedIntDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_I, io.rabCommits.info(i))
      walkNeedFpDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_F, io.rabCommits.info(i))
      walkNeedVecDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_V, io.rabCommits.info(i))
      walkNeedV0Dest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_V0, io.rabCommits.info(i))
      walkNeedVlDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_Vl, io.rabCommits.info(i))
      walkIsMove(i) := io.rabCommits.info(i).isMove
    }
    fpFreeList.io.allocateReq(i) := needFpDest(i)
    fpFreeList.io.walkReq(i) := walkNeedFpDest(i)
    vecFreeList.io.allocateReq(i) := needVecDest(i)
    vecFreeList.io.walkReq(i) := walkNeedVecDest(i)
    v0FreeList.io.allocateReq(i) := needV0Dest(i)
    v0FreeList.io.walkReq(i) := walkNeedV0Dest(i)
    vlFreeList.io.allocateReq(i) := needVlDest(i)
    vlFreeList.io.walkReq(i) := walkNeedVlDest(i)
    intFreeList.io.allocateReq(i) := needIntDest(i) && !isMove(i)
    intFreeList.io.walkReq(i) := walkNeedIntDest(i) && !walkIsMove(i)

    // backpressure to decode: accept only when dispatch1 and all free lists have space (see canOut)
    io.in(i).ready := canOut

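    // robIdx of uop(i) = current head + the number of older uops in this group that open
    // a new (possibly compressed) ROB entry, i.e. valid last-uops flagged by the compress unit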
    uops(i).robIdx := robIdxHead + PopCount(io.in.zip(needRobFlags).take(i).map { case (in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag })
    uops(i).instrSize := instrSizesVec(i)
    val hasExceptionExceptFlushPipe = Cat(selectFrontend(uops(i).exceptionVec) :+ uops(i).exceptionVec(illegalInstr) :+ uops(i).exceptionVec(virtualInstr)).orR || uops(i).trigger.getFrontendCanFire
    when(isMove(i) || hasExceptionExceptFlushPipe) {
      uops(i).numUops := 0.U
      uops(i).numWB := 0.U
    }
    if (i > 0) {
      when(!needRobFlags(i - 1)) {
        uops(i).firstUop := false.B
        uops(i).ftqPtr := uops(i - 1).ftqPtr
        uops(i).ftqOffset := uops(i - 1).ftqOffset
        uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
        uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
      }
    }
    when(!needRobFlags(i)) {
      uops(i).lastUop := false.B
      uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
      uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
    }
    uops(i).wfflags := (compressMasksVec(i) & Cat(io.in.map(_.bits.wfflags).reverse)).orR
    uops(i).dirtyFs := (compressMasksVec(i) & Cat(io.in.map(_.bits.fpWen).reverse)).orR
    // vector instructions' uopSplitType cannot be UopSplitType.SCA_SIM
    uops(i).dirtyVs := (compressMasksVec(i) & Cat(io.in.map(_.bits.uopSplitType =/= UopSplitType.SCA_SIM).reverse)).orR
    // psrc0, psrc1, psrc2 don't require v0ReadPorts because their srcType can distinguish whether they are V0 or not
    uops(i).psrc(0) := Mux1H(uops(i).srcType(0)(2, 0), Seq(io.intReadPorts(i)(0), io.fpReadPorts(i)(0), io.vecReadPorts(i)(0)))
    uops(i).psrc(1) := Mux1H(uops(i).srcType(1)(2, 0), Seq(io.intReadPorts(i)(1), io.fpReadPorts(i)(1), io.vecReadPorts(i)(1)))
    uops(i).psrc(2) := Mux1H(uops(i).srcType(2)(2, 1), Seq(io.fpReadPorts(i)(2), io.vecReadPorts(i)(2)))
    uops(i).psrc(3) := io.v0ReadPorts(i)(0)
    uops(i).psrc(4) := io.vlReadPorts(i)(0)

    // for fused pairs, the integer rs2 (psrc(1)) is bypassed from the next instruction's rename reads
    if (i < RenameWidth - 1) {
      when (io.fusionInfo(i).rs2FromRs2 || io.fusionInfo(i).rs2FromRs1) {
        uops(i).psrc(1) := Mux(io.fusionInfo(i).rs2FromRs2, io.intReadPorts(i + 1)(1), io.intReadPorts(i + 1)(0))
      }.elsewhen(io.fusionInfo(i).rs2FromZero) {
        uops(i).psrc(1) := 0.U
      }
    }
    uops(i).eliminatedMove := isMove(i)

    // update pdest
    uops(i).pdest := MuxCase(0.U, Seq(
      needIntDest(i) -> intFreeList.io.allocatePhyReg(i),
      needFpDest(i)  -> fpFreeList.io.allocatePhyReg(i),
      needVecDest(i) -> vecFreeList.io.allocatePhyReg(i),
      needV0Dest(i)  -> v0FreeList.io.allocatePhyReg(i),
      needVlDest(i)  -> vlFreeList.io.allocatePhyReg(i),
    ))
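
    // The need*Dest conditions are expected to be at most one-hot per uop (decode sets at
    // most one of rfWen/fpWen/vecWen/v0Wen/vlWen), so the MuxCase priority above should
    // never have to arbitrate between two true conditions.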

    // Assign performance counters
    uops(i).debugInfo.renameTime := GTimer()

    io.out(i).valid := io.in(i).valid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !io.rabCommits.isWalk
    io.out(i).bits := uops(i)
    // Todo: move this logic into the decode stage
    // dirty code for fence: the lsrc fields are passed through the imm
    when (io.out(i).bits.fuType === FuType.fence.U) {
      io.out(i).bits.imm := Cat(io.in(i).bits.lsrc(1), io.in(i).bits.lsrc(0))
    }

    // dirty code for SoftPrefetch (prefetch.r/prefetch.w)
//    when (io.in(i).bits.isSoftPrefetch) {
//      io.out(i).bits.fuType := FuType.ldu.U
//      io.out(i).bits.fuOpType := Mux(io.in(i).bits.lsrc(1) === 1.U, LSUOpType.prefetch_r, LSUOpType.prefetch_w)
//      io.out(i).bits.selImm := SelImm.IMM_S
//      io.out(i).bits.imm := Cat(io.in(i).bits.imm(io.in(i).bits.imm.getWidth - 1, 5), 0.U(5.W))
//    }

    // dirty code for lui+addi(w) fusion
    if (i < RenameWidth - 1) {
      val fused_lui32 = io.in(i).bits.selImm === SelImm.IMM_LUI32 && io.in(i).bits.fuType === FuType.alu.U
      when (fused_lui32) {
        val lui_imm = io.in(i).bits.imm(19, 0)
        val add_imm = io.in(i + 1).bits.imm(11, 0)
        require(io.out(i).bits.imm.getWidth >= lui_imm.getWidth + add_imm.getWidth)
        io.out(i).bits.imm := Cat(lui_imm, add_imm)
      }
    }

    // write speculative rename table
    // we update rat later inside commit code
    intSpecWen(i) := needIntDest(i) && intFreeList.io.canAllocate && intFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
    fpSpecWen(i)  := needFpDest(i)  && fpFreeList.io.canAllocate  && fpFreeList.io.doAllocate  && !io.rabCommits.isWalk && !io.redirect.valid
    vecSpecWen(i) := needVecDest(i) && vecFreeList.io.canAllocate && vecFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
    v0SpecWen(i)  := needV0Dest(i)  && v0FreeList.io.canAllocate  && v0FreeList.io.doAllocate  && !io.rabCommits.isWalk && !io.redirect.valid
    vlSpecWen(i)  := needVlDest(i)  && vlFreeList.io.canAllocate  && vlFreeList.io.doAllocate  && !io.rabCommits.isWalk && !io.redirect.valid


    if (i < RabCommitWidth) {
      walkIntSpecWen(i) := walkNeedIntDest(i) && !io.redirect.valid
      walkPdest(i) := io.rabCommits.info(i).pdest
    } else {
      walkPdest(i) := io.out(i).bits.pdest
    }
  }

  /**
    * How to set psrc:
    * - bypass the pdest to psrc if a previous instruction writes the same ldest as this lsrc
    * - default: psrc from RAT
    * How to set pdest:
    * - Mux(isMove, psrc, pdest_from_freelist).
    *
    * The critical path of rename lies here:
    * When move elimination is enabled, we need to update the rat with psrc.
    * However, psrc may come from a previous instruction's pdest, which in turn comes from the freelist.
    *
    * If we expand this logic for pdest(N):
    * pdest(N) = Mux(isMove(N), psrc(N), freelist_out(N))
    *          = Mux(isMove(N), Mux(bypass(N, N - 1), pdest(N - 1),
    *                           Mux(bypass(N, N - 2), pdest(N - 2),
    *                           ...
    *                           Mux(bypass(N, 0),     pdest(0),
    *                                                 rat_out(N))...)),
    *                           freelist_out(N))
    */
  // a simple functional model for now
  io.out(0).bits.pdest := Mux(isMove(0), uops(0).psrc.head, uops(0).pdest)

  // one bypass condition per source (psrc(0..numRegSrc-1)) plus one for the dest (pdest)
  val bypassCond: Vec[MixedVec[UInt]] = Wire(Vec(numRegSrc + 1, MixedVec(List.tabulate(RenameWidth - 1)(i => UInt((i + 1).W)))))
  require(io.in(0).bits.srcType.size == io.in(0).bits.numSrc)
  private val pdestLoc = io.in.head.bits.srcType.size // 2 extra vector sources: v0 and vl&vtype
  println(s"[Rename] idx of pdest in bypassCond $pdestLoc")
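
  // bypassCond(srcIdx)(i - 1) is an i-bit vector: bit j is set when older instruction j in
  // this rename group writes, in the matching register class, the architectural register
  // that instruction i reads through source srcIdx (the last index tracks ldest-to-ldest
  // matches for the dest).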
  for (i <- 1 until RenameWidth) {
    val v0Cond = io.in(i).bits.srcType.zipWithIndex.map { case (s, idx) =>
      if (idx == 3) (s === SrcType.vp) || (s === SrcType.v0)
      else false.B
    } :+ needV0Dest(i)
    val vlCond = io.in(i).bits.srcType.zipWithIndex.map { case (s, idx) =>
      if (idx == 4) s === SrcType.vp
      else false.B
    } :+ needVlDest(i)
    val vecCond = io.in(i).bits.srcType.map(_ === SrcType.vp) :+ needVecDest(i)
    val fpCond  = io.in(i).bits.srcType.map(_ === SrcType.fp) :+ needFpDest(i)
    val intCond = io.in(i).bits.srcType.map(_ === SrcType.xp) :+ needIntDest(i)
    val target = io.in(i).bits.lsrc :+ io.in(i).bits.ldest
    for ((((((cond1, (condV0, condVl)), cond2), cond3), t), j) <- vecCond.zip(v0Cond.zip(vlCond)).zip(fpCond).zip(intCond).zip(target).zipWithIndex) {
      val destToSrc = io.in.take(i).zipWithIndex.map { case (in, k) =>
        val indexMatch = in.bits.ldest === t
        val writeMatch = cond3 && needIntDest(k) || cond2 && needFpDest(k) || cond1 && needVecDest(k)
        val v0vlMatch = condV0 && needV0Dest(k) || condVl && needVlDest(k)
        indexMatch && writeMatch || v0vlMatch
      }
      bypassCond(j)(i - 1) := VecInit(destToSrc).asUInt
    }
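    // foldLeft starts from the RAT-read psrc and scans producers 0..i-1 in program order;
    // each matching producer overwrites the accumulator, so the youngest older producer
    // wins, which is exactly the intra-group RAW dependency semantics.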
    io.out(i).bits.psrc(0) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(0)(i - 1).asBools).foldLeft(uops(i).psrc(0)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(1) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(1)(i - 1).asBools).foldLeft(uops(i).psrc(1)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(2) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(2)(i - 1).asBools).foldLeft(uops(i).psrc(2)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(3) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(3)(i - 1).asBools).foldLeft(uops(i).psrc(3)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(4) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(4)(i - 1).asBools).foldLeft(uops(i).psrc(4)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.pdest := Mux(isMove(i), io.out(i).bits.psrc(0), uops(i).pdest)

    // Todo: better implementation for fields reuse
    // For fused-lui-load, load.src(0) is replaced by the imm.
    val last_is_lui = io.in(i - 1).bits.selImm === SelImm.IMM_U && io.in(i - 1).bits.srcType(0) =/= SrcType.pc
    val this_is_load = io.in(i).bits.fuType === FuType.ldu.U
    val lui_to_load = io.in(i - 1).valid && io.in(i - 1).bits.ldest === io.in(i).bits.lsrc(0)
    val fused_lui_load = last_is_lui && this_is_load && lui_to_load
    when (fused_lui_load) {
      // The first LOAD operand (base address) is replaced by LUI-imm and stored in imm
      val lui_imm = io.in(i - 1).bits.imm(ImmUnion.U.len - 1, 0)
      val ld_imm = io.in(i).bits.imm(ImmUnion.I.len - 1, 0)
      require(io.out(i).bits.imm.getWidth >= lui_imm.getWidth + ld_imm.getWidth)
      io.out(i).bits.srcType(0) := SrcType.imm
      io.out(i).bits.imm := Cat(lui_imm, ld_imm)
    }

  }

  val genSnapshot = Cat(io.out.map(out => out.fire && out.bits.snapshot)).orR
  val lastCycleCreateSnpt = RegInit(false.B)
  lastCycleCreateSnpt := genSnapshot && !io.snptIsFull
  val sameSnptDistance = (RobCommitWidth * 4).U
  // notInSameSnpt: either (1) robIdxHead is at least sameSnptDistance entries past the last snapshot enqueue, or (2) no snapshot exists
  val notInSameSnpt = GatedValidRegNext(distanceBetween(robIdxHeadNext, io.snptLastEnq.bits) >= sameSnptDistance || !io.snptLastEnq.valid)
  val allowSnpt = if (EnableRenameSnapshot) notInSameSnpt && !lastCycleCreateSnpt && io.in.head.bits.firstUop else false.B
  io.out.zip(io.in).foreach { case (out, in) => out.bits.snapshot := allowSnpt && (!in.bits.preDecodeInfo.notCFI || FuType.isJump(in.bits.fuType)) && in.fire }
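  // Snapshot policy, as encoded above: a snapshot is seeded only on the first uop of a
  // control-flow instruction (a CFI or a jump), is suppressed for one cycle after a
  // snapshot was created, and must be at least sameSnptDistance ROB entries away from
  // the previous snapshot enqueue.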
  io.out.foreach { x =>
    x.bits.hasException := Cat(selectFrontend(x.bits.exceptionVec) :+ x.bits.exceptionVec(illegalInstr) :+ x.bits.exceptionVec(virtualInstr)).orR || x.bits.trigger.getFrontendCanFire
  }
  if (backendParams.debugEn) {
    dontTouch(robIdxHeadNext)
    dontTouch(notInSameSnpt)
    dontTouch(genSnapshot)
  }
  intFreeList.io.snpt := io.snpt
  fpFreeList.io.snpt := io.snpt
  vecFreeList.io.snpt := io.snpt
  v0FreeList.io.snpt := io.snpt
  vlFreeList.io.snpt := io.snpt
  intFreeList.io.snpt.snptEnq := genSnapshot
  fpFreeList.io.snpt.snptEnq := genSnapshot
  vecFreeList.io.snpt.snptEnq := genSnapshot
  v0FreeList.io.snpt.snptEnq := genSnapshot
  vlFreeList.io.snpt.snptEnq := genSnapshot
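  // Note: the snptEnq assignments above deliberately override the field driven by the
  // bulk `io.snpt` connections; by Chisel's last-connect semantics the later `:=` wins,
  // so the free lists enqueue snapshots only when this module's genSnapshot fires.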

  /**
    * Instructions commit: update freelist and rename table
    */
  for (i <- 0 until RabCommitWidth) {
    val commitValid = io.rabCommits.isCommit && io.rabCommits.commitValid(i)
    val walkValid = io.rabCommits.isWalk && io.rabCommits.walkValid(i)

    // I. RAT Update
    // When redirect happens (mis-prediction), don't update the rename table
    io.intRenamePorts(i).wen  := intSpecWen(i)
    io.intRenamePorts(i).addr := uops(i).ldest(log2Ceil(IntLogicRegs) - 1, 0)
    io.intRenamePorts(i).data := io.out(i).bits.pdest

    io.fpRenamePorts(i).wen  := fpSpecWen(i)
    io.fpRenamePorts(i).addr := uops(i).ldest(log2Ceil(FpLogicRegs) - 1, 0)
    io.fpRenamePorts(i).data := fpFreeList.io.allocatePhyReg(i)

    io.vecRenamePorts(i).wen := vecSpecWen(i)
    io.vecRenamePorts(i).addr := uops(i).ldest(log2Ceil(VecLogicRegs) - 1, 0)
    io.vecRenamePorts(i).data := vecFreeList.io.allocatePhyReg(i)

    io.v0RenamePorts(i).wen := v0SpecWen(i)
    io.v0RenamePorts(i).addr := uops(i).ldest(log2Ceil(V0LogicRegs) - 1, 0)
    io.v0RenamePorts(i).data := v0FreeList.io.allocatePhyReg(i)

    io.vlRenamePorts(i).wen := vlSpecWen(i)
    io.vlRenamePorts(i).addr := uops(i).ldest(log2Ceil(VlLogicRegs) - 1, 0)
    io.vlRenamePorts(i).data := vlFreeList.io.allocatePhyReg(i)

    // II. Free List Update
    intFreeList.io.freeReq(i) := io.int_need_free(i)
    intFreeList.io.freePhyReg(i) := RegNext(io.int_old_pdest(i))
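    // Timing note: the integer path delays the data (RegNext on int_old_pdest) and takes
    // freeReq directly from io.int_need_free, while the lists below delay the request
    // (GatedValidRegNext) and take old_pdest combinationally; presumably int_need_free
    // already arrives one cycle later than the commit signals, so req and data line up
    // one cycle after commit on both paths.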
    fpFreeList.io.freeReq(i)  := GatedValidRegNext(commitValid && needDestRegCommit(Reg_F, io.rabCommits.info(i)))
    fpFreeList.io.freePhyReg(i) := io.fp_old_pdest(i)
    vecFreeList.io.freeReq(i) := GatedValidRegNext(commitValid && needDestRegCommit(Reg_V, io.rabCommits.info(i)))
    vecFreeList.io.freePhyReg(i) := io.vec_old_pdest(i)
    v0FreeList.io.freeReq(i)  := GatedValidRegNext(commitValid && needDestRegCommit(Reg_V0, io.rabCommits.info(i)))
    v0FreeList.io.freePhyReg(i) := io.v0_old_pdest(i)
    vlFreeList.io.freeReq(i)  := GatedValidRegNext(commitValid && needDestRegCommit(Reg_Vl, io.rabCommits.info(i)))
    vlFreeList.io.freePhyReg(i) := io.vl_old_pdest(i)
  }

  /*
   * Debug and performance counters
   */
  def printRenameInfo(in: DecoupledIO[DecodedInst], out: DecoupledIO[DynInst]) = {
    XSInfo(out.fire, p"pc:${Hexadecimal(in.bits.pc)} in(${in.valid},${in.ready}) " +
      p"lsrc(0):${in.bits.lsrc(0)} -> psrc(0):${out.bits.psrc(0)} " +
      p"lsrc(1):${in.bits.lsrc(1)} -> psrc(1):${out.bits.psrc(1)} " +
      p"lsrc(2):${in.bits.lsrc(2)} -> psrc(2):${out.bits.psrc(2)} " +
      p"ldest:${in.bits.ldest} -> pdest:${out.bits.pdest}\n"
    )
  }

  for ((x, y) <- io.in.zip(io.out)) {
    printRenameInfo(x, y)
  }

  io.out.foreach { x =>
    when(x.valid && x.bits.rfWen) {
      assert(x.bits.ldest =/= 0.U, "rfWen cannot be 1 when Int regfile ldest is 0")
    }
  }
  val debugRedirect = RegEnable(io.redirect.bits, io.redirect.valid)
  // bad speculation
  val recStall = io.redirect.valid || io.rabCommits.isWalk
  val ctrlRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsCtrl, io.rabCommits.isWalk && debugRedirect.debugIsCtrl)
  val mvioRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsMemVio, io.rabCommits.isWalk && debugRedirect.debugIsMemVio)
  val otherRecStall = recStall && !(ctrlRecStall || mvioRecStall)
  XSPerfAccumulate("recovery_stall", recStall)
  XSPerfAccumulate("control_recovery_stall", ctrlRecStall)
  XSPerfAccumulate("mem_violation_recovery_stall", mvioRecStall)
  XSPerfAccumulate("other_recovery_stall", otherRecStall)
  // freelist stall
  val notRecStall = !io.out.head.valid && !recStall
  val intFlStall = notRecStall && inHeadValid && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate
  val fpFlStall  = notRecStall && inHeadValid && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate
  val vecFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate
  val v0FlStall  = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate
  val vlFlStall  = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate
  val multiFlStall = notRecStall && inHeadValid && (PopCount(Cat(
    !intFreeList.io.canAllocate,
    !fpFreeList.io.canAllocate,
    !vecFreeList.io.canAllocate,
    !v0FreeList.io.canAllocate,
    !vlFreeList.io.canAllocate,
  )) > 1.U)
  // other stall
  val otherStall = notRecStall && !intFlStall && !fpFlStall && !vecFlStall && !v0FlStall && !vlFlStall && !multiFlStall

  io.stallReason.in.backReason.valid := io.stallReason.out.backReason.valid || !io.in.head.ready
  io.stallReason.in.backReason.bits := Mux(io.stallReason.out.backReason.valid, io.stallReason.out.backReason.bits,
    MuxCase(TopDownCounters.OtherCoreStall.id.U, Seq(
      ctrlRecStall  -> TopDownCounters.ControlRecoveryStall.id.U,
      mvioRecStall  -> TopDownCounters.MemVioRecoveryStall.id.U,
      otherRecStall -> TopDownCounters.OtherRecoveryStall.id.U,
      intFlStall    -> TopDownCounters.IntFlStall.id.U,
      fpFlStall     -> TopDownCounters.FpFlStall.id.U,
      vecFlStall    -> TopDownCounters.VecFlStall.id.U,
      v0FlStall     -> TopDownCounters.V0FlStall.id.U,
      vlFlStall     -> TopDownCounters.VlFlStall.id.U,
      multiFlStall  -> TopDownCounters.MultiFlStall.id.U,
    )
  ))
  io.stallReason.out.reason.zip(io.stallReason.in.reason).zip(io.in.map(_.valid)).foreach { case ((out, in), valid) =>
    out := Mux(io.stallReason.in.backReason.valid, io.stallReason.in.backReason.bits, in)
  }

  XSDebug(io.rabCommits.isWalk, p"Walk Recovery Enabled\n")
  XSDebug(io.rabCommits.isWalk, p"validVec:${Binary(io.rabCommits.walkValid.asUInt)}\n")
  for (i <- 0 until RabCommitWidth) {
    val info = io.rabCommits.info(i)
    XSDebug(io.rabCommits.isWalk && io.rabCommits.walkValid(i), p"[#$i walk info] " +
      p"ldest:${info.ldest} rfWen:${info.rfWen} fpWen:${info.fpWen} vecWen:${info.vecWen} v0Wen:${info.v0Wen} vlWen:${info.vlWen}")
  }

  XSDebug(p"inValidVec: ${Binary(Cat(io.in.map(_.valid)))}\n")

  XSPerfAccumulate("in_valid_count", PopCount(io.in.map(_.valid)))
  XSPerfAccumulate("in_fire_count", PopCount(io.in.map(_.fire)))
  XSPerfAccumulate("in_valid_not_ready_count", PopCount(io.in.map(x => x.valid && !x.ready)))
  XSPerfAccumulate("wait_cycle", !io.in.head.valid && dispatchCanAcc)

  // These stall reasons could overlap each other, but we configure the priority as follows:
  // walk stall > dispatch stall > int freelist stall > fp freelist stall
  private val inHeadStall = io.in.head match { case x => x.valid && !x.ready }
  private val stallForWalk      = inHeadValid &&  io.rabCommits.isWalk
  private val stallForDispatch  = inHeadValid && !io.rabCommits.isWalk && !dispatchCanAcc
  private val stallForIntFL     = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate
  private val stallForFpFL      = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate
  private val stallForVecFL     = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate
  private val stallForV0FL      = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate
  private val stallForVlFL      = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate
  XSPerfAccumulate("stall_cycle",          inHeadStall)
  XSPerfAccumulate("stall_cycle_walk",     stallForWalk)
  XSPerfAccumulate("stall_cycle_dispatch", stallForDispatch)
  XSPerfAccumulate("stall_cycle_int",      stallForIntFL)
  XSPerfAccumulate("stall_cycle_fp",       stallForFpFL)
  XSPerfAccumulate("stall_cycle_vec",      stallForVecFL)
  XSPerfAccumulate("stall_cycle_v0",       stallForV0FL)
  XSPerfAccumulate("stall_cycle_vl",       stallForVlFL)

  XSPerfHistogram("in_valid_range",  PopCount(io.in.map(_.valid)),  true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("in_fire_range",   PopCount(io.in.map(_.fire)),   true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("out_valid_range", PopCount(io.out.map(_.valid)), true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("out_fire_range",  PopCount(io.out.map(_.fire)),  true.B, 0, DecodeWidth + 1, 1)

  XSPerfAccumulate("move_instr_count", PopCount(io.out.map(out => out.fire && out.bits.isMove)))
  val is_fused_lui_load = io.out.map(o => o.fire && o.bits.fuType === FuType.ldu.U && o.bits.srcType(0) === SrcType.imm)
  XSPerfAccumulate("fused_lui_load_instr_count", PopCount(is_fused_lui_load))

  val renamePerf = Seq(
    ("rename_in                  ", PopCount(io.in.map(_.valid && io.in(0).ready))                                                               ),
    ("rename_waitinstr           ", PopCount((0 until RenameWidth).map(i => io.in(i).valid && !io.in(i).ready))                                  ),
    ("rename_stall               ", inHeadStall),
    ("rename_stall_cycle_walk    ", inHeadValid &&  io.rabCommits.isWalk),
    ("rename_stall_cycle_dispatch", inHeadValid && !io.rabCommits.isWalk && !dispatchCanAcc),
    ("rename_stall_cycle_int     ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !intFreeList.io.canAllocate),
    ("rename_stall_cycle_fp      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !fpFreeList.io.canAllocate),
    ("rename_stall_cycle_vec     ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && v0FreeList.io.canAllocate && vlFreeList.io.canAllocate && !vecFreeList.io.canAllocate),
    ("rename_stall_cycle_v0      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && vlFreeList.io.canAllocate && !v0FreeList.io.canAllocate),
    ("rename_stall_cycle_vl      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && v0FreeList.io.canAllocate && !vlFreeList.io.canAllocate),
  )
  val intFlPerf = intFreeList.getPerfEvents
  val fpFlPerf  = fpFreeList.getPerfEvents
  val vecFlPerf = vecFreeList.getPerfEvents
  val v0FlPerf  = v0FreeList.getPerfEvents
  val vlFlPerf  = vlFreeList.getPerfEvents
  val perfEvents = renamePerf ++ intFlPerf ++ fpFlPerf ++ vecFlPerf ++ v0FlPerf ++ vlFlPerf
  generatePerfEvent()
}