/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend.rename

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.dispatch.PreDispatchInfo

class RenameBypassInfo(implicit p: Parameters) extends XSBundle {
  val lsrc1_bypass = MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W)))
  val lsrc2_bypass = MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W)))
  val lsrc3_bypass = MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W)))
  val ldest_bypass = MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W)))
}

class Rename(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle() {
    val redirect = Flipped(ValidIO(new Redirect))
    val flush = Input(Bool())
    val robCommits = Flipped(new RobCommitIO)
    // from decode
    val in = Vec(RenameWidth, Flipped(DecoupledIO(new CfCtrl)))
    // to/from rename table (read data in, speculative write ports out)
    val intReadPorts = Vec(RenameWidth, Vec(3, Input(UInt(PhyRegIdxWidth.W))))
    val fpReadPorts = Vec(RenameWidth, Vec(4, Input(UInt(PhyRegIdxWidth.W))))
    val intRenamePorts = Vec(RenameWidth, Output(new RatWritePort))
    val fpRenamePorts = Vec(RenameWidth, Output(new RatWritePort))
    // to dispatch1
    val out = Vec(RenameWidth, DecoupledIO(new MicroOp))
    val renameBypass = Output(new RenameBypassInfo)
    val dispatchInfo = Output(new PreDispatchInfo)
  })

  // create free list and rat
  val intFreeList = Module(new freelist.MEFreeList)
  val fpFreeList = Module(new freelist.StdFreeList)

  // decide whether the given instruction needs a new physical register allocated
  // (CfCtrl: from decode; RobCommitInfo: from rob)
  def needDestReg[T <: CfCtrl](fp: Boolean, x: T): Bool = {
    {if(fp) x.ctrl.fpWen else x.ctrl.rfWen && (x.ctrl.ldest =/= 0.U)}
  }
  def needDestRegCommit[T <: RobCommitInfo](fp: Boolean, x: T): Bool = {
    {if(fp) x.fpWen else x.rfWen && (x.ldest =/= 0.U)}
  }

  // connect [flush + redirect + walk] ports for the floating-point & integer free lists
  Seq((fpFreeList, true), (intFreeList, false)).foreach{ case (fl, isFp) =>
    fl.flush := io.flush
    fl.redirect := io.redirect.valid
    fl.walk := io.robCommits.isWalk
    // when isWalk, use stepBack to restore head pointer of free list
    // (if ME enabled, stepBack of intFreeList should be useless thus optimized out)
    fl.stepBack := PopCount(io.robCommits.valid.zip(io.robCommits.info).map{case (v, i) => v && needDestRegCommit(isFp, i)})
  }
  // walk has higher priority than allocation and thus we don't use isWalk here
  // allocation happens only when both free lists and dispatch1 have enough space
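  // Note that doAllocate is cross-coupled: each free list commits its allocation only when the
  // other free list can also allocate and dispatch1 can accept, so a rename group either obtains
  // physical registers from both free lists in the same cycle or stalls as a whole.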
  intFreeList.doAllocate := fpFreeList.canAllocate && io.out(0).ready
  fpFreeList.doAllocate := intFreeList.canAllocate && io.out(0).ready

  // can output: dispatch1 ready && fp free list ready && int free list ready && not walking
  val canOut = io.out(0).ready && fpFreeList.canAllocate && intFreeList.canAllocate && !io.robCommits.isWalk


  // speculatively assign a robIdx to each incoming instruction
  val validCount = PopCount(io.in.map(_.valid)) // number of instructions waiting to enter rob (from decode)
  val robIdxHead = RegInit(0.U.asTypeOf(new RobPtr))
  val lastCycleMisprediction = RegNext(io.redirect.valid && !io.redirect.bits.flushItself())
  val robIdxHeadNext = Mux(io.flush, 0.U.asTypeOf(new RobPtr), // flush: clear rob
    Mux(io.redirect.valid, io.redirect.bits.robIdx, // redirect: move ptr to given rob index (flush itself)
      Mux(lastCycleMisprediction, robIdxHead + 1.U, // mis-predict last cycle: the redirecting instruction is not flushed, so step past it
        Mux(canOut, robIdxHead + validCount, // instructions successfully entered next stage: increase robIdx
          /* default */ robIdxHead)))) // no instructions passed by this cycle: stick to old value
  robIdxHead := robIdxHeadNext

  /**
   * Rename: allocate free physical register and update rename table
   */
  val uops = Wire(Vec(RenameWidth, new MicroOp))
  uops.foreach( uop => {
    uop.srcState(0) := DontCare
    uop.srcState(1) := DontCare
    uop.srcState(2) := DontCare
    uop.robIdx := DontCare
    uop.diffTestDebugLrScValid := DontCare
    uop.debugInfo := DontCare
    uop.lqIdx := DontCare
    uop.sqIdx := DontCare
  })

  val needFpDest = Wire(Vec(RenameWidth, Bool()))
  val needIntDest = Wire(Vec(RenameWidth, Bool()))
  val hasValid = Cat(io.in.map(_.valid)).orR

  val isMove = io.in.map(_.bits.ctrl.isMove)
  val isMax = intFreeList.maxVec
  val meEnable = WireInit(VecInit(Seq.fill(RenameWidth)(false.B)))
  val psrc_cmp = Wire(MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W))))
  val intPsrc = Wire(Vec(RenameWidth, UInt()))

  val intSpecWen = Wire(Vec(RenameWidth, Bool()))
  val fpSpecWen = Wire(Vec(RenameWidth, Bool()))

  // uop calculation
  for (i <- 0 until RenameWidth) {
    uops(i).cf := io.in(i).bits.cf
    uops(i).ctrl := io.in(i).bits.ctrl

    val inValid = io.in(i).valid

    // alloc a new phy reg
    needFpDest(i) := inValid && needDestReg(fp = true, io.in(i).bits)
    needIntDest(i) := inValid && needDestReg(fp = false, io.in(i).bits)
    fpFreeList.allocateReq(i) := needFpDest(i)
    intFreeList.allocateReq(i) := needIntDest(i)

    // no valid instruction from decode stage || all resources (dispatch1 + both free lists) ready
    io.in(i).ready := !hasValid || canOut

    // do checkpoints when a branch inst comes
    // for(fl <- Seq(fpFreeList, intFreeList)){
    //   fl.cpReqs(i).valid := inValid
    //   fl.cpReqs(i).bits := io.in(i).bits.brTag
    // }

    uops(i).robIdx := robIdxHead + PopCount(io.in.take(i).map(_.valid))

    val intPhySrcVec = io.intReadPorts(i).take(2)
    val intOldPdest = io.intReadPorts(i).last
    intPsrc(i) := intPhySrcVec(0)
    val fpPhySrcVec = io.fpReadPorts(i).take(3)
    val fpOldPdest = io.fpReadPorts(i).last
    uops(i).psrc(0) := Mux(uops(i).ctrl.srcType(0) === SrcType.reg, intPhySrcVec(0), fpPhySrcVec(0))
    uops(i).psrc(1) := Mux(uops(i).ctrl.srcType(1) === SrcType.reg, intPhySrcVec(1), fpPhySrcVec(1))
    uops(i).psrc(2) := fpPhySrcVec(2)
    uops(i).old_pdest := Mux(uops(i).ctrl.rfWen, intOldPdest, fpOldPdest)

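    // Move elimination (ME): a move's destination can be mapped onto its source's physical
    // register instead of allocating a new one. As coded below, elimination is allowed when
    //  * the reference counter of the source physical register is not saturated (isMax), and
    //  * for slots other than the first, the source is not written by an earlier instruction of
    //    the same rename group (lsrc1_bypass, i.e. the RAT read of psrc would be stale) and the
    //    psrc is not shared with an earlier in-group move (psrc_cmp), presumably because the
    //    reference counter cannot be incremented more than once per cycle
    //    (cf. move_elim_cancelled_inc_more_than_one below);
    // a move whose source is the zero register is always eliminated.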
    if (i == 0) {
      // calculate meEnable
      meEnable(i) := isMove(i) && (!isMax(intPsrc(i)) || uops(i).ctrl.lsrc(0) === 0.U)
    } else {
      // compare psrc0
      psrc_cmp(i-1) := Cat((0 until i).map(j => {
        intPsrc(i) === intPsrc(j) && io.in(i).bits.ctrl.isMove && io.in(j).bits.ctrl.isMove
      }) /* reverse is not necessary here */)

      // calculate meEnable
      meEnable(i) := isMove(i) && (!(io.renameBypass.lsrc1_bypass(i-1).orR | psrc_cmp(i-1).orR | isMax(intPsrc(i))) || uops(i).ctrl.lsrc(0) === 0.U)
    }
    uops(i).eliminatedMove := meEnable(i) || (uops(i).ctrl.isMove && uops(i).ctrl.ldest === 0.U)

    // send psrc of eliminated move instructions to free list and label them as eliminated
    intFreeList.psrcOfMove(i).valid := meEnable(i)
    intFreeList.psrcOfMove(i).bits := intPsrc(i)

    // update pdest
    uops(i).pdest := Mux(meEnable(i), intPsrc(i), // move eliminated
                     Mux(needIntDest(i), intFreeList.allocatePhyReg(i), // normal int inst
                     Mux(uops(i).ctrl.ldest === 0.U && uops(i).ctrl.rfWen, 0.U, // int inst with dst = r0
                     /* default */ fpFreeList.allocatePhyReg(i)))) // normal fp inst

    // Assign performance counters
    uops(i).debugInfo.renameTime := GTimer()

    io.out(i).valid := io.in(i).valid && intFreeList.canAllocate && fpFreeList.canAllocate && !io.robCommits.isWalk
    io.out(i).bits := uops(i)

    // write speculative rename table
    // we update rat later inside commit code
    intSpecWen(i) := intFreeList.allocateReq(i) && intFreeList.canAllocate && intFreeList.doAllocate && !io.robCommits.isWalk
    fpSpecWen(i) := fpFreeList.allocateReq(i) && fpFreeList.canAllocate && fpFreeList.doAllocate && !io.robCommits.isWalk
  }

  // We don't bypass the old_pdest from valid instructions with the same ldest that are currently
  // in the rename stage. Instead, we record whether there are such dependencies between the valid
  // instructions of this group.
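  // lsrcN_bypass(i-1) / ldest_bypass(i-1) is an i-bit vector attached to rename slot i (i >= 1):
  // bit j is set when slot i's N-th logical source (or its ldest) equals slot j's ldest and slot j
  // actually writes the corresponding register file. For example, if RenameWidth is 6,
  // lsrc1_bypass(2) has 3 bits describing slot 3 against slots 0..2. Downstream logic (dispatch1)
  // is expected to use these bits to replace the stale RAT reads with the physical registers
  // allocated in this group.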
  for (i <- 1 until RenameWidth) {
    io.renameBypass.lsrc1_bypass(i-1) := Cat((0 until i).map(j => {
      val fpMatch = needFpDest(j) && io.in(i).bits.ctrl.srcType(0) === SrcType.fp
      val intMatch = needIntDest(j) && io.in(i).bits.ctrl.srcType(0) === SrcType.reg
      (fpMatch || intMatch) && io.in(j).bits.ctrl.ldest === io.in(i).bits.ctrl.lsrc(0)
    }).reverse)
    io.renameBypass.lsrc2_bypass(i-1) := Cat((0 until i).map(j => {
      val fpMatch = needFpDest(j) && io.in(i).bits.ctrl.srcType(1) === SrcType.fp
      val intMatch = needIntDest(j) && io.in(i).bits.ctrl.srcType(1) === SrcType.reg
      (fpMatch || intMatch) && io.in(j).bits.ctrl.ldest === io.in(i).bits.ctrl.lsrc(1)
    }).reverse)
    io.renameBypass.lsrc3_bypass(i-1) := Cat((0 until i).map(j => {
      val fpMatch = needFpDest(j) && io.in(i).bits.ctrl.srcType(2) === SrcType.fp
      val intMatch = needIntDest(j) && io.in(i).bits.ctrl.srcType(2) === SrcType.reg
      (fpMatch || intMatch) && io.in(j).bits.ctrl.ldest === io.in(i).bits.ctrl.lsrc(2)
    }).reverse)
    io.renameBypass.ldest_bypass(i-1) := Cat((0 until i).map(j => {
      val fpMatch = needFpDest(j) && needFpDest(i)
      val intMatch = needIntDest(j) && needIntDest(i)
      (fpMatch || intMatch) && io.in(j).bits.ctrl.ldest === io.in(i).bits.ctrl.ldest
    }).reverse)
  }

  // calculate lsq space requirement
  // (as encoded below: plain stores request 2, loads and AMOs request 1, other instructions 0)
  val isLs = VecInit(uops.map(uop => FuType.isLoadStore(uop.ctrl.fuType)))
  val isStore = VecInit(uops.map(uop => FuType.isStoreExu(uop.ctrl.fuType)))
  val isAMO = VecInit(uops.map(uop => FuType.isAMO(uop.ctrl.fuType)))
  io.dispatchInfo.lsqNeedAlloc := VecInit((0 until RenameWidth).map(i =>
    Mux(isLs(i), Mux(isStore(i) && !isAMO(i), 2.U, 1.U), 0.U)))

  /**
   * Instructions commit: update freelist and rename table
   */
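  // The loop below iterates over the CommitWidth commit/walk slots. The RAT write ports driven
  // inside it are the speculative rename ports (intRenamePorts/fpRenamePorts) and thus only exist
  // for the first RenameWidth slots, while the free-list freeReq/freePhyReg ports are driven for
  // every commit slot.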
  for (i <- 0 until CommitWidth) {

    Seq((io.intRenamePorts, false), (io.fpRenamePorts, true)) foreach { case (rat, fp) =>
      // is valid commit req and given instruction has destination register
      val commitDestValid = io.robCommits.valid(i) && needDestRegCommit(fp, io.robCommits.info(i))
      XSDebug(p"isFp[${fp}]index[$i]-commitDestValid:$commitDestValid,isWalk:${io.robCommits.isWalk}\n")

      /*
      I. RAT Update
      */

      // speculative rename table write: map ldest to the newly allocated pdest
      // (or, for an eliminated move, to the source physical register)
      if (fp && i < RenameWidth) {
        // When redirect happens (mis-prediction), don't update the rename table
        rat(i).wen := fpSpecWen(i) && !io.flush && !io.redirect.valid
        rat(i).addr := uops(i).ctrl.ldest
        rat(i).data := fpFreeList.allocatePhyReg(i)
      } else if (!fp && i < RenameWidth) {
        rat(i).wen := intSpecWen(i) && !io.flush && !io.redirect.valid
        rat(i).addr := uops(i).ctrl.ldest
        rat(i).data := Mux(meEnable(i), intPsrc(i), intFreeList.allocatePhyReg(i))
      }

      /*
      II. Free List Update
      */
      if (fp) { // floating-point free list
        fpFreeList.freeReq(i) := commitDestValid && !io.robCommits.isWalk
        fpFreeList.freePhyReg(i) := io.robCommits.info(i).old_pdest
      } else { // integer free list

        // during walk process:
        // 1. for normal inst, free pdest + revert rat from ldest->pdest to ldest->old_pdest
        // 2. for ME inst, free pdest (commit counter++) + revert rat
        //
        // conclusion:
        // a. rat recovery has nothing to do with ME or not
        // b. treat walk as a normal commit, except that pdest is freed instead of old_pdest and io.walk is set
        // c. old_pdest is ignored when walking

        intFreeList.freeReq(i) := commitDestValid // walk or not walk
        intFreeList.freePhyReg(i) := Mux(io.robCommits.isWalk, io.robCommits.info(i).pdest, io.robCommits.info(i).old_pdest)
        intFreeList.eliminatedMove(i) := io.robCommits.info(i).eliminatedMove
        intFreeList.multiRefPhyReg(i) := io.robCommits.info(i).pdest
      }
    }
  }


  /*
  Debug and performance counter
  */

  def printRenameInfo(in: DecoupledIO[CfCtrl], out: DecoupledIO[MicroOp]) = {
    XSInfo(
      in.valid && in.ready,
      p"pc:${Hexadecimal(in.bits.cf.pc)} in v:${in.valid} in rdy:${in.ready} " +
      p"lsrc(0):${in.bits.ctrl.lsrc(0)} -> psrc(0):${out.bits.psrc(0)} " +
      p"lsrc(1):${in.bits.ctrl.lsrc(1)} -> psrc(1):${out.bits.psrc(1)} " +
      p"lsrc(2):${in.bits.ctrl.lsrc(2)} -> psrc(2):${out.bits.psrc(2)} " +
      p"ldest:${in.bits.ctrl.ldest} -> pdest:${out.bits.pdest} " +
      p"old_pdest:${out.bits.old_pdest} " +
      p"out v:${out.valid} r:${out.ready}\n"
    )
  }

  for ((x, y) <- io.in.zip(io.out)) {
    printRenameInfo(x, y)
  }

  XSDebug(io.robCommits.isWalk, p"Walk Recovery Enabled\n")
  XSDebug(io.robCommits.isWalk, p"validVec:${Binary(io.robCommits.valid.asUInt)}\n")
  for (i <- 0 until CommitWidth) {
    val info = io.robCommits.info(i)
    XSDebug(io.robCommits.isWalk && io.robCommits.valid(i), p"[#$i walk info] pc:${Hexadecimal(info.pc)} " +
      p"ldest:${info.ldest} rfWen:${info.rfWen} fpWen:${info.fpWen} " + p"eliminatedMove:${info.eliminatedMove} " +
      p"pdest:${info.pdest} old_pdest:${info.old_pdest}\n")
  }

  XSDebug(p"inValidVec: ${Binary(Cat(io.in.map(_.valid)))}\n")
  XSInfo(!canOut, p"stall at rename, hasValid:${hasValid}, fpCanAlloc:${fpFreeList.canAllocate}, intCanAlloc:${intFreeList.canAllocate} dispatch1ready:${io.out(0).ready}, isWalk:${io.robCommits.isWalk}\n")

  XSPerfAccumulate("in", Mux(RegNext(io.in(0).ready), PopCount(io.in.map(_.valid)), 0.U))
  XSPerfAccumulate("utilization", PopCount(io.in.map(_.valid)))
  XSPerfAccumulate("waitInstr", PopCount((0 until RenameWidth).map(i => io.in(i).valid && !io.in(i).ready)))
  XSPerfAccumulate("stall_cycle_dispatch", hasValid && !io.out(0).ready && fpFreeList.canAllocate && intFreeList.canAllocate && !io.robCommits.isWalk)
  XSPerfAccumulate("stall_cycle_fp", hasValid && io.out(0).ready && !fpFreeList.canAllocate && intFreeList.canAllocate && !io.robCommits.isWalk)
  XSPerfAccumulate("stall_cycle_int", hasValid && io.out(0).ready && fpFreeList.canAllocate && !intFreeList.canAllocate && !io.robCommits.isWalk)
  XSPerfAccumulate("stall_cycle_walk", hasValid && io.out(0).ready && fpFreeList.canAllocate && intFreeList.canAllocate && io.robCommits.isWalk)
  if (!env.FPGAPlatform) {
    ExcitingUtils.addSource(io.robCommits.isWalk, "TMA_backendiswalk")
  }

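  // Move-elimination bookkeeping: the counters below break move_elim_cancelled down by cause,
  // mirroring the meEnable conditions above: an in-group RAW dependency on the move's source
  // (psrc_bypass), the source's reference counter being saturated (cnt_limit), and the source
  // physical register being shared with an earlier in-group move (inc_more_than_one).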
  XSPerfAccumulate("move_instr_count", PopCount(Seq.tabulate(RenameWidth)(i => io.out(i).fire() && io.in(i).bits.ctrl.isMove)))
  XSPerfAccumulate("move_elim_enabled", PopCount(Seq.tabulate(RenameWidth)(i => io.out(i).fire() && meEnable(i))))
  XSPerfAccumulate("move_elim_cancelled", PopCount(Seq.tabulate(RenameWidth)(i => io.out(i).fire() && io.in(i).bits.ctrl.isMove && !meEnable(i))))
  XSPerfAccumulate("move_elim_cancelled_psrc_bypass", PopCount(Seq.tabulate(RenameWidth)(i => io.out(i).fire() && io.in(i).bits.ctrl.isMove && !meEnable(i) && { if (i == 0) false.B else io.renameBypass.lsrc1_bypass(i-1).orR })))
  XSPerfAccumulate("move_elim_cancelled_cnt_limit", PopCount(Seq.tabulate(RenameWidth)(i => io.out(i).fire() && io.in(i).bits.ctrl.isMove && !meEnable(i) && isMax(io.out(i).bits.psrc(0)))))
  XSPerfAccumulate("move_elim_cancelled_inc_more_than_one", PopCount(Seq.tabulate(RenameWidth)(i => io.out(i).fire() && io.in(i).bits.ctrl.isMove && !meEnable(i) && { if (i == 0) false.B else psrc_cmp(i-1).orR })))

  // to make sure meEnable functions as expected
  for (i <- 0 until RenameWidth) {
    XSDebug(io.out(i).fire() && io.in(i).bits.ctrl.isMove && !meEnable(i) && isMax(io.out(i).bits.psrc(0)),
      p"ME_CANCELLED: ref counter hits max value (pc:0x${Hexadecimal(io.in(i).bits.cf.pc)})\n")
    XSDebug(io.out(i).fire() && io.in(i).bits.ctrl.isMove && !meEnable(i) && { if (i == 0) false.B else io.renameBypass.lsrc1_bypass(i-1).orR },
      p"ME_CANCELLED: RAW dependency (pc:0x${Hexadecimal(io.in(i).bits.cf.pc)})\n")
    XSDebug(io.out(i).fire() && io.in(i).bits.ctrl.isMove && !meEnable(i) && { if (i == 0) false.B else psrc_cmp(i-1).orR },
      p"ME_CANCELLED: psrc duplicates with former instruction (pc:0x${Hexadecimal(io.in(i).bits.cf.pc)})\n")
  }
  XSDebug(VecInit(Seq.tabulate(RenameWidth)(i => io.out(i).fire() && io.in(i).bits.ctrl.isMove && !meEnable(i))).asUInt().orR,
    p"ME_CANCELLED: pc group [ " + (0 until RenameWidth).map(i => p"fire:${io.out(i).fire()},pc:0x${Hexadecimal(io.in(i).bits.cf.pc)} ").reduceLeft(_ + _) + p"]\n")
  XSInfo(meEnable.asUInt().orR(), p"meEnableVec:${Binary(meEnable.asUInt)}\n")
}