package xiangshan.backend.rob

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.backend.Bundles.DynInst
import xiangshan.backend.{RabToVecExcpMod, RegWriteFromRab}
import xiangshan.backend.decode.VectorConstants
import xiangshan.backend.rename.SnapshotGenerator
import chisel3.experimental.BundleLiterals._

/**
 * Circular-queue pointer type for the rename buffer (RAB).
 *
 * @param size depth of the circular queue this pointer indexes
 */
class RenameBufferPtr(size: Int) extends CircularQueuePtr[RenameBufferPtr](size) {
  // Auxiliary constructor: derive the queue depth from the core configuration.
  def this()(implicit p: Parameters) = this(p(XSCoreParamsKey).RabSize)
}

object RenameBufferPtr {
  /**
   * Build a hardware RenameBufferPtr wire initialized to (flag, v).
   *
   * @param flag wrap flag of the circular pointer
   * @param v    initial index value
   */
  def apply(flag: Boolean = false, v: Int = 0)(implicit p: Parameters): RenameBufferPtr = {
    val ptr = Wire(new RenameBufferPtr(p(XSCoreParamsKey).RabSize))
    ptr.flag := flag.B
    ptr.value := v.U
    ptr
  }
}

/**
 * One rename-buffer entry: the commit info payload plus, outside FPGA builds,
 * the ROB index of the producing instruction (debug-only field, hence optional).
 */
class RenameBufferEntry(implicit p: Parameters) extends XSBundle {
  val info = new RabCommitInfo
  // Only materialized when !FPGAPlatform (debug/difftest builds).
  val robIdx = OptionWrapper(!env.FPGAPlatform, new RobPtr)
}

/**
 * Rename buffer (RAB): queues (ldest, pdest) mappings produced at rename and
 * releases them to the architectural RAT at commit, or replays them backwards
 * on a walk after a redirect.
 *
 * State machine (see `s_idle :: s_special_walk :: s_walk`):
 *  - s_idle:         normal operation, commit entries at deqPtr.
 *  - s_special_walk: after a non-snapshot redirect, entries that the ROB had
 *                    already marked committable are drained (committed and
 *                    walked simultaneously) before the real walk starts.
 *  - s_walk:         replay entries from a snapshot/walk pointer until the
 *                    ROB signals the walk has ended.
 *
 * @param size number of rename-buffer entries
 */
class RenameBuffer(size: Int)(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  val io = IO(new Bundle {
    // Redirect valid only; no payload is needed by this module.
    val redirect = Input(ValidIO(new Bundle {
    }))

    // Enqueue requests from rename, RenameWidth per cycle.
    val req = Vec(RenameWidth, Flipped(ValidIO(new DynInst)))
    val fromRob = new Bundle {
      val walkSize = Input(UInt(log2Up(size).W))
      val walkEnd = Input(Bool())
      val commitSize = Input(UInt(log2Up(size).W))
      // Vector-load exception context latched on a non-snapshot redirect.
      val vecLoadExcp = Input(ValidIO(new Bundle{
        val isStrided = Bool()
        val isVlm = Bool()
      }))
    }

    val snpt = Input(new SnapshotPort)

    val canEnq = Output(Bool())
    // Stricter backpressure for dispatch (reserves 2*RenameWidth slots, see below).
    val canEnqForDispatch = Output(Bool())
    val enqPtrVec = Output(Vec(RenameWidth, new RenameBufferPtr))

    val commits = Output(new RabCommitIO)
    val diffCommits = if (backendParams.basicDebugEn) Some(Output(new DiffCommitIO)) else None

    val status = Output(new Bundle {
      val walkEnd = Bool()
      val commitEnd = Bool()
    })
    val toVecExcpMod = Output(new RabToVecExcpMod)
  })

  // alias
  private val snptSelect = io.snpt.snptSelect

  // Pointers are kept in two redundant encodings: binary (RenameBufferPtr) for
  // arithmetic/compares, and one-hot (…OH) for cheap entry selection via Mux1H.
  // The XSError below cross-checks the two encodings each cycle.
  private val enqPtrVec = RegInit(VecInit.tabulate(RenameWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val enqPtr = enqPtrVec.head
  private val enqPtrOH = RegInit(1.U(size.W))
  private val enqPtrOHShift = CircularShift(enqPtrOH)
  // may shift [0, RenameWidth] steps
  private val enqPtrOHVec = VecInit.tabulate(RenameWidth + 1)(enqPtrOHShift.left)
  private val enqPtrVecNext = Wire(enqPtrVec.cloneType)

  private val deqPtrVec = RegInit(VecInit.tabulate(RabCommitWidth)(idx => RenameBufferPtr(flag = false, idx)))
  private val deqPtr = deqPtrVec.head
  private val deqPtrOH = RegInit(1.U(size.W))
  private val deqPtrOHShift = CircularShift(deqPtrOH)
  private val deqPtrOHVec = VecInit.tabulate(RabCommitWidth + 1)(deqPtrOHShift.left)
  private val deqPtrVecNext = Wire(deqPtrVec.cloneType)
  XSError(deqPtr.toOH =/= deqPtrOH, p"wrong one-hot reg between $deqPtr and $deqPtrOH")

  // Walk pointer: one-hot form is derived combinationally from the binary reg.
  private val walkPtr = Reg(new RenameBufferPtr)
  private val walkPtrOH = walkPtr.toOH
  private val walkPtrOHVec = VecInit.tabulate(RabCommitWidth + 1)(CircularShift(walkPtrOH).left)
  private val walkPtrNext = Wire(new RenameBufferPtr)

  // Snapshots of enqPtr taken on snptEnq; restored when a redirect hits a snapshot.
  private val walkPtrSnapshots = SnapshotGenerator(enqPtr, io.snpt.snptEnq, io.snpt.snptDeq, io.redirect.valid, io.snpt.flushVec)

  val vcfgPtrOH = RegInit(1.U(size.W))
  val vcfgPtrOHShift = CircularShift(vcfgPtrOH)
  // may shift [0, 2) steps
  val vcfgPtrOHVec = VecInit.tabulate(2)(vcfgPtrOHShift.left)

  // Difftest commit pointer: advances by the raw commitSize from the ROB.
  val diffPtr = RegInit(0.U.asTypeOf(new RenameBufferPtr))
  val diffPtrNext = Wire(new RenameBufferPtr)
  // Regs
  val renameBuffer = Reg(Vec(size, new RenameBufferEntry))
  val renameBufferEntries = VecInit((0 until size) map (i => renameBuffer(i)))

  val vecLoadExcp = Reg(io.fromRob.vecLoadExcp.cloneType)

  private val maxLMUL = 8
  private val vdIdxWidth = log2Up(maxLMUL + 1)
  val currentVdIdx = Reg(UInt(vdIdxWidth.W)) // store 0~8

  val s_idle :: s_special_walk :: s_walk :: Nil = Enum(3)
  val state = RegInit(s_idle)
  val stateNext = WireInit(state) // otherwise keep state value

  // Sticky "ROB finished walking" flag, cleared on redirect.
  private val robWalkEndReg = RegInit(false.B)
  private val robWalkEnd = io.fromRob.walkEnd || robWalkEndReg

  when(io.redirect.valid) {
    robWalkEndReg := false.B
  }.elsewhen(io.fromRob.walkEnd) {
    robWalkEndReg := true.B
  }

  // Only instructions that actually write a register consume a RAB entry.
  val realNeedAlloc = io.req.map(req => req.valid && req.bits.needWriteRf)
  val enqCount = PopCount(realNeedAlloc)
  // NOTE(review): commitNum/walkNum are log2Up(RabCommitWidth) bits wide but the
  // PriorityMux below can produce the value RabCommitWidth itself; if
  // RabCommitWidth is a power of two this would truncate — confirm the
  // configured width makes this safe.
  val commitNum = Wire(UInt(log2Up(RabCommitWidth).W))
  val walkNum = Wire(UInt(log2Up(RabCommitWidth).W))
  // Count of valid commit/walk slots: find the highest-index valid lane
  // (valid lanes are assumed contiguous from lane 0, gated by lane 0's valid).
  commitNum := Mux(io.commits.commitValid(0), PriorityMux((0 until RabCommitWidth).map(
    i => io.commits.commitValid(RabCommitWidth - 1 - i) -> (RabCommitWidth - i).U
  )), 0.U)
  walkNum := Mux(io.commits.walkValid(0), PriorityMux((0 until RabCommitWidth).map(
    i => io.commits.walkValid(RabCommitWidth - 1 - i) -> (RabCommitWidth-i).U
  )), 0.U)
  // isCommit && isWalk (special walk) is accounted separately from pure commit/walk.
  val commitCount = Mux(io.commits.isCommit && !io.commits.isWalk, commitNum, 0.U)
  val walkCount = Mux(io.commits.isWalk && !io.commits.isCommit, walkNum, 0.U)
  val specialWalkCount = Mux(io.commits.isCommit && io.commits.isWalk, walkNum, 0.U)

  // number of pair(ldest, pdest) ready to commit to arch_rat
  val commitSize = RegInit(0.U(log2Up(size).W))
  val walkSize = RegInit(0.U(log2Up(size).W))
  val specialWalkSize = RegInit(0.U(log2Up(size).W))

  val newCommitSize = io.fromRob.commitSize
  val newWalkSize = io.fromRob.walkSize

  // Running totals: add what the ROB newly reports, subtract what we consumed.
  val commitSizeNxt = commitSize + newCommitSize - commitCount
  val walkSizeNxt = walkSize + newWalkSize - walkCount

  // On a non-snapshot redirect, pending commits become "special walk" work:
  // they are drained in s_special_walk before the real walk begins.
  val newSpecialWalkSize = Mux(io.redirect.valid && !io.snpt.useSnpt, commitSizeNxt, 0.U)
  val specialWalkSizeNext = specialWalkSize + newSpecialWalkSize - specialWalkCount

  commitSize := Mux(io.redirect.valid && !io.snpt.useSnpt, 0.U, commitSizeNxt)
  specialWalkSize := specialWalkSizeNext
  walkSize := Mux(io.redirect.valid, 0.U, walkSizeNxt)

  // Walk pointer source, by priority:
  //  - entering s_walk from s_idle: restore from the selected snapshot
  //  - entering s_walk from s_special_walk: continue from the next deq pointer
  //  - snapshot redirect while already walking: re-restore from snapshot
  //  - steady-state walking: advance by this cycle's walk count
  walkPtrNext := MuxCase(walkPtr, Seq(
    (state === s_idle && stateNext === s_walk) -> walkPtrSnapshots(snptSelect),
    (state === s_special_walk && stateNext === s_walk) -> deqPtrVecNext.head,
    (state === s_walk && io.snpt.useSnpt && io.redirect.valid) -> walkPtrSnapshots(snptSelect),
    (state === s_walk) -> (walkPtr + walkCount),
  ))

  walkPtr := walkPtrNext

  // Entry windows selected by the one-hot pointer vectors (Mux1H per offset).
  val walkCandidates = VecInit(walkPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val commitCandidates = VecInit(deqPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))
  val vcfgCandidates = VecInit(vcfgPtrOHVec.map(sel => Mux1H(sel, renameBufferEntries)))

  // update diff pointer
  diffPtrNext := diffPtr + newCommitSize
  diffPtr := diffPtrNext

  // update vcfg pointer
  // TODO: do not use diffPtrNext here
  vcfgPtrOH := diffPtrNext.toOH

  // update enq pointer
  // When the walk finishes, enqueue resumes from where the walk ended;
  // otherwise advance by this cycle's allocation count.
  val enqPtrNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext,
    enqPtr + enqCount
  )
  val enqPtrOHNext = Mux(
    state === s_walk && stateNext === s_idle,
    walkPtrNext.toOH,
    enqPtrOHVec(enqCount)
  )
  enqPtr := enqPtrNext
  enqPtrOH := enqPtrOHNext
  enqPtrVecNext.zipWithIndex.map{ case(ptr, i) => ptr := enqPtrNext + i.U }
  enqPtrVec := enqPtrVecNext

  // deqPtr advances on normal commits (s_idle) and special-walk drains;
  // it holds still during a pure walk (Mux1H yields 0 when no case matches).
  val deqPtrSteps = Mux1H(Seq(
    (state === s_idle) -> commitCount,
    (state === s_special_walk) -> specialWalkCount,
  ))

  // update deq pointer
  val deqPtrNext = deqPtr + deqPtrSteps
  val deqPtrOHNext = deqPtrOHVec(deqPtrSteps)
  deqPtr := deqPtrNext
  deqPtrOH := deqPtrOHNext
  deqPtrVecNext.zipWithIndex.map{ case(ptr, i) => ptr := deqPtrNext + i.U }
  deqPtrVec := deqPtrVecNext

  // Compact allocation: request lane i writes the slot at enqPtr + (number of
  // allocating lanes before i), so bubbles in req do not waste entries.
  val allocatePtrVec = VecInit((0 until RenameWidth).map(i => enqPtrVec(PopCount(realNeedAlloc.take(i))).value))

  allocatePtrVec.zip(io.req).zip(realNeedAlloc).map{ case((allocatePtr, req), realNeedAlloc) =>
    when(realNeedAlloc){
      renameBuffer(allocatePtr).info := req.bits
      renameBuffer(allocatePtr).robIdx.foreach(_ := req.bits.robIdx)
    }
  }

  // In s_special_walk the outputs are both a commit and a walk at once.
  io.commits.isCommit := state === s_idle || state === s_special_walk
  io.commits.isWalk := state === s_walk || state === s_special_walk

  for(i <- 0 until RabCommitWidth) {
    io.commits.commitValid(i) := state === s_idle && i.U < commitSize || state === s_special_walk && i.U < specialWalkSize
    io.commits.walkValid(i) := state === s_walk && i.U < walkSize || state === s_special_walk && i.U < specialWalkSize
    // special walk use commitPtr
    io.commits.info(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i).info, walkCandidates(i).info)
    io.commits.robIdx.foreach(_(i) := Mux(state === s_idle || state === s_special_walk, commitCandidates(i).robIdx.get, walkCandidates(i).robIdx.get))
  }

  private val walkEndNext = walkSizeNxt === 0.U
  private val commitEndNext = commitSizeNxt === 0.U
  // Special walk ends once the remainder fits in one commit-width beat.
  private val specialWalkEndNext = specialWalkSize <= RabCommitWidth.U
  // when robWalkEndReg is 1, walkSize does not increase and decreases by up to RabCommitWidth per cycle
  private val walkEndNextCycle = (robWalkEndReg || io.fromRob.walkEnd && io.fromRob.walkSize === 0.U) && (walkSize <= RabCommitWidth.U)
  // change state
  // NOTE: stateNext defaults to `state` (WireInit above); the when/switch below
  // rely on Chisel last-connect semantics to override it.
  state := stateNext
  when(io.redirect.valid) {
    when(io.snpt.useSnpt) {
      stateNext := s_walk
    }.otherwise {
      stateNext := s_special_walk
      vecLoadExcp := io.fromRob.vecLoadExcp
      when(io.fromRob.vecLoadExcp.valid) {
        currentVdIdx := 0.U
      }
    }
  }.otherwise {
    // change stateNext
    switch(state) {
      // this transaction is not used actually, just list all states
      is(s_idle) {
        stateNext := s_idle
      }
      is(s_special_walk) {
        currentVdIdx := currentVdIdx + specialWalkCount
        when(specialWalkEndNext) {
          stateNext := s_walk
          vecLoadExcp.valid := false.B
        }
      }
      is(s_walk) {
        when(walkEndNextCycle) {
          stateNext := s_idle
        }
      }
    }
  }

  // Backpressure: registered (GatedValidRegNext) occupancy checks. The
  // dispatch variant reserves an extra RenameWidth of headroom.
  val numValidEntries = distanceBetween(enqPtr, deqPtr)
  val allowEnqueue = GatedValidRegNext(numValidEntries + enqCount <= (size - RenameWidth).U, true.B)
  val allowEnqueueForDispatch = GatedValidRegNext(numValidEntries + enqCount <= (size - 2*RenameWidth).U, true.B)

  io.canEnq := allowEnqueue && state === s_idle
  io.canEnqForDispatch := allowEnqueueForDispatch && state === s_idle
  io.enqPtrVec := enqPtrVec

  io.status.walkEnd := walkEndNext
  io.status.commitEnd := commitEndNext

  // Report (lreg, preg) mappings drained during a vector-load-exception
  // special walk to the vector exception module, one cycle delayed (RegNext/RegEnable).
  for (i <- 0 until RabCommitWidth) {
    val valid = (state === s_special_walk) && vecLoadExcp.valid && io.commits.commitValid(i)
    io.toVecExcpMod.logicPhyRegMap(i).valid := RegNext(valid)
    io.toVecExcpMod.logicPhyRegMap(i).bits match {
      case x =>
        x.lreg := RegEnable(io.commits.info(i).ldest, valid)
        x.preg := RegEnable(io.commits.info(i).pdest, valid)
    }
  }

  // for difftest
  io.diffCommits.foreach(_ := 0.U.asTypeOf(new DiffCommitIO))
  io.diffCommits.foreach(_.isCommit := true.B)
  for(i <- 0 until RabCommitWidth * MaxUopSize) {
    io.diffCommits.foreach(_.commitValid(i) := i.U < newCommitSize)
    io.diffCommits.foreach(_.info(i) := renameBufferEntries((diffPtr + i.U).value).info)
  }

  XSError(isBefore(enqPtr, deqPtr) && !isFull(enqPtr, deqPtr), "\ndeqPtr is older than enqPtr!\n")

  QueuePerf(RabSize, numValidEntries, numValidEntries === size.U)

  if (backendParams.debugEn) {
    dontTouch(deqPtrVec)
    dontTouch(walkPtrNext)
    dontTouch(walkSizeNxt)
    dontTouch(walkEndNext)
    dontTouch(walkEndNextCycle)
  }

  // State-transition counters for performance analysis.
  XSPerfAccumulate("s_idle_to_idle", state === s_idle && stateNext === s_idle)
  XSPerfAccumulate("s_idle_to_swlk", state === s_idle && stateNext === s_special_walk)
  XSPerfAccumulate("s_idle_to_walk", state === s_idle && stateNext === s_walk)
  XSPerfAccumulate("s_swlk_to_idle", state === s_special_walk && stateNext === s_idle)
  XSPerfAccumulate("s_swlk_to_swlk", state === s_special_walk && stateNext === s_special_walk)
  XSPerfAccumulate("s_swlk_to_walk", state === s_special_walk && stateNext === s_walk)
  XSPerfAccumulate("s_walk_to_idle", state === s_walk && stateNext === s_idle)
  XSPerfAccumulate("s_walk_to_swlk", state === s_walk && stateNext === s_special_walk)
  XSPerfAccumulate("s_walk_to_walk", state === s_walk && stateNext === s_walk)

  XSPerfAccumulate("disallow_enq_cycle", !allowEnqueue)
  XSPerfAccumulate("disallow_enq_full_cycle", numValidEntries + enqCount > (size - RenameWidth).U)
  XSPerfAccumulate("disallow_enq_not_idle_cycle", state =/= s_idle)
}