package xiangshan.backend.issue

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import utility.HasPerfEvents
import utils.OptionWrapper
import xiangshan._
import xiangshan.backend.Bundles._
import xiangshan.backend.datapath.DataConfig._
import xiangshan.backend.datapath.WbConfig._
import xiangshan.backend.fu.FuType
import xiangshan.backend.regfile.RfWritePortWithConfig
import xiangshan.backend.datapath.WbConfig.V0WB
import xiangshan.backend.regfile.VlPregParams
import xiangshan.backend.regcache.RegCacheTagTable
import xiangshan.mem.{LsqEnqCtrl, LsqEnqIO, SqPtr, LqPtr}
import xiangshan.mem.Bundles.MemWaitUpdateReqBundle

/** Closed set of scheduler block flavors. Used only at elaboration time to
  * select the concrete implementation in [[Scheduler.module]].
  */
sealed trait SchedulerType

case class IntScheduler() extends SchedulerType
case class FpScheduler() extends SchedulerType
case class MemScheduler() extends SchedulerType
case class VfScheduler() extends SchedulerType
case class NoScheduler() extends SchedulerType

/** Diplomacy (LazyModule) wrapper for one scheduler block.
  *
  * Instantiates one [[IssueQueue]] LazyModule per issue-block parameter set and,
  * based on `params.schdType`, picks the matching module implementation:
  * arithmetic (int/fp/vf) or memory.
  */
class Scheduler(val params: SchdBlockParams)(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false

  // Number of writeback ports per register-file kind (used by subclasses / IO sizing).
  val numIntStateWrite = backendParams.numPregWb(IntData())
  val numFpStateWrite = backendParams.numPregWb(FpData())
  val numVfStateWrite = backendParams.numPregWb(VecData())
  val numV0StateWrite = backendParams.numPregWb(V0Data())
  val numVlStateWrite = backendParams.numPregWb(VlData())

  // One issue queue per issue block, named after its parameter set.
  val issueQueue = params.issueBlockParams.map(x => LazyModule(new IssueQueue(x).suggestName(x.getIQName)))

  lazy val module: SchedulerImpBase = params.schdType match {
    case IntScheduler() => new SchedulerArithImp(this)(params, p)
    case FpScheduler() => new SchedulerArithImp(this)(params, p)
    case MemScheduler() => new SchedulerMemImp(this)(params, p)
    case VfScheduler() => new SchedulerArithImp(this)(params, p)
    // NOTE(review): returning null here (e.g. for NoScheduler) would NPE at first
    // use of `module`; consider throwing an explicit elaboration error instead.
    case _ => null
  }
}

/** IO bundle shared by all scheduler implementations.
  *
  * Aggregates: dispatch enqueue ports, writeback-driven wakeups (immediate and
  * one-cycle-delayed variants), inter-scheduler wakeups, datapath issue/response
  * channels, and (for the memory scheduler only) LSQ/memory-block interfaces.
  */
class SchedulerIO()(implicit params: SchdBlockParams, p: Parameters) extends XSBundle {
  // params alias
  private val LoadQueueSize = VirtualLoadQueueSize
  // std (store-data) IQs do not take uops directly from dispatch, so they are
  // excluded from the dispatch enqueue-port count.
  val fromDispatchUopNum = params.issueBlockParams.filter(x => x.StdCnt == 0).map(_.numEnq).sum
  val allIssueParams = params.issueBlockParams.filter(_.StdCnt == 0)
  val IssueQueueDeqSum = allIssueParams.map(_.numDeq).sum
  val maxIQSize = allIssueParams.map(_.numEntries).max
  val fromTop = new Bundle {
    val hartId = Input(UInt(8.W))
  }
  val fromWbFuBusyTable = new Bundle{
    val fuBusyTableRead = MixedVec(params.issueBlockParams.map(x => Input(x.genWbFuBusyTableReadBundle)))
  }
  val wbFuBusyTable = MixedVec(params.issueBlockParams.map(x => Output(x.genWbFuBusyTableWriteBundle)))
  // Per-deq-port valid-entry counts; width is just enough to hold maxIQSize.
  val IQValidNumVec = Output(Vec(IssueQueueDeqSum, UInt((maxIQSize).U.getWidth.W)))

  val fromCtrlBlock = new Bundle {
    val flush = Flipped(ValidIO(new Redirect))
  }
  val fromDispatch = new Bundle {
    val uops = Vec(fromDispatchUopNum, Flipped(DecoupledIO(new DynInst)))
  }
  // Writeback ports per register-file kind; these feed the wakeup networks below.
  val intWriteBack = MixedVec(Vec(backendParams.numPregWb(IntData()),
    new RfWritePortWithConfig(backendParams.intPregParams.dataCfg, backendParams.intPregParams.addrWidth)))
  val fpWriteBack = MixedVec(Vec(backendParams.numPregWb(FpData()),
    new RfWritePortWithConfig(backendParams.fpPregParams.dataCfg, backendParams.fpPregParams.addrWidth)))
  val vfWriteBack = MixedVec(Vec(backendParams.numPregWb(VecData()),
    new RfWritePortWithConfig(backendParams.vfPregParams.dataCfg, backendParams.vfPregParams.addrWidth)))
  val v0WriteBack = MixedVec(Vec(backendParams.numPregWb(V0Data()),
    new RfWritePortWithConfig(backendParams.v0PregParams.dataCfg, backendParams.v0PregParams.addrWidth)))
  val vlWriteBack = MixedVec(Vec(backendParams.numPregWb(VlData()),
    new RfWritePortWithConfig(backendParams.vlPregParams.dataCfg, backendParams.vlPregParams.addrWidth)))
  // Delayed copies of the writeback ports (presumably registered one cycle
  // upstream — TODO confirm against the datapath that drives them).
  val intWriteBackDelayed = MixedVec(Vec(backendParams.numPregWb(IntData()),
    new RfWritePortWithConfig(backendParams.intPregParams.dataCfg, backendParams.intPregParams.addrWidth)))
  val fpWriteBackDelayed = MixedVec(Vec(backendParams.numPregWb(FpData()),
    new RfWritePortWithConfig(backendParams.fpPregParams.dataCfg, backendParams.fpPregParams.addrWidth)))
  val vfWriteBackDelayed = MixedVec(Vec(backendParams.numPregWb(VecData()),
    new RfWritePortWithConfig(backendParams.vfPregParams.dataCfg, backendParams.vfPregParams.addrWidth)))
  val v0WriteBackDelayed = MixedVec(Vec(backendParams.numPregWb(V0Data()),
    new RfWritePortWithConfig(backendParams.v0PregParams.dataCfg, backendParams.v0PregParams.addrWidth)))
  val vlWriteBackDelayed = MixedVec(Vec(backendParams.numPregWb(VlData()),
    new RfWritePortWithConfig(backendParams.vlPregParams.dataCfg, backendParams.vlPregParams.addrWidth)))
  // Issue ports toward the datapath (one inner vec per issue queue).
  val toDataPathAfterDelay: MixedVec[MixedVec[DecoupledIO[IssueQueueIssueBundle]]] = MixedVec(params.issueBlockParams.map(_.genIssueDecoupledBundle))

  // Summary flags about vl values being written back (zero / vlmax fast paths).
  val vlWriteBackInfo = new Bundle {
    val vlFromIntIsZero = Input(Bool())
    val vlFromIntIsVlmax = Input(Bool())
    val vlFromVfIsZero = Input(Bool())
    val vlFromVfIsVlmax = Input(Bool())
  }

  // Wakeups arriving from other scheduler blocks (immediate + delayed).
  val fromSchedulers = new Bundle {
    val wakeupVec: MixedVec[ValidIO[IssueQueueIQWakeUpBundle]] = Flipped(params.genIQWakeUpInValidBundle)
    val wakeupVecDelayed: MixedVec[ValidIO[IssueQueueIQWakeUpBundle]] = Flipped(params.genIQWakeUpInValidBundle)
  }

  // Wakeups produced by this block for other scheduler blocks.
  val toSchedulers = new Bundle {
    val wakeupVec: MixedVec[ValidIO[IssueQueueIQWakeUpBundle]] = params.genIQWakeUpOutValidBundle
  }

  val fromDataPath = new Bundle {
    val resp: MixedVec[MixedVec[OGRespBundle]] = MixedVec(params.issueBlockParams.map(x => Flipped(x.genOGRespBundle)))
    val og0Cancel = Input(ExuVec())
    // Todo: remove this after no cancel signal from og1
    val og1Cancel = Input(ExuVec())
    // replace RCIdx to Wakeup Queue
    val replaceRCIdx = OptionWrapper(params.needWriteRegCache, Vec(params.numWriteRegCache, Input(UInt(RegCacheIdxWidth.W))))
    // just be compatible to old code
    def apply(i: Int)(j: Int) = resp(i)(j)
  }

  // Per-IQ load issue responses, indexed in lockstep with params.issueBlockParams.
  val loadFinalIssueResp = MixedVec(params.issueBlockParams.map(x => MixedVec(Vec(x.LdExuCnt, Flipped(ValidIO(new IssueQueueDeqRespBundle()(p, x)))))))
  val vecLoadFinalIssueResp = MixedVec(params.issueBlockParams.map(x => MixedVec(Vec(x.VlduCnt, Flipped(ValidIO(new IssueQueueDeqRespBundle()(p, x)))))))
  val memAddrIssueResp = MixedVec(params.issueBlockParams.map(x => MixedVec(Vec(x.LdExuCnt, Flipped(ValidIO(new IssueQueueDeqRespBundle()(p, x)))))))
  val vecLoadIssueResp = MixedVec(params.issueBlockParams.map(x => MixedVec(Vec(x.VlduCnt, Flipped(ValidIO(new IssueQueueDeqRespBundle()(p, x)))))))

  val ldCancel = Vec(backendParams.LduCnt + backendParams.HyuCnt, Flipped(new LoadCancelIO))

  // Memory-block interfaces exist only for the memory scheduler.
  val fromMem = if (params.isMemSchd) Some(new Bundle {
    val ldaFeedback = Flipped(Vec(params.LduCnt, new MemRSFeedbackIO))
    val staFeedback = Flipped(Vec(params.StaCnt, new MemRSFeedbackIO))
    val hyuFeedback = Flipped(Vec(params.HyuCnt, new MemRSFeedbackIO))
    val vstuFeedback = Flipped(Vec(params.VstuCnt, new MemRSFeedbackIO(isVector = true)))
    val vlduFeedback = Flipped(Vec(params.VlduCnt, new MemRSFeedbackIO(isVector = true)))
    val stIssuePtr = Input(new SqPtr())
    val lcommit = Input(UInt(log2Up(CommitWidth + 1).W))
    val scommit = Input(UInt(log2Ceil(EnsbufferWidth + 1).W)) // connected to `memBlock.io.sqDeq` instead of ROB
    val wakeup = Vec(params.LdExuCnt, Flipped(Valid(new DynInst)))
    val lqDeqPtr = Input(new LqPtr)
    val sqDeqPtr = Input(new SqPtr)
    // from lsq
    val lqCancelCnt = Input(UInt(log2Up(LoadQueueSize + 1).W))
    val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))
    val memWaitUpdateReq = Flipped(new MemWaitUpdateReqBundle)
  }) else None
  val toMem = if (params.isMemSchd) Some(new Bundle {
    val loadFastMatch = Output(Vec(params.LduCnt, new IssueQueueLoadBundle))
  }) else None
  // og2 responses only for IQs that declare they need them (e.g. vector).
  val fromOg2Resp = if(params.needOg2Resp) Some(MixedVec(params.issueBlockParams.filter(_.needOg2Resp).map(x => Flipped(x.genOG2RespBundle)))) else None
}

/** Common wiring shared by arithmetic and memory scheduler implementations.
  *
  * Responsibilities visible here:
  *  - build exuIdx -> wakeup maps for inter-IQ wakeups (immediate + delayed);
  *  - fan writeback ports out into wakeup bundles per register-file kind;
  *  - connect per-IQ wakeups, cancels, vl info, datapath issue/response ports;
  *  - default-tie and (optionally) connect register-cache replace indices;
  *  - collect base performance events.
  */
abstract class SchedulerImpBase(wrapper: Scheduler)(implicit params: SchdBlockParams, p: Parameters)
  extends LazyModuleImp(wrapper)
  with HasXSParameter
{
  val io = IO(new SchedulerIO())

  // alias: look up an incoming inter-IQ wakeup by its source exuIdx.
  private val iqWakeUpInMap: Map[Int, ValidIO[IssueQueueIQWakeUpBundle]] =
    io.fromSchedulers.wakeupVec.map(x => (x.bits.exuIdx, x)).toMap
  private val iqWakeUpInMapDelayed: Map[Int, ValidIO[IssueQueueIQWakeUpBundle]] =
    io.fromSchedulers.wakeupVecDelayed.map(x => (x.bits.exuIdx, x)).toMap
  private val schdType = params.schdType

  // Modules
  val issueQueues: Seq[IssueQueueImp] = wrapper.issueQueue.map(_.module)

  // std IQs are excluded here, matching the IQValidNumVec sizing in SchedulerIO.
  io.IQValidNumVec := issueQueues.filter(_.params.StdCnt == 0).map(_.io.validCntDeqVec).flatten
  // Wakeup wires fed from writeback ports, one set per register-file kind.
  val wakeupFromIntWBVec = Wire(params.genIntWBWakeUpSinkValidBundle)
  val wakeupFromFpWBVec = Wire(params.genFpWBWakeUpSinkValidBundle)
  val wakeupFromVfWBVec = Wire(params.genVfWBWakeUpSinkValidBundle)
  val wakeupFromV0WBVec = Wire(params.genV0WBWakeUpSinkValidBundle)
  val wakeupFromVlWBVec = Wire(params.genVlWBWakeUpSinkValidBundle)
  val wakeupFromIntWBVecDelayed = Wire(params.genIntWBWakeUpSinkValidBundle)
  val wakeupFromFpWBVecDelayed = Wire(params.genFpWBWakeUpSinkValidBundle)
  val wakeupFromVfWBVecDelayed = Wire(params.genVfWBWakeUpSinkValidBundle)
  val wakeupFromV0WBVecDelayed = Wire(params.genV0WBWakeUpSinkValidBundle)
  val wakeupFromVlWBVecDelayed = Wire(params.genVlWBWakeUpSinkValidBundle)

  // Translate each writeback port into a wakeup (valid := wen, dest enables + pdest).
  val wakeupFromWBVec = Seq(wakeupFromIntWBVec, wakeupFromFpWBVec, wakeupFromVfWBVec, wakeupFromV0WBVec, wakeupFromVlWBVec)
  val allWriteBack = Seq(io.intWriteBack, io.fpWriteBack, io.vfWriteBack, io.v0WriteBack, io.vlWriteBack)
  wakeupFromWBVec.zip(allWriteBack).map{ case (sinks, sources) =>
    sinks.zip(sources).map{ case (sink, source) =>
      sink.valid := source.wen
      sink.bits.rfWen := source.intWen
      sink.bits.fpWen := source.fpWen
      sink.bits.vecWen := source.vecWen
      sink.bits.v0Wen := source.v0Wen
      sink.bits.vlWen := source.vlWen
      sink.bits.pdest := source.addr
    }
  }

  // Same translation for the delayed writeback ports.
  val wakeupFromWBVecDelayed = Seq(wakeupFromIntWBVecDelayed, wakeupFromFpWBVecDelayed, wakeupFromVfWBVecDelayed, wakeupFromV0WBVecDelayed, wakeupFromVlWBVecDelayed)
  val allWriteBackDelayed = Seq(io.intWriteBackDelayed, io.fpWriteBackDelayed, io.vfWriteBackDelayed, io.v0WriteBackDelayed, io.vlWriteBackDelayed)
  wakeupFromWBVecDelayed.zip(allWriteBackDelayed).map { case (sinks, sources) =>
    sinks.zip(sources).map { case (sink, source) =>
      sink.valid := source.wen
      sink.bits.rfWen := source.intWen
      sink.bits.fpWen := source.fpWen
      sink.bits.vecWen := source.vecWen
      sink.bits.v0Wen := source.v0Wen
      sink.bits.vlWen := source.vlWen
      sink.bits.pdest := source.addr
    }
  }
  // Connect bundles having the same wakeup source
  issueQueues.zipWithIndex.foreach { case(iq, i) =>
    iq.io.wakeupFromIQ.foreach { wakeUp =>
      val wakeUpIn = iqWakeUpInMap(wakeUp.bits.exuIdx)
      val exuIdx = wakeUp.bits.exuIdx
      println(s"[Backend] Connect wakeup exuIdx ${exuIdx}")
      connectSamePort(wakeUp,wakeUpIn)
      backendParams.connectWakeup(exuIdx)
      // If the source EXU fans out copied pdest/wen signals, use this IQ's
      // dedicated copy instead of the shared one (overrides connectSamePort).
      if (backendParams.isCopyPdest(exuIdx)) {
        println(s"[Backend] exuIdx ${exuIdx} use pdestCopy ${backendParams.getCopyPdestIndex(exuIdx)}")
        wakeUp.bits.pdest := wakeUpIn.bits.pdestCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.rfWenCopy.nonEmpty) wakeUp.bits.rfWen := wakeUpIn.bits.rfWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.fpWenCopy.nonEmpty) wakeUp.bits.fpWen := wakeUpIn.bits.fpWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.vecWenCopy.nonEmpty) wakeUp.bits.vecWen := wakeUpIn.bits.vecWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.v0WenCopy.nonEmpty) wakeUp.bits.v0Wen := wakeUpIn.bits.v0WenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.vlWenCopy.nonEmpty) wakeUp.bits.vlWen := wakeUpIn.bits.vlWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.loadDependencyCopy.nonEmpty) wakeUp.bits.loadDependency := wakeUpIn.bits.loadDependencyCopy.get(backendParams.getCopyPdestIndex(exuIdx))
      }
      // Mask wen bits the IQ has no source ports for (last connect wins).
      if (iq.params.numIntSrc == 0) wakeUp.bits.rfWen := false.B
      if (iq.params.numFpSrc == 0) wakeUp.bits.fpWen := false.B
      if (iq.params.numVfSrc == 0) wakeUp.bits.vecWen := false.B
      if (iq.params.numV0Src == 0) wakeUp.bits.v0Wen := false.B
      if (iq.params.numVlSrc == 0) wakeUp.bits.vlWen := false.B
    }
    iq.io.wakeupFromIQDelayed.foreach { wakeUp =>
      val wakeUpIn = iqWakeUpInMapDelayed(wakeUp.bits.exuIdx)
      connectSamePort(wakeUp, wakeUpIn)
      if (iq.params.numIntSrc == 0) wakeUp.bits.rfWen := false.B
      if (iq.params.numFpSrc == 0) wakeUp.bits.fpWen := false.B
      if (iq.params.numVfSrc == 0) wakeUp.bits.vecWen := false.B
      if (iq.params.numV0Src == 0) wakeUp.bits.v0Wen := false.B
      if (iq.params.numVlSrc == 0) wakeUp.bits.vlWen := false.B
    }
    iq.io.og0Cancel := io.fromDataPath.og0Cancel
    iq.io.og1Cancel := io.fromDataPath.og1Cancel
    if (iq.params.needLoadDependency)
      iq.io.ldCancel := io.ldCancel
    else
      iq.io.ldCancel := 0.U.asTypeOf(io.ldCancel)
  }

  // connect the vl writeback informatino to the issue queues
  issueQueues.zipWithIndex.foreach { case(iq, i) =>
    iq.io.vlFromIntIsVlmax := io.vlWriteBackInfo.vlFromIntIsVlmax
    iq.io.vlFromIntIsZero := io.vlWriteBackInfo.vlFromIntIsZero
    iq.io.vlFromVfIsVlmax := io.vlWriteBackInfo.vlFromVfIsVlmax
    iq.io.vlFromVfIsZero := io.vlWriteBackInfo.vlFromVfIsZero
  }

  // Look up outgoing inter-IQ wakeups by source exuIdx.
  private val iqWakeUpOutMap: Map[Int, ValidIO[IssueQueueIQWakeUpBundle]] =
    issueQueues.flatMap(_.io.wakeupToIQ)
      .map(x => (x.bits.exuIdx, x))
      .toMap

  // Connect bundles having the same wakeup source
  io.toSchedulers.wakeupVec.foreach { wakeUp =>
    wakeUp := iqWakeUpOutMap(wakeUp.bits.exuIdx)
  }

  io.toDataPathAfterDelay.zipWithIndex.foreach { case (toDpDy, i) =>
    toDpDy <> issueQueues(i).io.deqDelay
  }

  // Response
  issueQueues.zipWithIndex.foreach { case (iq, i) =>
    iq.io.og0Resp.zipWithIndex.foreach { case (og0Resp, j) =>
      og0Resp := io.fromDataPath(i)(j).og0resp
    }
    iq.io.og1Resp.zipWithIndex.foreach { case (og1Resp, j) =>
      og1Resp := io.fromDataPath(i)(j).og1resp
    }
    // Route final/addr/vec-load issue responses only to the IQ kinds that
    // produce them; everything else gets a zero tie-off.
    iq.io.finalIssueResp.foreach(_.zipWithIndex.foreach { case (finalIssueResp, j) =>
      if (io.loadFinalIssueResp(i).isDefinedAt(j) && iq.params.isLdAddrIQ) {
        finalIssueResp := io.loadFinalIssueResp(i)(j)
      } else if (io.vecLoadFinalIssueResp(i).isDefinedAt(j) && iq.params.isVecLduIQ) {
        finalIssueResp := io.vecLoadFinalIssueResp(i)(j)
      }
      else {
        finalIssueResp := 0.U.asTypeOf(finalIssueResp)
      }
    })
    iq.io.memAddrIssueResp.foreach(_.zipWithIndex.foreach { case (memAddrIssueResp, j) =>
      if (io.memAddrIssueResp(i).isDefinedAt(j)) {
        memAddrIssueResp := io.memAddrIssueResp(i)(j)
      } else {
        memAddrIssueResp := 0.U.asTypeOf(memAddrIssueResp)
      }
    })
    iq.io.vecLoadIssueResp.foreach(_.zipWithIndex.foreach { case (resp, deqIdx) =>
      resp := io.vecLoadIssueResp(i)(deqIdx)
    })
    iq.io.wbBusyTableRead := io.fromWbFuBusyTable.fuBusyTableRead(i)
    io.wbFuBusyTable(i) := iq.io.wbBusyTableWrite
    // Default tie-off; overridden below when needWriteRegCache (last connect wins).
    iq.io.replaceRCIdx.foreach(x => x := 0.U.asTypeOf(x))
  }
  if (params.needOg2Resp) {
    issueQueues.filter(_.params.needOg2Resp).zip(io.fromOg2Resp.get).foreach{ case (iq, og2RespVec) =>
      iq.io.og2Resp.get.zip(og2RespVec).foreach{ case (iqOg2Resp, og2Resp) =>
        iqOg2Resp := og2Resp
      }
    }
  }

  // Connect each replace RCIdx to IQ
  if (params.needWriteRegCache) {
    // Keep only the replaceRCIdx ports whose EXU actually writes the reg cache,
    // in the same flattening order as io.fromDataPath.replaceRCIdx.
    val iqReplaceRCIdxVec = issueQueues.filter(_.params.needWriteRegCache).flatMap{ iq =>
      iq.params.allExuParams.zip(iq.io.replaceRCIdx.get).filter(_._1.needWriteRegCache).map(_._2)
    }
    iqReplaceRCIdxVec.zip(io.fromDataPath.replaceRCIdx.get).foreach{ case (iq, in) =>
      iq := in
    }

    println(s"[Scheduler] numWriteRegCache: ${params.numWriteRegCache}")
    println(s"[Scheduler] iqReplaceRCIdxVec: ${iqReplaceRCIdxVec.size}")
  }

  // perfEvent
  val lastCycleIqEnqFireVec = RegNext(VecInit(issueQueues.map(_.io.enq.map(_.fire)).flatten))
  // NOTE(review): this samples enq.head.ready, which is the *not-full* signal,
  // yet it feeds the "_full" perf counter below — looks inverted; confirm intent.
  val lastCycleIqFullVec = RegNext(VecInit(issueQueues.map(_.io.enq.head.ready)))

  val issueQueueFullVecPerf = issueQueues.zip(lastCycleIqFullVec)map{ case (iq, full) => (iq.params.getIQName + s"_full", full) }
  val basePerfEvents = Seq(
    ("issueQueue_enq_fire_cnt", PopCount(lastCycleIqEnqFireVec) )
  ) ++ issueQueueFullVecPerf

  println(s"[Scheduler] io.fromSchedulers.wakeupVec: ${io.fromSchedulers.wakeupVec.map(x => backendParams.getExuName(x.bits.exuIdx))}")
  println(s"[Scheduler] iqWakeUpInKeys: ${iqWakeUpInMap.keys}")

  println(s"[Scheduler] iqWakeUpOutKeys: ${iqWakeUpOutMap.keys}")
  println(s"[Scheduler] io.toSchedulers.wakeupVec: ${io.toSchedulers.wakeupVec.map(x => backendParams.getExuName(x.bits.exuIdx))}")
}

/** Scheduler implementation for the int/fp/vf (arithmetic) blocks.
  *
  * Connects dispatch uops straight through to the IQ enqueue ports and selects
  * the writeback-wakeup subset each IQ needs, per scheduler flavor.
  */
class SchedulerArithImp(override val wrapper: Scheduler)(implicit params: SchdBlockParams, p: Parameters)
  extends SchedulerImpBase(wrapper)
  with HasXSParameter
  with HasPerfEvents
{
  // 1:1 mapping between dispatch uop ports and flattened IQ enqueue ports.
  val issueQueuesUopIn = issueQueues.map(_.io.enq).flatten
  issueQueuesUopIn.zip(io.fromDispatch.uops).map(x => x._1 <> x._2)
  issueQueues.zipWithIndex.foreach { case (iq, i) =>
    iq.io.flush <> io.fromCtrlBlock.flush
    if (!iq.params.needLoadDependency) {
      iq.io.enq.map(x => x.bits.srcLoadDependency := 0.U.asTypeOf(x.bits.srcLoadDependency))
    }
    // Select only the writeback wakeup ports this IQ declares it needs,
    // keyed by port index. Vf additionally merges v0/vl wakeups.
    val intWBIQ = params.schdType match {
      case IntScheduler() => wakeupFromIntWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1)
      case FpScheduler() => wakeupFromFpWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1)
      case VfScheduler() => (wakeupFromVfWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromV0WBVec.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromVlWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1))
      // NOTE(review): null here would NPE at the zip below; unreachable for the
      // flavors that instantiate this class, but an explicit error would be safer.
      case _ => null
    }
    val intWBIQDelayed = params.schdType match {
      case IntScheduler() => wakeupFromIntWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1)
      case FpScheduler() => wakeupFromFpWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1)
      case VfScheduler() => (wakeupFromVfWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromV0WBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromVlWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1))
      case _ => null
    }
    iq.io.wakeupFromWB.zip(intWBIQ).foreach{ case (sink, source) => sink := source}
    iq.io.wakeupFromWBDelayed.zip(intWBIQDelayed).foreach{ case (sink, source) => sink := source}
  }

  val perfEvents = basePerfEvents
  generatePerfEvent()
}

// FIXME: Vector mem instructions may not be handled properly!
/** Scheduler implementation for the memory block.
  *
  * Beyond the base wiring, this pairs each store-address (sta/hyu) enqueue port
  * with a store-data (std) enqueue port fed from the same dispatch uop, routes
  * LSQ feedback/pointers into the memory-address IQs, and hooks up load wakeups
  * coming back from the memory block.
  */
class SchedulerMemImp(override val wrapper: Scheduler)(implicit params: SchdBlockParams, p: Parameters)
  extends SchedulerImpBase(wrapper)
  with HasXSParameter
  with HasPerfEvents
{

  // std IQs take no dispatch ports directly; they are driven from sta below.
  val issueQueuesUopIn = issueQueues.filter(_.params.StdCnt == 0).map(_.io.enq).flatten
  issueQueuesUopIn.zip(io.fromDispatch.uops).map(x => x._1 <> x._2)
  // Per-enqueue-port EXU params for all non-std ports, in dispatch-port order.
  val noStdExuParams = params.issueBlockParams.map(x => Seq.fill(x.numEnq)(x.exuBlockParams)).flatten.filter{x => x.map(!_.hasStdFu).reduce(_ && _)}
  // Dispatch-port indices that carry store-address uops.
  val staIdx = noStdExuParams.zipWithIndex.filter{x => x._1.map(_.hasStoreAddrFu).reduce(_ || _)}.map(_._2)
  val staReady = issueQueues.filter(iq => iq.params.StaCnt > 0).map(_.io.enq.map(_.ready)).flatten
  val stdReady = issueQueues.filter(iq => iq.params.StdCnt > 0).map(_.io.enq.map(_.ready)).flatten
  // A store dispatch port is ready only when both its sta and std IQ ports can
  // accept — the two halves of the store must enqueue together.
  staIdx.zipWithIndex.map{ case (sta, i) => {
    io.fromDispatch.uops(sta).ready := staReady(i) && stdReady(i)
  }}
  // AMOCAS store-address drop: suppress sta enqueue for flagged uops.
  issueQueues.filter(iq => iq.params.StaCnt > 0).map(_.io.enq).flatten.zipWithIndex.map{ case (iq, idx) =>
    iq.valid := io.fromDispatch.uops(staIdx(idx)).valid && !io.fromDispatch.uops(staIdx(idx)).bits.isDropAmocasSta
  }
  val staValidFromDispatch = staIdx.map(idx => io.fromDispatch.uops(idx).valid)
  // IQ groupings by function-unit kind (not mutually exclusive, see comments).
  val memAddrIQs = issueQueues.filter(_.params.isMemAddrIQ)
  val stAddrIQs = issueQueues.filter(iq => iq.params.StaCnt > 0) // included in memAddrIQs
  val ldAddrIQs = issueQueues.filter(iq => iq.params.LduCnt > 0)
  val stDataIQs = issueQueues.filter(iq => iq.params.StdCnt > 0)
  val vecMemIQs = issueQueues.filter(_.params.isVecMemIQ)
  val (hyuIQs, hyuIQIdxs) = issueQueues.zipWithIndex.filter(_._1.params.HyuCnt > 0).unzip

  println(s"[SchedulerMemImp] memAddrIQs.size: ${memAddrIQs.size}, enq.size: ${memAddrIQs.map(_.io.enq.size).sum}")
  println(s"[SchedulerMemImp] stAddrIQs.size: ${stAddrIQs.size }, enq.size: ${stAddrIQs.map(_.io.enq.size).sum}")
  println(s"[SchedulerMemImp] ldAddrIQs.size: ${ldAddrIQs.size }, enq.size: ${ldAddrIQs.map(_.io.enq.size).sum}")
  println(s"[SchedulerMemImp] stDataIQs.size: ${stDataIQs.size }, enq.size: ${stDataIQs.map(_.io.enq.size).sum}")
  println(s"[SchedulerMemImp] hyuIQs.size: ${hyuIQs.size }, enq.size: ${hyuIQs.map(_.io.enq.size).sum}")
  require(memAddrIQs.nonEmpty && stDataIQs.nonEmpty)

  io.toMem.get.loadFastMatch := 0.U.asTypeOf(io.toMem.get.loadFastMatch) // TODO: is still needed?

  // Load wakeups from the memory block, one per load-capable deq port.
  private val loadWakeUp = issueQueues.filter(_.params.LdExuCnt > 0).map(_.asInstanceOf[IssueQueueMemAddrImp].io.memIO.get.loadWakeUp).flatten
  require(loadWakeUp.length == io.fromMem.get.wakeup.length)
  loadWakeUp.zip(io.fromMem.get.wakeup).foreach(x => x._1 := x._2)

  // All memory-address IQs: flush, load-dependency tie-off, and the full set
  // of writeback wakeups (int/fp/vf/v0/vl) each IQ declares it needs.
  memAddrIQs.zipWithIndex.foreach { case (iq, i) =>
    iq.io.flush <> io.fromCtrlBlock.flush
    if (!iq.params.needLoadDependency) {
      iq.io.enq.map(x => x.bits.srcLoadDependency := 0.U.asTypeOf(x.bits.srcLoadDependency))
    }
    iq.io.wakeupFromWB.zip(
      wakeupFromIntWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromFpWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromVfWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromV0WBVec.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromVlWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1)
    ).foreach{ case (sink, source) => sink := source}
    iq.io.wakeupFromWBDelayed.zip(
      wakeupFromIntWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromFpWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromVfWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromV0WBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromVlWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1)
    ).foreach { case (sink, source) => sink := source }
  }

  // Load-address IQs: no RS feedback used; store-set wait info from memblock.
  ldAddrIQs.zipWithIndex.foreach {
    case (imp: IssueQueueMemAddrImp, i) =>
      imp.io.memIO.get.feedbackIO.head := 0.U.asTypeOf(imp.io.memIO.get.feedbackIO.head)
      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
    case _ =>
  }

  // Store-address IQs: per-IQ sta feedback plus store-set wait info.
  stAddrIQs.zipWithIndex.foreach {
    case (imp: IssueQueueMemAddrImp, i) =>
      imp.io.memIO.get.feedbackIO.head := io.fromMem.get.staFeedback(i)
      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
    case _ =>
  }

  // Hybrid (load+store) IQs: hyu feedback on port 0, port 1 tied off, and the
  // second deq/datapath port disabled (see TODO below).
  hyuIQs.zip(hyuIQIdxs).foreach {
    case (imp: IssueQueueMemAddrImp, idx) =>
      imp.io.memIO.get.feedbackIO.head := io.fromMem.get.hyuFeedback.head
      imp.io.memIO.get.feedbackIO(1) := 0.U.asTypeOf(imp.io.memIO.get.feedbackIO(1))
      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
      // TODO: refactor ditry code
      imp.io.deqDelay(1).ready := false.B
      io.toDataPathAfterDelay(idx)(1).valid := false.B
      io.toDataPathAfterDelay(idx)(1).bits := 0.U.asTypeOf(io.toDataPathAfterDelay(idx)(1).bits)
    case _ =>
  }

  private val staIdxSeq = (stAddrIQs).map(iq => iq.params.idxInSchBlk)
  private val hyaIdxSeq = (hyuIQs).map(iq => iq.params.idxInSchBlk)

  println(s"[SchedulerMemImp] sta iq idx in memSchdBlock: $staIdxSeq")
  println(s"[SchedulerMemImp] hya iq idx in memSchdBlock: $hyaIdxSeq")

  // Pair address ports with data ports: the first staEnqs.size std ports serve
  // the sta IQs, the remainder serve the hybrid (hyu) IQs.
  private val staEnqs = stAddrIQs.map(_.io.enq).flatten
  private val stdEnqs = stDataIQs.map(_.io.enq).flatten.take(staEnqs.size)
  private val hyaEnqs = hyuIQs.map(_.io.enq).flatten
  private val hydEnqs = stDataIQs.map(_.io.enq).flatten.drop(staEnqs.size)

  require(staEnqs.size == stdEnqs.size, s"number of enq ports of store address IQs(${staEnqs.size}) " +
    s"should be equal to number of enq ports of store data IQs(${stdEnqs.size})")

  require(hyaEnqs.size == hydEnqs.size, s"number of enq ports of hybrid address IQs(${hyaEnqs.size}) " +
    s"should be equal to number of enq ports of hybrid data IQs(${hydEnqs.size})")

  stDataIQs.zipWithIndex.foreach { case (iq, i) =>
    iq.io.flush <> io.fromCtrlBlock.flush
    iq.io.wakeupFromWB.zip(
      wakeupFromIntWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromFpWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromVfWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromV0WBVec.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromVlWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq
    ).foreach{ case (sink, source) => sink := source}
    iq.io.wakeupFromWBDelayed.zip(
      wakeupFromIntWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromFpWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromVfWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromV0WBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromVlWBVecDelayed.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq
    ).foreach { case (sink, source) => sink := source }
    // here disable fp load fast wakeup to std, and no FEX wakeup to std
    iq.io.wakeupFromIQ.map(_.bits.fpWen := false.B)
  }

  // Mirror each address enqueue into its paired data enqueue, remapping the
  // store-data source from the address uop's src slot 1 into the std's slot 0.
  (stdEnqs ++ hydEnqs).zip(staEnqs ++ hyaEnqs).zipWithIndex.foreach { case ((stdIQEnq, staIQEnq), i) =>
    // Use the raw dispatch valid (not staIQEnq.valid) so a dropped AMOCAS sta
    // still enqueues its data half.
    stdIQEnq.valid := staValidFromDispatch(i)
    stdIQEnq.bits := staIQEnq.bits
    // Store data reuses store addr src(1) in dispatch2iq
    // [dispatch2iq] --src*------src*(0)--> [staIQ|hyaIQ]
    //                       \
    //                        ---src*(1)--> [stdIQ]
    // Since the src(1) of sta is easier to get, stdIQEnq.bits.src*(0) is assigned to staIQEnq.bits.src*(1)
    // instead of dispatch2Iq.io.out(x).bits.src*(1)
    val stdIdx = 1
    stdIQEnq.bits.srcState(0) := staIQEnq.bits.srcState(stdIdx)
    stdIQEnq.bits.srcLoadDependency(0) := staIQEnq.bits.srcLoadDependency(stdIdx)
    stdIQEnq.bits.srcType(0) := staIQEnq.bits.srcType(stdIdx)
    stdIQEnq.bits.psrc(0) := staIQEnq.bits.psrc(stdIdx)
    stdIQEnq.bits.sqIdx := staIQEnq.bits.sqIdx
    stdIQEnq.bits.useRegCache(0) := staIQEnq.bits.useRegCache(stdIdx)
    stdIQEnq.bits.regCacheIdx(0) := staIQEnq.bits.regCacheIdx(stdIdx)
  }

  // Vector-memory IQs: LSQ deq pointers, store-set wait info, and writeback wakeups.
  vecMemIQs.foreach {
    case imp: IssueQueueVecMemImp =>
      imp.io.memIO.get.sqDeqPtr.foreach(_ := io.fromMem.get.sqDeqPtr)
      imp.io.memIO.get.lqDeqPtr.foreach(_ := io.fromMem.get.lqDeqPtr)
      // not used
      //imp.io.memIO.get.feedbackIO.head := io.fromMem.get.vstuFeedback.head // only vector store replay
      // maybe not used
      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
      imp.io.wakeupFromWB.zip(
        wakeupFromIntWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
          wakeupFromFpWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
          wakeupFromVfWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
          wakeupFromV0WBVec.zipWithIndex.filter(x => imp.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
          wakeupFromVlWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq
      ).foreach{ case (sink, source) => sink := source}
      imp.io.wakeupFromWBDelayed.zip(
        wakeupFromIntWBVecDelayed.zipWithIndex.filter(x => imp.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
          wakeupFromFpWBVecDelayed.zipWithIndex.filter(x => imp.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
          wakeupFromVfWBVecDelayed.zipWithIndex.filter(x => imp.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
          wakeupFromV0WBVecDelayed.zipWithIndex.filter(x => imp.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
          wakeupFromVlWBVecDelayed.zipWithIndex.filter(x => imp.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq
      ).foreach { case (sink, source) => sink := source }

    case _ =>
  }
  // Vector store feedback: flattened across all vec-mem IQs, must match the
  // memblock's vstuFeedback port count exactly.
  val vecMemFeedbackIO: Seq[MemRSFeedbackIO] = vecMemIQs.map {
    case imp: IssueQueueVecMemImp =>
      imp.io.memIO.get.feedbackIO
  }.flatten
  assert(vecMemFeedbackIO.size == io.fromMem.get.vstuFeedback.size, "vecMemFeedback size dont match!")
  vecMemFeedbackIO.zip(io.fromMem.get.vstuFeedback).foreach{
    case (sink, source) =>
      sink := source
  }

  val perfEvents = basePerfEvents
  generatePerfEvent()
}