xref: /XiangShan/src/main/scala/xiangshan/backend/CtrlBlock.scala (revision 4c7680e068fa5d78388788d8bcc46893b51f56bb)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import utility._
import utils._
import xiangshan.ExceptionNO._
import xiangshan._
import xiangshan.backend.Bundles.{DecodedInst, DynInst, ExceptionInfo, ExuOutput}
import xiangshan.backend.ctrlblock.{DebugLSIO, DebugLsInfoBundle, LsTopdownInfo, MemCtrl, RedirectGenerator}
import xiangshan.backend.datapath.DataConfig.VAddrData
import xiangshan.backend.decode.{DecodeStage, FusionDecoder}
import xiangshan.backend.dispatch.{CoreDispatchTopDownIO, Dispatch, DispatchQueue}
import xiangshan.backend.fu.PFEvent
import xiangshan.backend.fu.vector.Bundles.VType
import xiangshan.backend.rename.{Rename, RenameTableWrapper, SnapshotGenerator}
import xiangshan.backend.rob.{Rob, RobCSRIO, RobCoreTopDownIO, RobDebugRollingIO, RobLsqIO, RobPtr}
import xiangshan.frontend.{FtqPtr, FtqRead, Ftq_RF_Components}
import xiangshan.mem.{LqPtr, LsqEnqIO}

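/** Control block outputs toward the FTQ: per-lane ROB commit info, the backend
  * redirect, and "ahead" ftqIdx hints (one per backend redirect source) together
  * with a one-hot selector telling the FTQ which hint the final redirect corresponds to.
  */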
class CtrlToFtqIO(implicit p: Parameters) extends XSBundle {
  val rob_commits = Vec(CommitWidth, Valid(new RobCommitInfo))
  val redirect = Valid(new Redirect)
  val ftqIdxAhead = Vec(BackendRedirectNum, Valid(new FtqPtr))
  val ftqIdxSelOH = Valid(UInt(BackendRedirectNum.W))
}

class CtrlBlock(params: BackendParams)(implicit p: Parameters) extends LazyModule {
  override def shouldBeInlined: Boolean = false

  val rob = LazyModule(new Rob(params))

  lazy val module = new CtrlBlockImp(this)(p, params)

}

class CtrlBlockImp(
  override val wrapper: CtrlBlock
)(implicit
  p: Parameters,
  params: BackendParams
) extends LazyModuleImp(wrapper)
  with HasXSParameter
  with HasCircularQueuePtrHelper
  with HasPerfEvents
{
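  // pcMem read-port allocation, by name: PC reads for the EXUs that need a PC
  // (jumpPcVec), the redirect and memory-dependence reads of RedirectGenerator, the
  // ROB flush, and the load/hybrid/store units (store ports exist only when
  // EnableStorePrefetchSMS is set).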
  val pcMemRdIndexes = new NamedIndexes(Seq(
    "exu"       -> params.numPcReadPort,
    "redirect"  -> 1,
    "memPred"   -> 1,
    "robFlush"  -> 1,
    "load"      -> params.LduCnt,
    "hybrid"    -> params.HyuCnt,
    "store"     -> (if (EnableStorePrefetchSMS) params.StaCnt else 0)
  ))

  private val numPcMemReadForExu = params.numPcReadPort
  private val numPcMemRead = pcMemRdIndexes.maxIdx

  println(s"pcMem read num: $numPcMemRead")
  println(s"pcMem read num for exu: $numPcMemReadForExu")

  val io = IO(new CtrlBlockIO())

  val decode = Module(new DecodeStage)
  val fusionDecoder = Module(new FusionDecoder)
  val rat = Module(new RenameTableWrapper)
  val rename = Module(new Rename)
  val dispatch = Module(new Dispatch)
  val intDq = Module(new DispatchQueue(dpParams.IntDqSize, RenameWidth, dpParams.IntDqDeqWidth))
  val fpDq = Module(new DispatchQueue(dpParams.FpDqSize, RenameWidth, dpParams.FpDqDeqWidth))
  val lsDq = Module(new DispatchQueue(dpParams.LsDqSize, RenameWidth, dpParams.LsDqDeqWidth))
  val redirectGen = Module(new RedirectGenerator)
  private val pcMem = Module(new SyncDataModuleTemplate(new Ftq_RF_Components, FtqSize, numPcMemRead, 1, "BackendPC"))
  private val rob = wrapper.rob.module
  private val memCtrl = Module(new MemCtrl(params))

  private val disableFusion = decode.io.csrCtrl.singlestep || !decode.io.csrCtrl.fusion_enable

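  // Redirect generation. Signal prefixes give the pipeline stage they are valid in:
  // a ROB flush is s0 at rob.io.flushOut and s1 after one register stage, while the
  // redirect produced by RedirectGenerator from EXU writebacks arrives at s3. Names
  // like s1_s3_redirect mean "s1 on the ROB-flush path, s3 on the mispredict path".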
  private val s0_robFlushRedirect = rob.io.flushOut
  private val s1_robFlushRedirect = Wire(Valid(new Redirect))
  s1_robFlushRedirect.valid := RegNext(s0_robFlushRedirect.valid)
  s1_robFlushRedirect.bits := RegEnable(s0_robFlushRedirect.bits, s0_robFlushRedirect.valid)

  pcMem.io.raddr(pcMemRdIndexes("robFlush").head) := s0_robFlushRedirect.bits.ftqIdx.value
  private val s1_robFlushPc = pcMem.io.rdata(pcMemRdIndexes("robFlush").head).getPc(RegEnable(s0_robFlushRedirect.bits.ftqOffset, s0_robFlushRedirect.valid))
  private val s3_redirectGen = redirectGen.io.stage2Redirect
  private val s1_s3_redirect = Mux(s1_robFlushRedirect.valid, s1_robFlushRedirect, s3_redirectGen)
  private val s2_s4_pendingRedirectValid = RegInit(false.B)
  when (s1_s3_redirect.valid) {
    s2_s4_pendingRedirectValid := true.B
  }.elsewhen (RegNext(io.frontend.toFtq.redirect.valid)) {
    s2_s4_pendingRedirectValid := false.B
  }

  // The redirect is registered again (RegNext) inside the ExuBlocks and IssueBlocks.
  val s2_s4_redirect = RegNextWithEnable(s1_s3_redirect)
  val s3_s5_redirect = RegNextWithEnable(s2_s4_redirect)
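
  // Writeback results are registered for one cycle before being passed on (e.g. to the
  // ROB); a result whose robIdx is already covered by an in-flight redirect has its
  // delayed valid killed, so flushed writebacks are never observed downstream.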
  private val delayedNotFlushedWriteBack = io.fromWB.wbData.map(x => {
    val valid = x.valid
    val killedByOlder = x.bits.robIdx.needFlush(Seq(s1_s3_redirect, s2_s4_redirect, s3_s5_redirect))
    val delayed = Wire(Valid(new ExuOutput(x.bits.params)))
    delayed.valid := RegNext(valid && !killedByOlder)
    delayed.bits := RegEnable(x.bits, x.valid)
    delayed.bits.debugInfo.writebackTime := GTimer()
    delayed
  }).toSeq

  private val exuPredecode = VecInit(
    delayedNotFlushedWriteBack.filter(_.bits.redirect.nonEmpty).map(x => x.bits.predecodeInfo.get).toSeq
  )

  private val exuRedirects: Seq[ValidIO[Redirect]] = delayedNotFlushedWriteBack.filter(_.bits.redirect.nonEmpty).map(x => {
    val out = Wire(Valid(new Redirect()))
    out.valid := x.valid && x.bits.redirect.get.valid && x.bits.redirect.get.bits.cfiUpdate.isMisPred
    out.bits := x.bits.redirect.get.bits
    out.bits.debugIsCtrl := true.B
    out.bits.debugIsMemVio := false.B
    out
  }).toSeq

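  // A memory-order violation reported by the memory block becomes a load-replay
  // redirect one cycle later, unless an older in-flight redirect already flushes the
  // violating instruction.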
  private val memViolation = io.fromMem.violation
  val loadReplay = Wire(ValidIO(new Redirect))
  loadReplay.valid := RegNext(memViolation.valid &&
    !memViolation.bits.robIdx.needFlush(Seq(s1_s3_redirect, s2_s4_redirect))
  )
  loadReplay.bits := RegEnable(memViolation.bits, memViolation.valid)
  loadReplay.bits.debugIsCtrl := false.B
  loadReplay.bits.debugIsMemVio := true.B

  val pdestReverse = rob.io.commits.info.map(info => info.pdest).reverse

  pcMem.io.raddr(pcMemRdIndexes("redirect").head) := redirectGen.io.redirectPcRead.ptr.value
  redirectGen.io.redirectPcRead.data := pcMem.io.rdata(pcMemRdIndexes("redirect").head).getPc(RegNext(redirectGen.io.redirectPcRead.offset))
  pcMem.io.raddr(pcMemRdIndexes("memPred").head) := redirectGen.io.memPredPcRead.ptr.value
  redirectGen.io.memPredPcRead.data := pcMem.io.rdata(pcMemRdIndexes("memPred").head).getPc(RegNext(redirectGen.io.memPredPcRead.offset))

  for ((pcMemIdx, i) <- pcMemRdIndexes("load").zipWithIndex) {
    pcMem.io.raddr(pcMemIdx) := io.memLdPcRead(i).ptr.value
    io.memLdPcRead(i).data := pcMem.io.rdata(pcMemIdx).getPc(RegNext(io.memLdPcRead(i).offset))
  }

  for ((pcMemIdx, i) <- pcMemRdIndexes("hybrid").zipWithIndex) {
    pcMem.io.raddr(pcMemIdx) := io.memHyPcRead(i).ptr.value
    io.memHyPcRead(i).data := pcMem.io.rdata(pcMemIdx).getPc(RegNext(io.memHyPcRead(i).offset))
  }

  if (EnableStorePrefetchSMS) {
    for ((pcMemIdx, i) <- pcMemRdIndexes("store").zipWithIndex) {
      pcMem.io.raddr(pcMemIdx) := io.memStPcRead(i).ptr.value
      io.memStPcRead(i).data := pcMem.io.rdata(pcMemIdx).getPc(RegNext(io.memStPcRead(i).offset))
    }
  } else {
    io.memStPcRead.foreach(_.data := 0.U)
  }

  redirectGen.io.hartId := io.fromTop.hartId
  redirectGen.io.exuRedirect := exuRedirects.toSeq
  redirectGen.io.exuOutPredecode := exuPredecode // guarded by exuRedirect.valid
  redirectGen.io.loadReplay <> loadReplay

  redirectGen.io.robFlush := s1_robFlushRedirect.valid

  val s5_flushFromRobValidAhead = DelayN(s1_robFlushRedirect.valid, 4)
  val s6_flushFromRobValid = RegNext(s5_flushFromRobValidAhead)
  val frontendFlushBits = RegEnable(s1_robFlushRedirect.bits, s1_robFlushRedirect.valid) // ??
  // When the ROB commits an instruction that also triggers a flush, the frontend is
  // notified of the flush but not of the commit. The flush may reach the frontend
  // several cycles late, and letting the commit arrive before the flush would cause
  // errors. Therefore all flush reasons behave like exceptions from the frontend's
  // point of view.
  for (i <- 0 until CommitWidth) {
    // Why gate on flushOut: instructions carrying flushPipe are not committed to the
    // frontend. Committing them would make the flush arrive after the commit, which
    // the frontend cannot accept.
    val s1_isCommit = rob.io.commits.commitValid(i) && rob.io.commits.isCommit && !s0_robFlushRedirect.valid
    io.frontend.toFtq.rob_commits(i).valid := RegNext(s1_isCommit)
    io.frontend.toFtq.rob_commits(i).bits := RegEnable(rob.io.commits.info(i), s1_isCommit)
  }
  io.frontend.toFtq.redirect.valid := s6_flushFromRobValid || s3_redirectGen.valid
  io.frontend.toFtq.redirect.bits := Mux(s6_flushFromRobValid, frontendFlushBits, s3_redirectGen.bits)
  io.frontend.toFtq.ftqIdxSelOH.valid := s6_flushFromRobValid || redirectGen.io.stage2Redirect.valid
  io.frontend.toFtq.ftqIdxSelOH.bits := Cat(s6_flushFromRobValid, redirectGen.io.stage2oldestOH & Fill(NumRedirect + 1, !s6_flushFromRobValid))

  // jmp/brh
  for (i <- 0 until NumRedirect) {
    io.frontend.toFtq.ftqIdxAhead(i).valid := exuRedirects(i).valid && exuRedirects(i).bits.cfiUpdate.isMisPred && !s1_robFlushRedirect.valid && !s5_flushFromRobValidAhead
    io.frontend.toFtq.ftqIdxAhead(i).bits := exuRedirects(i).bits.ftqIdx
  }
  // load replay
  io.frontend.toFtq.ftqIdxAhead(NumRedirect).valid := loadReplay.valid && !s1_robFlushRedirect.valid && !s5_flushFromRobValidAhead
  io.frontend.toFtq.ftqIdxAhead(NumRedirect).bits := loadReplay.bits.ftqIdx
  // exception
  io.frontend.toFtq.ftqIdxAhead.last.valid := s5_flushFromRobValidAhead
  io.frontend.toFtq.ftqIdxAhead.last.bits := frontendFlushBits.ftqIdx
  // Be careful here:
  // T0: rob.io.flushOut, s0_robFlushRedirect
  // T1: s1_robFlushRedirect, rob.io.exception.valid
  // T2: csr.redirect.valid
  // T3: csr.exception.valid
  // T4: csr.trapTarget
  // T5: ctrlBlock.trapTarget
  // T6: io.frontend.toFtq.stage2Redirect.valid
  val s2_robFlushPc = RegEnable(Mux(s1_robFlushRedirect.bits.flushItself(),
    s1_robFlushPc, // replay inst
    s1_robFlushPc + Mux(s1_robFlushRedirect.bits.isRVC, 2.U, 4.U) // flush pipe
  ), s1_robFlushRedirect.valid)
  private val s2_csrIsXRet = io.robio.csr.isXRet
  private val s5_csrIsTrap = DelayN(rob.io.exception.valid, 4)
  private val s2_s5_trapTargetFromCsr = io.robio.csr.trapTarget

  val flushTarget = Mux(s2_csrIsXRet || s5_csrIsTrap, s2_s5_trapTargetFromCsr, s2_robFlushPc)
  when (s6_flushFromRobValid) {
    io.frontend.toFtq.redirect.bits.level := RedirectLevel.flush
    io.frontend.toFtq.redirect.bits.cfiUpdate.target := RegNext(flushTarget)
  }

  // vtype commit
  decode.io.commitVType.bits := io.fromDataPath.vtype
  decode.io.commitVType.valid := RegNext(rob.io.isVsetFlushPipe)

  io.toDataPath.vtypeAddr := rob.io.vconfigPdest

  decode.io.walkVType := rob.io.toDecode.vtype

  decode.io.redirect := s1_s3_redirect.valid || s2_s4_pendingRedirectValid

  decode.io.in.zip(io.frontend.cfVec).foreach { case (decodeIn, frontendCf) =>
    decodeIn.valid := frontendCf.valid
    frontendCf.ready := decodeIn.ready
    decodeIn.bits.connectCtrlFlow(frontendCf.bits)
  }
  decode.io.csrCtrl := RegNext(io.csrCtrl)
  decode.io.intRat <> rat.io.intReadPorts
  decode.io.fpRat <> rat.io.fpReadPorts
  decode.io.vecRat <> rat.io.vecReadPorts
  decode.io.fusion := 0.U.asTypeOf(decode.io.fusion) // Todo
  decode.io.stallReason.in <> io.frontend.stallReason

  // snapshot check
  class CFIRobIdx extends Bundle {
    val robIdx = Vec(RenameWidth, new RobPtr)
    val isCFI = Vec(RenameWidth, Bool())
  }
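  // Rename snapshots: one snapshot is taken per rename group that requests it,
  // recording every slot's robIdx and which slots are CFIs. The oldest snapshot is
  // released (deq) when the ROB commits the first instruction recorded in it, and a
  // snapshot is flushed when a redirect invalidates all of its CFIs (see flushVec).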
  val genSnapshot = Cat(rename.io.out.map(out => out.fire && out.bits.snapshot)).orR
  val snpt = Module(new SnapshotGenerator(0.U.asTypeOf(new CFIRobIdx)))
  snpt.io.enq := genSnapshot
  snpt.io.enqData.robIdx := rename.io.out.map(_.bits.robIdx)
  snpt.io.enqData.isCFI := rename.io.out.map(_.bits.snapshot)
  snpt.io.deq := snpt.io.valids(snpt.io.deqPtr.value) && rob.io.commits.isCommit &&
    Cat(rob.io.commits.commitValid.zip(rob.io.commits.robIdx).map(x => x._1 && x._2 === snpt.io.snapshots(snpt.io.deqPtr.value).robIdx.head)).orR
  snpt.io.redirect := s1_s3_redirect.valid
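  // flushVec(i): snapshot i is dropped when, for every slot of its group, either the
  // slot is not a CFI or the redirect flushes that slot or an earlier one; i.e. a
  // snapshot survives only if at least one of its CFIs (and everything before it in
  // the group) is older than the redirect.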
  val flushVec = VecInit(snpt.io.snapshots.map { snapshot =>
    val notCFIMask = snapshot.isCFI.map(~_)
    val shouldFlushMask = snapshot.robIdx.map(snptRobIdx => snptRobIdx >= s1_s3_redirect.bits.robIdx || isFull(snptRobIdx, s1_s3_redirect.bits.robIdx))
    val realShouldFlush = (1 to RenameWidth).map(i => Cat(shouldFlushMask.take(i)).orR)
    s1_s3_redirect.valid && Cat(realShouldFlush.zip(notCFIMask).map(x => x._1 | x._2)).andR
  })
  val flushVecNext = RegNext(flushVec, 0.U.asTypeOf(flushVec))
  snpt.io.flushVec := flushVecNext

  val useSnpt = VecInit.tabulate(RenameSnapshotNum)(idx =>
    snpt.io.valids(idx) && s1_s3_redirect.bits.robIdx >= snpt.io.snapshots(idx).robIdx.head
  ).reduceTree(_ || _)
  val snptSelect = MuxCase(
    0.U(log2Ceil(RenameSnapshotNum).W),
    (1 to RenameSnapshotNum).map(i => (snpt.io.enqPtr - i.U).value).map(idx =>
      (snpt.io.valids(idx) && s1_s3_redirect.bits.robIdx >= snpt.io.snapshots(idx).robIdx.head, idx)
    )
  )

  rob.io.snpt.snptEnq := DontCare
  rob.io.snpt.snptDeq := snpt.io.deq
  rob.io.snpt.useSnpt := useSnpt
  rob.io.snpt.snptSelect := snptSelect
  rob.io.snpt.flushVec := flushVecNext
  rat.io.snpt.snptEnq := genSnapshot
  rat.io.snpt.snptDeq := snpt.io.deq
  rat.io.snpt.useSnpt := useSnpt
  rat.io.snpt.snptSelect := snptSelect
  rat.io.snpt.flushVec := flushVec

  val decodeHasException = decode.io.out.map(x => x.bits.exceptionVec(instrPageFault) || x.bits.exceptionVec(instrAccessFault))
  // fusion decoder
  for (i <- 0 until DecodeWidth) {
    fusionDecoder.io.in(i).valid := decode.io.out(i).valid && !(decodeHasException(i) || disableFusion)
    fusionDecoder.io.in(i).bits := decode.io.out(i).bits.instr
    if (i > 0) {
      fusionDecoder.io.inReady(i - 1) := decode.io.out(i).ready
    }
  }

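  // Decode-to-rename pipeline stage. A slot that the fusion decoder has merged into
  // its predecessor is cleared here (fusionDecoder.io.clear), so rename never sees the
  // second half of a fused pair.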
  private val decodePipeRename = Wire(Vec(RenameWidth, DecoupledIO(new DecodedInst)))

  for (i <- 0 until RenameWidth) {
    PipelineConnect(decode.io.out(i), decodePipeRename(i), rename.io.in(i).ready,
      s1_s3_redirect.valid || s2_s4_pendingRedirectValid, moduleName = Some("decodePipeRenameModule"))

    decodePipeRename(i).ready := rename.io.in(i).ready
    rename.io.in(i).valid := decodePipeRename(i).valid && !fusionDecoder.io.clear(i)
    rename.io.in(i).bits := decodePipeRename(i).bits
  }

  for (i <- 0 until RenameWidth - 1) {
    fusionDecoder.io.dec(i) := decodePipeRename(i).bits
    rename.io.fusionInfo(i) := fusionDecoder.io.info(i)

    // update the first RenameWidth - 1 instructions
    decode.io.fusion(i) := fusionDecoder.io.out(i).valid && rename.io.out(i).fire
    when (fusionDecoder.io.out(i).valid) {
      fusionDecoder.io.out(i).bits.update(rename.io.in(i).bits)
      // TODO: remove this dirty code for ftq update
      val sameFtqPtr = rename.io.in(i).bits.ftqPtr.value === rename.io.in(i + 1).bits.ftqPtr.value
      val ftqOffset0 = rename.io.in(i).bits.ftqOffset
      val ftqOffset1 = rename.io.in(i + 1).bits.ftqOffset
      val ftqOffsetDiff = ftqOffset1 - ftqOffset0
      val cond1 = sameFtqPtr && ftqOffsetDiff === 1.U
      val cond2 = sameFtqPtr && ftqOffsetDiff === 2.U
      val cond3 = !sameFtqPtr && ftqOffset1 === 0.U
      val cond4 = !sameFtqPtr && ftqOffset1 === 1.U
      rename.io.in(i).bits.commitType := Mux(cond1, 4.U, Mux(cond2, 5.U, Mux(cond3, 6.U, 7.U)))
      XSError(!cond1 && !cond2 && !cond3 && !cond4, p"new condition $sameFtqPtr $ftqOffset0 $ftqOffset1\n")
    }
  }

  // memory dependency prediction
  // At decode time, send the folded PC to the MDP.
  private val mdpFlodPcVec = Wire(Vec(DecodeWidth, UInt(MemPredPCWidth.W)))
  for (i <- 0 until DecodeWidth) {
    mdpFlodPcVec(i) := Mux(
      decode.io.out(i).fire,
      decode.io.in(i).bits.foldpc,
      rename.io.in(i).bits.foldpc
    )
  }

  // Currently, MDP info is only updated on a replay.
  memCtrl.io.redirect := s1_s3_redirect
  memCtrl.io.csrCtrl := io.csrCtrl                          // RegNext in memCtrl
  memCtrl.io.stIn := io.fromMem.stIn                        // RegNext in memCtrl
  memCtrl.io.memPredUpdate := redirectGen.io.memPredUpdate  // RegNext in memCtrl
  memCtrl.io.mdpFlodPcVec := mdpFlodPcVec
  memCtrl.io.dispatchLFSTio <> dispatch.io.lfst

  rat.io.redirect := s1_s3_redirect.valid
  rat.io.robCommits := rob.io.rabCommits
  rat.io.diffCommits := rob.io.diffCommits
  rat.io.intRenamePorts := rename.io.intRenamePorts
  rat.io.fpRenamePorts := rename.io.fpRenamePorts
  rat.io.vecRenamePorts := rename.io.vecRenamePorts

  rename.io.redirect := s1_s3_redirect
  rename.io.robCommits <> rob.io.rabCommits
  rename.io.waittable := (memCtrl.io.waitTable2Rename zip decode.io.out).map{ case(waittable2rename, decodeOut) =>
    RegEnable(waittable2rename, decodeOut.fire)
  }
  rename.io.ssit := memCtrl.io.ssit2Rename
  rename.io.intReadPorts := VecInit(rat.io.intReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.fpReadPorts := VecInit(rat.io.fpReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.vecReadPorts := VecInit(rat.io.vecReadPorts.map(x => VecInit(x.map(_.data))))
  rename.io.int_need_free := rat.io.int_need_free
  rename.io.int_old_pdest := rat.io.int_old_pdest
  rename.io.fp_old_pdest := rat.io.fp_old_pdest
  rename.io.vec_old_pdest := rat.io.vec_old_pdest
  rename.io.debug_int_rat.foreach(_ := rat.io.debug_int_rat.get)
  rename.io.debug_fp_rat.foreach(_ := rat.io.debug_fp_rat.get)
  rename.io.debug_vec_rat.foreach(_ := rat.io.debug_vec_rat.get)
  rename.io.debug_vconfig_rat.foreach(_ := rat.io.debug_vconfig_rat.get)
  rename.io.stallReason.in <> decode.io.stallReason.out
  rename.io.snpt.snptEnq := DontCare
  rename.io.snpt.snptDeq := snpt.io.deq
  rename.io.snpt.useSnpt := useSnpt
  rename.io.snpt.snptSelect := snptSelect
  rename.io.robIsEmpty := rob.io.enq.isEmpty
  rename.io.snpt.flushVec := flushVecNext
  rename.io.snptLastEnq.valid := !isEmpty(snpt.io.enqPtr, snpt.io.deqPtr)
  rename.io.snptLastEnq.bits := snpt.io.snapshots((snpt.io.enqPtr - 1.U).value).robIdx.head

  val renameOut = Wire(chiselTypeOf(rename.io.out))
  renameOut <> rename.io.out
  dispatch.io.fromRename <> renameOut
  renameOut.zip(dispatch.io.recv).foreach { case (renamePort, recv) => renamePort.ready := recv }
  dispatch.io.fromRenameIsFp := rename.io.toDispatchIsFp
  dispatch.io.fromRenameIsInt := rename.io.toDispatchIsInt
  dispatch.io.hartId := io.fromTop.hartId
  dispatch.io.redirect := s1_s3_redirect
  dispatch.io.enqRob <> rob.io.enq
  dispatch.io.robHead := rob.io.debugRobHead
  dispatch.io.stallReason <> rename.io.stallReason.out
  dispatch.io.lqCanAccept := io.lqCanAccept
  dispatch.io.sqCanAccept := io.sqCanAccept
  dispatch.io.robHeadNotReady := rob.io.headNotReady
  dispatch.io.robFull := rob.io.robFull
  dispatch.io.singleStep := RegNext(io.csrCtrl.singlestep)

  intDq.io.enq <> dispatch.io.toIntDq
  intDq.io.redirect <> s2_s4_redirect

  fpDq.io.enq <> dispatch.io.toFpDq
  fpDq.io.redirect <> s2_s4_redirect

  lsDq.io.enq <> dispatch.io.toLsDq
  lsDq.io.redirect <> s2_s4_redirect

  io.toIssueBlock.intUops <> intDq.io.deq
  io.toIssueBlock.vfUops  <> fpDq.io.deq
  io.toIssueBlock.memUops <> lsDq.io.deq
  io.toIssueBlock.allocPregs <> dispatch.io.allocPregs
  io.toIssueBlock.flush   <> s2_s4_redirect

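  // pcMem (the backend copy of the FTQ PC RAM) has a single write port, written one
  // cycle after the FTQ asserts pc_mem_wen; the read ports allocated above are served
  // from this copy.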
  pcMem.io.wen.head   := RegNext(io.frontend.fromFtq.pc_mem_wen)
  pcMem.io.waddr.head := RegEnable(io.frontend.fromFtq.pc_mem_waddr, io.frontend.fromFtq.pc_mem_wen)
  pcMem.io.wdata.head := RegEnable(io.frontend.fromFtq.pc_mem_wdata, io.frontend.fromFtq.pc_mem_wen)

  private val jumpPcVec: Vec[UInt] = Wire(Vec(params.numPcReadPort, UInt(VAddrData().dataWidth.W)))
  io.toIssueBlock.pcVec := jumpPcVec

  io.toDataPath.flush := s2_s4_redirect
  io.toExuBlock.flush := s2_s4_redirect

  for ((pcMemIdx, i) <- pcMemRdIndexes("exu").zipWithIndex) {
    pcMem.io.raddr(pcMemIdx) := intDq.io.deqNext(i).ftqPtr.value
    jumpPcVec(i) := pcMem.io.rdata(pcMemIdx).getPc(RegNext(intDq.io.deqNext(i).ftqOffset))
  }

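  // Distribute jumpPcVec to the dispatch-queue outputs: each scheduler that has
  // numPcReadPort > 0 takes a contiguous slice of jumpPcVec (in allSchdParams order)
  // and drives it onto the pc field of its uops.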
  val dqOuts = Seq(io.toIssueBlock.intUops) ++ Seq(io.toIssueBlock.vfUops) ++ Seq(io.toIssueBlock.memUops)
  dqOuts.zipWithIndex.foreach { case (dqOut, dqIdx) =>
    dqOut.map(_.bits.pc).zipWithIndex.foreach { case (pc, portIdx) =>
      if (params.allSchdParams(dqIdx).numPcReadPort > 0) {
        val realJumpPcVec = jumpPcVec.drop(params.allSchdParams.take(dqIdx).map(_.numPcReadPort).sum).take(params.allSchdParams(dqIdx).numPcReadPort)
        pc := realJumpPcVec(portIdx)
      }
    }
  }

  rob.io.hartId := io.fromTop.hartId
  rob.io.redirect := s1_s3_redirect
  rob.io.writeback := delayedNotFlushedWriteBack

  io.redirect := s1_s3_redirect

  // rob to int block
  io.robio.csr <> rob.io.csr
  // When wfi is disabled, it will not block ROB commit.
  rob.io.csr.wfiEvent := io.robio.csr.wfiEvent
  rob.io.wfi_enable := decode.io.csrCtrl.wfi_enable

  io.toTop.cpuHalt := DelayN(rob.io.cpu_halt, 5)

  io.robio.csr.perfinfo.retiredInstr <> RegNext(rob.io.csr.perfinfo.retiredInstr)
  io.robio.exception := rob.io.exception
  io.robio.exception.bits.pc := s1_robFlushPc

  // rob to mem block
  io.robio.lsq <> rob.io.lsq

  io.debug_int_rat    .foreach(_ := rat.io.diff_int_rat.get)
  io.debug_fp_rat     .foreach(_ := rat.io.diff_fp_rat.get)
  io.debug_vec_rat    .foreach(_ := rat.io.diff_vec_rat.get)
  io.debug_vconfig_rat.foreach(_ := rat.io.diff_vconfig_rat.get)

  rob.io.debug_ls := io.robio.debug_ls
  rob.io.debugHeadLsIssue := io.robio.robHeadLsIssue
  rob.io.lsTopdownInfo := io.robio.lsTopdownInfo
  rob.io.debugEnqLsq := io.debugEnqLsq

  io.robio.robDeqPtr := rob.io.robDeqPtr

  io.debugTopDown.fromRob := rob.io.debugTopDown.toCore
  dispatch.io.debugTopDown.fromRob := rob.io.debugTopDown.toDispatch
  dispatch.io.debugTopDown.fromCore := io.debugTopDown.fromCore
  io.debugRolling := rob.io.debugRolling

  io.perfInfo.ctrlInfo.robFull := RegNext(rob.io.robFull)
  io.perfInfo.ctrlInfo.intdqFull := RegNext(intDq.io.dqFull)
  io.perfInfo.ctrlInfo.fpdqFull := RegNext(fpDq.io.dqFull)
  io.perfInfo.ctrlInfo.lsdqFull := RegNext(lsDq.io.dqFull)

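  // Performance monitoring: this block's events are selected by hpmevent slots 8 to 15
  // of the distributed CSR.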
  val pfevent = Module(new PFEvent)
  pfevent.io.distribute_csr := RegNext(io.csrCtrl.distribute_csr)
  val csrevents = pfevent.io.hpmevent.slice(8, 16)

  val perfinfo = IO(new Bundle(){
    val perfEventsRs      = Input(Vec(params.IqCnt, new PerfEvent))
    val perfEventsEu0     = Input(Vec(6, new PerfEvent))
    val perfEventsEu1     = Input(Vec(6, new PerfEvent))
  })

  val allPerfEvents = Seq(decode, rename, dispatch, intDq, fpDq, lsDq, rob).flatMap(_.getPerf)
  val hpmEvents = allPerfEvents ++ perfinfo.perfEventsEu0 ++ perfinfo.perfEventsEu1 ++ perfinfo.perfEventsRs
  val perfEvents = HPerfMonitor(csrevents, hpmEvents).getPerfEvents
  generatePerfEvent()
}

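/** Flat IO of the control block: frontend interface, dispatch outputs toward the
  * issue/data-path/EXU blocks, writeback inputs, memory-block interfaces (store-set
  * info, violations, PC reads), ROB-side CSR/exception/LSQ channels, and debug/perf taps.
  */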
class CtrlBlockIO()(implicit p: Parameters, params: BackendParams) extends XSBundle {
  val fromTop = new Bundle {
    val hartId = Input(UInt(8.W))
  }
  val toTop = new Bundle {
    val cpuHalt = Output(Bool())
  }
  val frontend = Flipped(new FrontendToCtrlIO())
  val toIssueBlock = new Bundle {
    val flush = ValidIO(new Redirect)
    val allocPregs = Vec(RenameWidth, Output(new ResetPregStateReq))
    val intUops = Vec(dpParams.IntDqDeqWidth, DecoupledIO(new DynInst))
    val vfUops = Vec(dpParams.FpDqDeqWidth, DecoupledIO(new DynInst))
    val memUops = Vec(dpParams.LsDqDeqWidth, DecoupledIO(new DynInst))
    val pcVec = Output(Vec(params.numPcReadPort, UInt(VAddrData().dataWidth.W)))
  }
  val fromDataPath = new Bundle{
    val vtype = Input(new VType)
  }
  val toDataPath = new Bundle {
    val vtypeAddr = Output(UInt(PhyRegIdxWidth.W))
    val flush = ValidIO(new Redirect)
  }
  val toExuBlock = new Bundle {
    val flush = ValidIO(new Redirect)
  }
  val fromWB = new Bundle {
    val wbData = Flipped(MixedVec(params.genWrite2CtrlBundles))
  }
  val redirect = ValidIO(new Redirect)
  val fromMem = new Bundle {
    val stIn = Vec(params.StaCnt, Flipped(ValidIO(new DynInst))) // use storeSetHit, ssid, robIdx
    val violation = Flipped(ValidIO(new Redirect))
  }
  val memLdPcRead = Vec(params.LduCnt, Flipped(new FtqRead(UInt(VAddrBits.W))))
  val memStPcRead = Vec(params.StaCnt, Flipped(new FtqRead(UInt(VAddrBits.W))))
  val memHyPcRead = Vec(params.HyuCnt, Flipped(new FtqRead(UInt(VAddrBits.W))))

  val csrCtrl = Input(new CustomCSRCtrlIO)
  val robio = new Bundle {
    val csr = new RobCSRIO
    val exception = ValidIO(new ExceptionInfo)
    val lsq = new RobLsqIO
    val lsTopdownInfo = Vec(params.LduCnt + params.HyuCnt, Input(new LsTopdownInfo))
    val debug_ls = Input(new DebugLSIO())
    val robHeadLsIssue = Input(Bool())
    val robDeqPtr = Output(new RobPtr)
  }

  val perfInfo = Output(new Bundle{
    val ctrlInfo = new Bundle {
      val robFull   = Bool()
      val intdqFull = Bool()
      val fpdqFull  = Bool()
      val lsdqFull  = Bool()
    }
  })
  val debug_int_rat     = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_fp_rat      = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_vec_rat     = if (params.debugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None
  val debug_vconfig_rat = if (params.debugEn) Some(Output(UInt(PhyRegIdxWidth.W))) else None // TODO: use me

  val sqCanAccept = Input(Bool())
  val lqCanAccept = Input(Bool())

  val debugTopDown = new Bundle {
    val fromRob = new RobCoreTopDownIO
    val fromCore = new CoreDispatchTopDownIO
  }
  val debugRolling = new RobDebugRollingIO
  val debugEnqLsq = Input(new LsqEnqIO)
}

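// Assigns each name a contiguous range of indices, in declaration order.
// Example (illustrative): new NamedIndexes(Seq("a" -> 2, "b" -> 1)) gives
// maxIdx == 3, apply("a") == Seq(0, 1) and apply("b") == Seq(2); a count of 0
// (e.g. "store" with SMS prefetch disabled) yields an empty range.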
class NamedIndexes(namedCnt: Seq[(String, Int)]) {
  require(namedCnt.map(_._1).distinct.size == namedCnt.size, "names in namedCnt must be unique")

  val maxIdx = namedCnt.map(_._2).sum
  val nameRangeMap: Map[String, (Int, Int)] = namedCnt.indices.map { i =>
    val begin = namedCnt.slice(0, i).map(_._2).sum
    val end = begin + namedCnt(i)._2
    (namedCnt(i)._1, (begin, end))
  }.toMap

  def apply(name: String): Seq[Int] = {
    require(nameRangeMap.contains(name))
    nameRangeMap(name)._1 until nameRangeMap(name)._2
  }
}