// /XiangShan/src/main/scala/xiangshan/backend/issue/IssueQueue.scala (revision a63155a6a44b3c7714e55906b55ebf92e0efc125)
package xiangshan.backend.issue

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import utility.HasCircularQueuePtrHelper
import utils._
import xiangshan._
import xiangshan.backend.Bundles._
import xiangshan.backend.decode.{ImmUnion, Imm_LUI_LOAD}
import xiangshan.backend.datapath.DataConfig._
import xiangshan.backend.datapath.DataSource
import xiangshan.backend.fu.{FuConfig, FuType}
import xiangshan.mem.{MemWaitUpdateReq, SqPtr}
import xiangshan.backend.datapath.NewPipelineConnect

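// IssueQueue is a LazyModule wrapper: the concrete implementation is selected by the
// scheduler type of this issue block. Memory-scheduler blocks without std ports use the
// MemAddr implementation; blocks that contain std ports fall back to the Int implementation.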
class IssueQueue(params: IssueBlockParams)(implicit p: Parameters) extends LazyModule with HasXSParameter {
  implicit val iqParams = params
  lazy val module = iqParams.schdType match {
    case IntScheduler() => new IssueQueueIntImp(this)
    case VfScheduler() => new IssueQueueVfImp(this)
    case MemScheduler() => if (iqParams.StdCnt == 0) new IssueQueueMemAddrImp(this)
      else new IssueQueueIntImp(this)
    case _ => null
  }
}

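// Occupancy status reported to dispatch: empty/full flags plus leftVec, whose numEnq + 1
// bits flag how close the non-enqueue region is to full (see the leftVec assignments near
// the end of IssueQueueImp).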
class IssueQueueStatusBundle(numEnq: Int) extends Bundle {
  val empty = Output(Bool())
  val full = Output(Bool())
  val leftVec = Output(Vec(numEnq + 1, Bool()))
}

class IssueQueueDeqRespBundle(implicit p:Parameters, params: IssueBlockParams) extends EntryDeqRespBundle

class IssueQueueIO()(implicit p: Parameters, params: IssueBlockParams) extends XSBundle {
  // Inputs
  val flush = Flipped(ValidIO(new Redirect))
  val enq = Vec(params.numEnq, Flipped(DecoupledIO(new DynInst)))

  val deqResp = Vec(params.numDeq, Flipped(ValidIO(new IssueQueueDeqRespBundle)))
  val og0Resp = Vec(params.numDeq, Flipped(ValidIO(new IssueQueueDeqRespBundle)))
  val og1Resp = Vec(params.numDeq, Flipped(ValidIO(new IssueQueueDeqRespBundle)))
  val finalIssueResp = OptionWrapper(params.LduCnt > 0, Vec(params.numDeq, Flipped(ValidIO(new IssueQueueDeqRespBundle))))
  val wbBusyTableRead = Input(params.genWbFuBusyTableReadBundle())
  val wbBusyTableWrite = Output(params.genWbFuBusyTableWriteBundle())
  val wakeupFromWB: MixedVec[ValidIO[IssueQueueWBWakeUpBundle]] = Flipped(params.genWBWakeUpSinkValidBundle)
  val wakeupFromIQ: MixedVec[ValidIO[IssueQueueIQWakeUpBundle]] = Flipped(params.genIQWakeUpSinkValidBundle)
  val og0Cancel = Input(ExuVec(backendParams.numExu))
  val og1Cancel = Input(ExuVec(backendParams.numExu))
  val ldCancel = Vec(backendParams.LduCnt, Flipped(new LoadCancelIO))

  // Outputs
  val deq: MixedVec[DecoupledIO[IssueQueueIssueBundle]] = params.genIssueDecoupledBundle
  val wakeupToIQ: MixedVec[ValidIO[IssueQueueIQWakeUpBundle]] = params.genIQWakeUpSourceValidBundle
  val status = Output(new IssueQueueStatusBundle(params.numEnq))
  val statusNext = Output(new IssueQueueStatusBundle(params.numEnq))

  val fromCancelNetwork = Flipped(params.genIssueDecoupledBundle)
  val deqDelay: MixedVec[DecoupledIO[IssueQueueIssueBundle]] = params.genIssueDecoupledBundle // = deq.cloneType
  def allWakeUp = wakeupFromWB ++ wakeupFromIQ
}

class IssueQueueImp(override val wrapper: IssueQueue)(implicit p: Parameters, val params: IssueBlockParams)
  extends LazyModuleImp(wrapper)
  with HasXSParameter {

  println(s"[IssueQueueImp] ${params.getIQName} wakeupFromWB(${io.wakeupFromWB.size}), " +
    s"wakeup exu in(${params.wakeUpInExuSources.size}): ${params.wakeUpInExuSources.map(_.name).mkString("{",",","}")}, " +
    s"wakeup exu out(${params.wakeUpOutExuSources.size}): ${params.wakeUpOutExuSources.map(_.name).mkString("{",",","}")}, " +
    s"numEntries: ${params.numEntries}, numRegSrc: ${params.numRegSrc}")

  require(params.numExu <= 2, "IssueQueue does not support more than 2 deq ports")
  val deqFuCfgs     : Seq[Seq[FuConfig]] = params.exuBlockParams.map(_.fuConfigs)
  val allDeqFuCfgs  : Seq[FuConfig] = params.exuBlockParams.flatMap(_.fuConfigs)
  val fuCfgsCnt     : Map[FuConfig, Int] = allDeqFuCfgs.groupBy(x => x).map { case (cfg, cfgSeq) => (cfg, cfgSeq.length) }
  val commonFuCfgs  : Seq[FuConfig] = fuCfgsCnt.filter(_._2 > 1).keys.toSeq
  val fuLatencyMaps : Seq[Map[Int, Int]] = params.exuBlockParams.map(x => x.fuLatencyMap)

  println(s"[IssueQueueImp] ${params.getIQName} fuLatencyMaps: ${fuLatencyMaps}")
  println(s"[IssueQueueImp] ${params.getIQName} commonFuCfgs: ${commonFuCfgs.map(_.name)}")
  lazy val io = IO(new IssueQueueIO())
  dontTouch(io.deq)
  dontTouch(io.deqResp)
  // Modules

  val entries = Module(new Entries)
  val subDeqPolicies  = deqFuCfgs.map(x => if (x.nonEmpty) Some(Module(new DeqPolicy())) else None)
  val fuBusyTableWrite = params.exuBlockParams.map { case x => OptionWrapper(x.latencyValMax > 0, Module(new FuBusyTableWrite(x.fuLatencyMap))) }
  val fuBusyTableRead = params.exuBlockParams.map { case x => OptionWrapper(x.latencyValMax > 0, Module(new FuBusyTableRead(x.fuLatencyMap))) }
  val intWbBusyTableWrite = params.exuBlockParams.map { case x => OptionWrapper(x.intLatencyCertain, Module(new FuBusyTableWrite(x.intFuLatencyMap))) }
  val intWbBusyTableRead = params.exuBlockParams.map { case x => OptionWrapper(x.intLatencyCertain, Module(new FuBusyTableRead(x.intFuLatencyMap))) }
  val vfWbBusyTableWrite = params.exuBlockParams.map { case x => OptionWrapper(x.vfLatencyCertain, Module(new FuBusyTableWrite(x.vfFuLatencyMap))) }
  val vfWbBusyTableRead = params.exuBlockParams.map { case x => OptionWrapper(x.vfLatencyCertain, Module(new FuBusyTableRead(x.vfFuLatencyMap))) }

  class WakeupQueueFlush extends Bundle {
    val redirect = ValidIO(new Redirect)
    val ldCancel = Vec(backendParams.LduCnt, new LoadCancelIO)
    val og0Fail = Output(Bool())
    val og1Fail = Output(Bool())
  }

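  // A wakeup-queue entry is dropped when its uop is squashed: by a redirect, by a load
  // cancel on one of its load dependencies, or when the matching og0/og1 stage reported
  // an issue failure for that stage.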
  private def flushFunc(exuInput: ExuInput, flush: WakeupQueueFlush, stage: Int): Bool = {
    val redirectFlush = exuInput.robIdx.needFlush(flush.redirect)
    val loadDependencyFlush = LoadShouldCancel(exuInput.loadDependency, flush.ldCancel)
    val ogFailFlush = stage match {
      case 1 => flush.og0Fail
      case 2 => flush.og1Fail
      case _ => false.B
    }
    redirectFlush || loadDependencyFlush || ogFailFlush
  }

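  // Each cycle an entry stays in the wakeup queue, its load-dependency shift registers are
  // advanced one position, keeping them aligned with the load pipeline timing.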
  private def modificationFunc(exuInput: ExuInput): ExuInput = {
    val newExuInput = WireDefault(exuInput)
    newExuInput.loadDependency match {
      case Some(deps) => deps.zip(exuInput.loadDependency.get).foreach(x => x._1 := x._2 << 1)
      case None =>
    }
    newExuInput
  }

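  // For each exu that is an IQ wakeup source, a MultiWakeupQueue delays the issued uop by
  // its functional unit's latency (chosen from fuLatancySet via getDeqLat at enqueue), so
  // the wakeup reaches consumer IQs in the cycle the result becomes available. flushFunc
  // and modificationFunc above are applied while the uop waits in the queue.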
  val wakeUpQueues: Seq[Option[MultiWakeupQueue[ExuInput, WakeupQueueFlush]]] = params.exuBlockParams.map { x => OptionWrapper(x.isIQWakeUpSource, Module(
    new MultiWakeupQueue(new ExuInput(x), new WakeupQueueFlush, x.fuLatancySet, flushFunc, modificationFunc)
  ))}

  val intWbBusyTableIn = io.wbBusyTableRead.map(_.intWbBusyTable)
  val vfWbBusyTableIn = io.wbBusyTableRead.map(_.vfWbBusyTable)
  val intWbBusyTableOut = io.wbBusyTableWrite.map(_.intWbBusyTable)
  val vfWbBusyTableOut = io.wbBusyTableWrite.map(_.vfWbBusyTable)
  val intDeqRespSetOut = io.wbBusyTableWrite.map(_.intDeqRespSet)
  val vfDeqRespSetOut = io.wbBusyTableWrite.map(_.vfDeqRespSet)
  val fuBusyTableMask = Wire(Vec(params.numDeq, UInt(params.numEntries.W)))
  val intWbBusyTableMask = Wire(Vec(params.numDeq, UInt(params.numEntries.W)))
  val vfWbBusyTableMask = Wire(Vec(params.numDeq, UInt(params.numEntries.W)))
  val s0_enqValidVec = io.enq.map(_.valid)
  val s0_enqSelValidVec = Wire(Vec(params.numEnq, Bool()))
  val s0_enqNotFlush = !io.flush.valid
  val s0_enqBits = WireInit(VecInit(io.enq.map(_.bits)))
  val s0_doEnqSelValidVec = s0_enqSelValidVec.map(_ && s0_enqNotFlush) //enqValid && notFlush && enqReady


  // One deq port only needs one special deq policy
  val subDeqSelValidVec: Seq[Option[Vec[Bool]]] = subDeqPolicies.map(_.map(_ => Wire(Vec(params.numDeq, Bool()))))
  val subDeqSelOHVec: Seq[Option[Vec[UInt]]] = subDeqPolicies.map(_.map(_ => Wire(Vec(params.numDeq, UInt(params.numEntries.W)))))

  val finalDeqSelValidVec = Wire(Vec(params.numDeq, Bool()))
  val finalDeqSelOHVec    = Wire(Vec(params.numDeq, UInt(params.numEntries.W)))
  val finalDeqOH: IndexedSeq[UInt] = (finalDeqSelValidVec zip finalDeqSelOHVec).map { case (valid, oh) =>
    Mux(valid, oh, 0.U)
  }
  val finalDeqMask: UInt = finalDeqOH.reduce(_ | _)

  val deqRespVec = io.deqResp

  val validVec = VecInit(entries.io.valid.asBools)
  val canIssueVec = VecInit(entries.io.canIssue.asBools)
  val clearVec = VecInit(entries.io.clear.asBools)
  val deqFirstIssueVec = VecInit(entries.io.deq.map(_.isFirstIssue))

  val dataSources: Vec[Vec[DataSource]] = entries.io.dataSources
  val finalDataSources: Vec[Vec[DataSource]] = VecInit(finalDeqOH.map(oh => Mux1H(oh, dataSources)))
  // (entryIdx)(srcIdx)(exuIdx)
  val wakeUpL1ExuOH: Option[Vec[Vec[Vec[Bool]]]] = entries.io.srcWakeUpL1ExuOH
  val srcTimer: Option[Vec[Vec[UInt]]] = entries.io.srcTimer

  // (deqIdx)(srcIdx)(exuIdx)
  val finalWakeUpL1ExuOH: Option[Vec[Vec[Vec[Bool]]]] = wakeUpL1ExuOH.map(x => VecInit(finalDeqOH.map(oh => Mux1H(oh, x))))
  val finalSrcTimer = srcTimer.map(x => VecInit(finalDeqOH.map(oh => Mux1H(oh, x))))

  val wakeupEnqSrcStateBypassFromWB: Vec[Vec[UInt]] = Wire(Vec(io.enq.size, Vec(io.enq.head.bits.srcType.size, SrcState())))
  val wakeupEnqSrcStateBypassFromIQ: Vec[Vec[UInt]] = Wire(Vec(io.enq.size, Vec(io.enq.head.bits.srcType.size, SrcState())))
  val srcWakeUpEnqByIQMatrix = Wire(Vec(params.numEnq, Vec(params.numRegSrc, Vec(params.numWakeupFromIQ, Bool()))))

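  // Incoming IQ wakeups carry the producer's load-dependency shift registers. They are
  // shifted by one to account for the extra cycle before this entry can issue; a wakeup
  // coming from LDU k additionally sets the freshly shifted-in bit of its own lane.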
  val shiftedWakeupLoadDependencyByIQVec = Wire(Vec(params.numWakeupFromIQ, Vec(LoadPipelineWidth, UInt(3.W))))
  shiftedWakeupLoadDependencyByIQVec
    .zip(io.wakeupFromIQ.map(_.bits.loadDependency))
    .zip(params.wakeUpInExuSources.map(_.name)).foreach {
    case ((deps, originalDeps), name) => deps.zip(originalDeps).zipWithIndex.foreach {
      case ((dep, originalDep), deqPortIdx) =>
        if (name.contains("LDU") && name.replace("LDU", "").toInt == deqPortIdx)
          dep := originalDep << 1 | 1.U
        else
          dep := originalDep << 1
    }
  }

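  // Enqueue bypass: a source of an enqueueing uop may be woken up in the very cycle it
  // enters the queue, either by a writeback wakeup or by a (speculative) IQ wakeup.
  // IQ wakeups are suppressed when the producing load has just been cancelled
  // (ldTransCancel), so a cancelled speculative wakeup never marks the source ready.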
  for (i <- io.enq.indices) {
    for (j <- s0_enqBits(i).srcType.indices) {
      wakeupEnqSrcStateBypassFromWB(i)(j) := Cat(
        io.wakeupFromWB.map(x => x.bits.wakeUp(Seq((s0_enqBits(i).psrc(j), s0_enqBits(i).srcType(j))), x.valid).head)
      ).orR
    }
  }

  for (i <- io.enq.indices) {
    val numLsrc = s0_enqBits(i).srcType.size.min(entries.io.enq(i).bits.status.srcType.size)
    for (j <- s0_enqBits(i).srcType.indices) {
      val ldTransCancel = if (params.numWakeupFromIQ > 0 && j < numLsrc) Mux(
        srcWakeUpEnqByIQMatrix(i)(j).asUInt.orR,
        Mux1H(srcWakeUpEnqByIQMatrix(i)(j), io.wakeupFromIQ.map(_.bits.loadDependency).map(dep => LoadShouldCancel(Some(dep), io.ldCancel))),
        false.B
      ) else false.B
      wakeupEnqSrcStateBypassFromIQ(i)(j) := Cat(
        io.wakeupFromIQ.map(x => x.bits.wakeUp(Seq((s0_enqBits(i).psrc(j), s0_enqBits(i).srcType(j))), x.valid).head)
      ).orR && !ldTransCancel
    }
  }

  srcWakeUpEnqByIQMatrix.zipWithIndex.foreach { case (wakeups: Vec[Vec[Bool]], i) =>
    if (io.wakeupFromIQ.isEmpty) {
      wakeups := 0.U.asTypeOf(wakeups)
    } else {
      val wakeupVec: IndexedSeq[IndexedSeq[Bool]] = io.wakeupFromIQ.map((bundle: ValidIO[IssueQueueIQWakeUpBundle]) =>
        bundle.bits.wakeUp(s0_enqBits(i).psrc.take(params.numRegSrc) zip s0_enqBits(i).srcType.take(params.numRegSrc), bundle.valid)
      ).transpose
      wakeups := wakeupVec.map(x => VecInit(x))
    }
  }

  val fuTypeVec = Wire(Vec(params.numEntries, FuType()))
  val transEntryDeqVec = Wire(Vec(params.numEnq, ValidIO(new EntryBundle)))
  val deqEntryVec = Wire(Vec(params.numDeq, ValidIO(new EntryBundle)))
  val transSelVec = Wire(Vec(params.numEnq, UInt((params.numEntries-params.numEnq).W)))

  /**
    * Connection of [[entries]]
    */
  entries.io match { case entriesIO: EntriesIO =>
    entriesIO.flush <> io.flush
    entriesIO.wakeUpFromWB := io.wakeupFromWB
    entriesIO.wakeUpFromIQ := io.wakeupFromIQ
    entriesIO.og0Cancel := io.og0Cancel
    entriesIO.og1Cancel := io.og1Cancel
    entriesIO.ldCancel := io.ldCancel
    entriesIO.enq.zipWithIndex.foreach { case (enq: ValidIO[EntryBundle], i) =>
      enq.valid := s0_doEnqSelValidVec(i)
      val numLsrc = s0_enqBits(i).srcType.size.min(enq.bits.status.srcType.size)
      for(j <- 0 until numLsrc) {
        enq.bits.status.srcState(j) := s0_enqBits(i).srcState(j) |
                                       wakeupEnqSrcStateBypassFromWB(i)(j) |
                                       wakeupEnqSrcStateBypassFromIQ(i)(j)
        enq.bits.status.psrc(j) := s0_enqBits(i).psrc(j)
        enq.bits.status.srcType(j) := s0_enqBits(i).srcType(j)
        enq.bits.status.dataSources(j).value := Mux(wakeupEnqSrcStateBypassFromIQ(i)(j).asBool, DataSource.forward, s0_enqBits(i).dataSource(j).value)
      }
      enq.bits.status.fuType := s0_enqBits(i).fuType
      enq.bits.status.robIdx := s0_enqBits(i).robIdx
      enq.bits.status.issueTimer := "b11".U
      enq.bits.status.deqPortIdx := 0.U
      enq.bits.status.issued := false.B
      enq.bits.status.firstIssue := false.B
      enq.bits.status.blocked := false.B
      enq.bits.status.srcWakeUpL1ExuOH match {
        case Some(value) => value.zip(srcWakeUpEnqByIQMatrix(i)).zipWithIndex.foreach {
          case ((exuOH, wakeUpByIQOH), srcIdx) =>
            when(wakeUpByIQOH.asUInt.orR) {
              exuOH := Mux1H(wakeUpByIQOH, io.wakeupFromIQ.map(x => MathUtils.IntToOH(x.bits.exuIdx).U(backendParams.numExu.W))).asBools
            }.otherwise {
              exuOH := s0_enqBits(i).l1ExuOH(srcIdx)
            }
        }
        case None =>
      }
      enq.bits.status.srcTimer match {
        case Some(value) => value.zip(srcWakeUpEnqByIQMatrix(i)).zipWithIndex.foreach {
          case ((timer, wakeUpByIQOH), srcIdx) =>
            when(wakeUpByIQOH.asUInt.orR) {
              timer := 1.U.asTypeOf(timer)
            }.otherwise {
              timer := Mux(s0_enqBits(i).dataSource(srcIdx).value === DataSource.bypass, 2.U.asTypeOf(timer), 0.U.asTypeOf(timer))
            }
        }
        case None =>
      }
      enq.bits.status.srcLoadDependency.foreach(_.zip(srcWakeUpEnqByIQMatrix(i)).zipWithIndex.foreach {
        case ((dep, wakeUpByIQOH), srcIdx) =>
          dep := Mux(wakeUpByIQOH.asUInt.orR, Mux1H(wakeUpByIQOH, shiftedWakeupLoadDependencyByIQVec), 0.U.asTypeOf(dep))
      })
      enq.bits.imm := s0_enqBits(i).imm
      enq.bits.payload := s0_enqBits(i)
    }
    entriesIO.deq.zipWithIndex.foreach { case (deq, i) =>
      deq.deqSelOH.valid := finalDeqSelValidVec(i)
      deq.deqSelOH.bits := finalDeqSelOHVec(i)
    }
    entriesIO.deqResp.zipWithIndex.foreach { case (deqResp, i) =>
      deqResp.valid := io.deqResp(i).valid
      deqResp.bits.robIdx := io.deqResp(i).bits.robIdx
      deqResp.bits.dataInvalidSqIdx := io.deqResp(i).bits.dataInvalidSqIdx
      deqResp.bits.respType := io.deqResp(i).bits.respType
      deqResp.bits.rfWen := io.deqResp(i).bits.rfWen
      deqResp.bits.fuType := io.deqResp(i).bits.fuType
    }
    entriesIO.og0Resp.zipWithIndex.foreach { case (og0Resp, i) =>
      og0Resp.valid := io.og0Resp(i).valid
      og0Resp.bits.robIdx := io.og0Resp(i).bits.robIdx
      og0Resp.bits.dataInvalidSqIdx := io.og0Resp(i).bits.dataInvalidSqIdx
      og0Resp.bits.respType := io.og0Resp(i).bits.respType
      og0Resp.bits.rfWen := io.og0Resp(i).bits.rfWen
      og0Resp.bits.fuType := io.og0Resp(i).bits.fuType
    }
    entriesIO.og1Resp.zipWithIndex.foreach { case (og1Resp, i) =>
      og1Resp.valid := io.og1Resp(i).valid
      og1Resp.bits.robIdx := io.og1Resp(i).bits.robIdx
      og1Resp.bits.dataInvalidSqIdx := io.og1Resp(i).bits.dataInvalidSqIdx
      og1Resp.bits.respType := io.og1Resp(i).bits.respType
      og1Resp.bits.rfWen := io.og1Resp(i).bits.rfWen
      og1Resp.bits.fuType := io.og1Resp(i).bits.fuType
    }
    entriesIO.finalIssueResp.foreach(_.zipWithIndex.foreach { case (finalIssueResp, i) =>
      finalIssueResp := io.finalIssueResp.get(i)
    })
    transEntryDeqVec := entriesIO.transEntryDeqVec
    deqEntryVec := entriesIO.deqEntry
    fuTypeVec := entriesIO.fuType
    transSelVec := entriesIO.transSelVec
  }


  s0_enqSelValidVec := s0_enqValidVec.zip(io.enq).map{ case (enqValid, enq) => enqValid && enq.ready}

  protected val commonAccept: UInt = Cat(fuTypeVec.map(fuType =>
    Cat(commonFuCfgs.map(_.fuType.U === fuType)).orR
  ).reverse)

  // if deq port can accept the uop
  protected val canAcceptVec: Seq[UInt] = deqFuCfgs.map { fuCfgs: Seq[FuConfig] =>
    Cat(fuTypeVec.map(fuType => Cat(fuCfgs.map(_.fuType.U === fuType)).orR).reverse).asUInt
  }

  protected val deqCanAcceptVec: Seq[IndexedSeq[Bool]] = deqFuCfgs.map { fuCfgs: Seq[FuConfig] =>
    fuTypeVec.map(fuType =>
      Cat(fuCfgs.map(_.fuType.U === fuType)).asUInt.orR) // C+E0    C+E1
  }

  subDeqPolicies.zipWithIndex.foreach { case (dpOption: Option[DeqPolicy], i) =>
    if (dpOption.nonEmpty) {
      val dp = dpOption.get
      dp.io.request             := canIssueVec.asUInt & VecInit(deqCanAcceptVec(i)).asUInt & (~fuBusyTableMask(i)).asUInt & (~intWbBusyTableMask(i)).asUInt & (~vfWbBusyTableMask(i)).asUInt
      subDeqSelValidVec(i).get  := dp.io.deqSelOHVec.map(oh => oh.valid)
      subDeqSelOHVec(i).get     := dp.io.deqSelOHVec.map(oh => oh.bits)
    }
  }

  protected val enqCanAcceptVec: Seq[IndexedSeq[Bool]] = deqFuCfgs.map { fuCfgs: Seq[FuConfig] =>
    io.enq.map(_.bits.fuType).map(fuType =>
      Cat(fuCfgs.map(_.fuType.U === fuType)).asUInt.orR) // C+E0    C+E1
  }

  protected val transCanAcceptVec: Seq[IndexedSeq[Bool]] = deqFuCfgs.map { fuCfgs: Seq[FuConfig] =>
    transEntryDeqVec.map(_.bits.status.fuType).zip(transEntryDeqVec.map(_.valid)).map{ case (fuType, valid) =>
      Cat(fuCfgs.map(_.fuType.U === fuType)).asUInt.orR && valid }
  }

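  // Final dequeue selection per deq port: prefer the oldest ready entry in the "others"
  // region (tracked by AgeDetector), then the oldest entry still in the enqueue region,
  // and fall back to the combinational DeqPolicy choice. All candidates are masked by the
  // fuBusyTable / write-back busy-table masks. For the second deq port, a candidate equal
  // to port 0's selection is skipped so the two ports do not pick the same entry.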
  val enqEntryOldest = (0 until params.numDeq).map {
    case deqIdx =>
      NewAgeDetector(numEntries = params.numEnq,
        enq = VecInit(enqCanAcceptVec(deqIdx).zip(s0_doEnqSelValidVec).map{ case (doCanAccept, valid) => doCanAccept && valid }),
        clear = VecInit(clearVec.take(params.numEnq)),
        canIssue = VecInit(canIssueVec.take(params.numEnq)).asUInt & ((~fuBusyTableMask(deqIdx)).asUInt & (~intWbBusyTableMask(deqIdx)).asUInt & (~vfWbBusyTableMask(deqIdx)).asUInt)(params.numEnq-1, 0)
      )
  }

  val othersEntryOldest = (0 until params.numDeq).map {
    case deqIdx =>
      AgeDetector(numEntries = params.numEntries - params.numEnq,
        enq = VecInit(transCanAcceptVec(deqIdx).zip(transSelVec).map{ case(doCanAccept, transSel) => Mux(doCanAccept, transSel, 0.U)}),
        deq = VecInit(clearVec.drop(params.numEnq)).asUInt,
        canIssue = VecInit(canIssueVec.drop(params.numEnq)).asUInt & ((~fuBusyTableMask(deqIdx)).asUInt & (~intWbBusyTableMask(deqIdx)).asUInt & (~vfWbBusyTableMask(deqIdx)).asUInt)(params.numEntries-1, params.numEnq)
      )
  }

  finalDeqSelValidVec.head := othersEntryOldest.head.valid || enqEntryOldest.head.valid || subDeqSelValidVec.head.getOrElse(Seq(false.B)).head
  finalDeqSelOHVec.head := Mux(othersEntryOldest.head.valid, Cat(othersEntryOldest.head.bits, 0.U((params.numEnq).W)),
                            Mux(enqEntryOldest.head.valid, Cat(0.U((params.numEntries-params.numEnq).W), enqEntryOldest.head.bits),
                              subDeqSelOHVec.head.getOrElse(Seq(0.U)).head))

  if (params.numDeq == 2) {
    val chooseOthersOldest = othersEntryOldest(1).valid && Cat(othersEntryOldest(1).bits, 0.U((params.numEnq).W)) =/= finalDeqSelOHVec.head
    val chooseEnqOldest = enqEntryOldest(1).valid && Cat(0.U((params.numEntries-params.numEnq).W), enqEntryOldest(1).bits) =/= finalDeqSelOHVec.head
    val choose1stSub = subDeqSelOHVec(1).getOrElse(Seq(0.U)).head =/= finalDeqSelOHVec.head

    finalDeqSelValidVec(1) := MuxCase(subDeqSelValidVec(1).getOrElse(Seq(false.B)).last, Seq(
      (chooseOthersOldest) -> othersEntryOldest(1).valid,
      (chooseEnqOldest) -> enqEntryOldest(1).valid,
      (choose1stSub) -> subDeqSelValidVec(1).getOrElse(Seq(false.B)).head)
    )
    finalDeqSelOHVec(1) := MuxCase(subDeqSelOHVec(1).getOrElse(Seq(0.U)).last, Seq(
      (chooseOthersOldest) -> Cat(othersEntryOldest(1).bits, 0.U((params.numEnq).W)),
      (chooseEnqOldest) -> Cat(0.U((params.numEntries-params.numEnq).W), enqEntryOldest(1).bits),
      (choose1stSub) -> subDeqSelOHVec(1).getOrElse(Seq(0.U)).head)
    )
  }

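  // The fuBusyTable tracks upcoming busy cycles of each deq port's functional units
  // (filled from the deq/og0/og1 responses); the read side turns it into a per-entry mask
  // that blocks entries whose issue would conflict with an already-issued uop.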
  //fuBusyTable
  fuBusyTableWrite.zip(fuBusyTableRead).zipWithIndex.foreach { case ((busyTableWrite: Option[FuBusyTableWrite], busyTableRead: Option[FuBusyTableRead]), i) =>
    if(busyTableWrite.nonEmpty) {
      val btwr = busyTableWrite.get
      val btrd = busyTableRead.get
      btwr.io.in.deqResp := io.deqResp(i)
      btwr.io.in.og0Resp := io.og0Resp(i)
      btwr.io.in.og1Resp := io.og1Resp(i)
      btrd.io.in.fuBusyTable := btwr.io.out.fuBusyTable
      btrd.io.in.fuTypeRegVec := fuTypeVec
      fuBusyTableMask(i) := btrd.io.out.fuBusyTableMask
    }
    else {
      fuBusyTableMask(i) := 0.U(params.numEntries.W)
    }
  }

  //wbfuBusyTable write
  intWbBusyTableWrite.zip(intWbBusyTableOut).zip(intDeqRespSetOut).zipWithIndex.foreach { case (((busyTableWrite: Option[FuBusyTableWrite], busyTable: Option[UInt]), deqResp), i) =>
    if(busyTableWrite.nonEmpty) {
      val btwr = busyTableWrite.get
      val bt = busyTable.get
      val dq = deqResp.get
      btwr.io.in.deqResp := io.deqResp(i)
      btwr.io.in.og0Resp := io.og0Resp(i)
      btwr.io.in.og1Resp := io.og1Resp(i)
      bt := btwr.io.out.fuBusyTable
      dq := btwr.io.out.deqRespSet
    }
  }

  vfWbBusyTableWrite.zip(vfWbBusyTableOut).zip(vfDeqRespSetOut).zipWithIndex.foreach { case (((busyTableWrite: Option[FuBusyTableWrite], busyTable: Option[UInt]), deqResp), i) =>
    if (busyTableWrite.nonEmpty) {
      val btwr = busyTableWrite.get
      val bt = busyTable.get
      val dq = deqResp.get
      btwr.io.in.deqResp := io.deqResp(i)
      btwr.io.in.og0Resp := io.og0Resp(i)
      btwr.io.in.og1Resp := io.og1Resp(i)
      bt := btwr.io.out.fuBusyTable
      dq := btwr.io.out.deqRespSet
    }
  }

  //wbfuBusyTable read
  intWbBusyTableRead.zip(intWbBusyTableIn).zipWithIndex.foreach { case ((busyTableRead: Option[FuBusyTableRead], busyTable: Option[UInt]), i) =>
    if(busyTableRead.nonEmpty) {
      val btrd = busyTableRead.get
      val bt = busyTable.get
      btrd.io.in.fuBusyTable := bt
      btrd.io.in.fuTypeRegVec := fuTypeVec
      intWbBusyTableMask(i) := btrd.io.out.fuBusyTableMask
    }
    else {
      intWbBusyTableMask(i) := 0.U(params.numEntries.W)
    }
  }
  vfWbBusyTableRead.zip(vfWbBusyTableIn).zipWithIndex.foreach { case ((busyTableRead: Option[FuBusyTableRead], busyTable: Option[UInt]), i) =>
    if (busyTableRead.nonEmpty) {
      val btrd = busyTableRead.get
      val bt = busyTable.get
      btrd.io.in.fuBusyTable := bt
      btrd.io.in.fuTypeRegVec := fuTypeVec
      vfWbBusyTableMask(i) := btrd.io.out.fuBusyTableMask
    }
    else {
      vfWbBusyTableMask(i) := 0.U(params.numEntries.W)
    }
  }

  wakeUpQueues.zipWithIndex.foreach { case (wakeUpQueueOption, i) =>
    val og0RespEach = io.og0Resp(i)
    val og1RespEach = io.og1Resp(i)
    wakeUpQueueOption.foreach {
      wakeUpQueue =>
        val flush = Wire(new WakeupQueueFlush)
        flush.redirect := io.flush
        flush.ldCancel := io.ldCancel
        flush.og0Fail := io.og0Resp(i).valid && RSFeedbackType.isBlocked(io.og0Resp(i).bits.respType)
        flush.og1Fail := io.og1Resp(i).valid && RSFeedbackType.isBlocked(io.og1Resp(i).bits.respType)
        wakeUpQueue.io.flush := flush
        wakeUpQueue.io.enq.valid := io.deq(i).fire && !io.deq(i).bits.common.needCancel(io.og0Cancel, io.og1Cancel) && {
          if (io.deq(i).bits.common.rfWen.isDefined)
            io.deq(i).bits.common.rfWen.get && io.deq(i).bits.common.pdest =/= 0.U
          else
            true.B
        }
        wakeUpQueue.io.enq.bits.uop := io.deq(i).bits.common
        wakeUpQueue.io.enq.bits.lat := getDeqLat(i, io.deq(i).bits.common.fuType)
        wakeUpQueue.io.og0IssueFail := flush.og0Fail
        wakeUpQueue.io.og1IssueFail := flush.og1Fail
    }
  }

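  // Drive the deq ports combinationally from the entry selected by finalDeqSelOHVec:
  // control fields come from the stored payload, while dataSources / l1ExuVec / srcTimer
  // reflect the wakeup state captured for the selected entry this cycle.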
  io.deq.zipWithIndex.foreach { case (deq, i) =>
    deq.valid                := finalDeqSelValidVec(i)
    deq.bits.addrOH          := finalDeqSelOHVec(i)
    deq.bits.common.isFirstIssue := deqFirstIssueVec(i)
    deq.bits.common.iqIdx    := OHToUInt(finalDeqSelOHVec(i))
    deq.bits.common.fuType   := deqEntryVec(i).bits.payload.fuType
    deq.bits.common.fuOpType := deqEntryVec(i).bits.payload.fuOpType
    deq.bits.common.rfWen.foreach(_ := deqEntryVec(i).bits.payload.rfWen)
    deq.bits.common.fpWen.foreach(_ := deqEntryVec(i).bits.payload.fpWen)
    deq.bits.common.vecWen.foreach(_ := deqEntryVec(i).bits.payload.vecWen)
    deq.bits.common.flushPipe.foreach(_ := deqEntryVec(i).bits.payload.flushPipe)
    deq.bits.common.pdest := deqEntryVec(i).bits.payload.pdest
    deq.bits.common.robIdx := deqEntryVec(i).bits.payload.robIdx
    deq.bits.common.imm := deqEntryVec(i).bits.imm
    deq.bits.common.dataSources.zip(finalDataSources(i)).zipWithIndex.foreach {
      case ((sink, source), srcIdx) =>
        sink.value := Mux(
          SrcType.isXp(deqEntryVec(i).bits.payload.srcType(srcIdx)) && deqEntryVec(i).bits.payload.psrc(srcIdx) === 0.U,
          DataSource.none,
          source.value
        )
    }
    if(params.hasIQWakeUp) {
      deq.bits.common.l1ExuVec := finalWakeUpL1ExuOH.get(i)
    } else {
      deq.bits.common.l1ExuVec := deqEntryVec(i).bits.payload.l1ExuOH.take(deq.bits.common.l1ExuVec.length)
    }
    deq.bits.common.srcTimer.foreach(_ := finalSrcTimer.get(i))
    deq.bits.common.loadDependency.foreach(_ := deqEntryVec(i).bits.status.mergedLoadDependency.get)
    deq.bits.common.deqPortIdx.foreach(_ := i.U)

    deq.bits.rf.zip(deqEntryVec(i).bits.payload.psrc).foreach { case (rf, psrc) =>
      rf.foreach(_.addr := psrc) // psrc in payload array can be pregIdx of IntRegFile or VfRegFile
    }
    deq.bits.rf.zip(deqEntryVec(i).bits.payload.srcType).foreach { case (rf, srcType) =>
      rf.foreach(_.srcType := srcType) // psrc in payload array can be pregIdx of IntRegFile or VfRegFile
    }
    deq.bits.srcType.zip(deqEntryVec(i).bits.payload.srcType).foreach { case (sink, source) =>
      sink := source
    }
    deq.bits.immType := deqEntryVec(i).bits.payload.selImm

    // dirty code for lui+addi(w) fusion
    when (deqEntryVec(i).bits.payload.isLUI32) {
      val lui_imm = Cat(deqEntryVec(i).bits.payload.lsrc(1), deqEntryVec(i).bits.payload.lsrc(0), deqEntryVec(i).bits.imm(ImmUnion.maxLen - 1, 0))
      deq.bits.common.imm := ImmUnion.LUI32.toImm32(lui_imm)
    }

    // dirty code for fused_lui_load
    when (SrcType.isImm(deqEntryVec(i).bits.payload.srcType(0)) && deqEntryVec(i).bits.payload.fuType === FuType.ldu.U) {
      deq.bits.common.imm := Imm_LUI_LOAD().getLuiImm(deqEntryVec(i).bits.payload)
    }
  }

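  // deq passes through the external cancel network and is then registered into deqDelay by
  // NewPipelineConnect. The shifted copy advances loadDependency by one to account for that
  // extra cycle, and the pipeline register is flushed on a redirect or a load cancel.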
  private val ldCancels = io.fromCancelNetwork.map(in =>
    LoadShouldCancel(in.bits.common.loadDependency, io.ldCancel)
  )
  private val fromCancelNetworkShift = WireDefault(io.fromCancelNetwork)
  fromCancelNetworkShift.zip(io.fromCancelNetwork).foreach {
    case (shifted, original) =>
      original.ready := shifted.ready // this does not create a combinational loop
      shifted.bits.common.loadDependency.foreach(
        _ := original.bits.common.loadDependency.get.map(_ << 1)
      )
  }
  io.deqDelay.zip(fromCancelNetworkShift).zip(ldCancels).foreach { case ((deqDly, deq), ldCancel) =>
    NewPipelineConnect(
      deq, deqDly, deqDly.valid,
      deq.bits.common.robIdx.needFlush(io.flush) || ldCancel,
      Option("Scheduler2DataPathPipe")
    )
  }
  dontTouch(io.deqDelay)
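  // Wakeups to other IQs are driven from this port's wakeup queue (if it is a wakeup
  // source); ports that are not wakeup sources drive a constant-invalid wakeup.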
  io.wakeupToIQ.zipWithIndex.foreach { case (wakeup, i) =>
    if (wakeUpQueues(i).nonEmpty && finalWakeUpL1ExuOH.nonEmpty) {
      wakeup.valid := wakeUpQueues(i).get.io.deq.valid
      wakeup.bits.fromExuInput(wakeUpQueues(i).get.io.deq.bits, finalWakeUpL1ExuOH.get(i))
      wakeup.bits.loadDependency := wakeUpQueues(i).get.io.deq.bits.loadDependency.getOrElse(0.U.asTypeOf(wakeup.bits.loadDependency))
    } else if (wakeUpQueues(i).nonEmpty) {
      wakeup.valid := wakeUpQueues(i).get.io.deq.valid
      wakeup.bits.fromExuInput(wakeUpQueues(i).get.io.deq.bits)
      wakeup.bits.loadDependency := wakeUpQueues(i).get.io.deq.bits.loadDependency.getOrElse(0.U.asTypeOf(wakeup.bits.loadDependency))
    } else {
      wakeup.valid := false.B
      wakeup.bits := 0.U.asTypeOf(wakeup.bits)
    }
  }

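  // Occupancy status for dispatch: leftVec(0) is set when the non-enqueue region is
  // completely full, and leftVec(i + 1) is set when exactly i + 1 slots remain there.
  // enq.ready is deasserted once any of these near-full flags is set while valid uops
  // are trying to enqueue.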
  // Todo: better counter implementation
  private val enqHasValid = validVec.take(params.numEnq).reduce(_ | _)
  private val othersValidCnt = PopCount(validVec.drop(params.numEnq))
  io.status.leftVec(0) := validVec.drop(params.numEnq).reduce(_ & _)
  for (i <- 0 until params.numEnq) {
    io.status.leftVec(i + 1) := othersValidCnt === (params.numEntries - params.numEnq - (i + 1)).U
  }
  io.enq.foreach(_.ready := !Cat(io.status.leftVec).orR || !enqHasValid) // Todo: more efficient implementation

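  // Looks up the issue-to-wakeup latency of the given fuType for one deq port, using the
  // per-port fuLatencyMap and a one-hot Mux1H on the functional-unit type.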
  protected def getDeqLat(deqPortIdx: Int, fuType: UInt) : UInt = {
    val fuLatUIntMaps: Map[UInt, UInt] = fuLatencyMaps(deqPortIdx).map { case (k, v) => (k.U, v.U) }
    val lat = WireInit(Mux1H(fuLatUIntMaps.keys.map(_ === fuType).toSeq, fuLatUIntMaps.values.toSeq))
    dontTouch(lat)
  }

  // issue perf counter
  // ready instr count
  XSPerfHistogram("issue_ready_hist", PopCount(validVec.zip(canIssueVec).map(x => x._1 && x._2)), true.B, 0, params.numEntries, 1)
  for (t <- FuType.functionNameMap.keys) {
    val fuName = FuType.functionNameMap(t)
    if (params.getFuCfgs.map(_.fuType == t).reduce(_ | _)) {
      XSPerfHistogram(s"issue_ready_hist_futype_${fuName}", PopCount(validVec.zip(canIssueVec).zip(fuTypeVec).map{ case ((v, c), fu) => v && c && fu === t.U }), true.B, 0, params.numEntries, 1)
    }
  }

  // deq instr count
  XSPerfAccumulate("issue_instr_count", PopCount(io.deq.map(_.valid)))
  XSPerfHistogram("issue_instr_count_hist", PopCount(io.deq.map(_.valid)), true.B, 0, params.numDeq, 1)

  // deq instr data source count
  XSPerfAccumulate("issue_datasource_reg", io.deq.map{ deq =>
    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.reg && !SrcType.isNotReg(deq.bits.srcType(j)) })
  }.reduce(_ +& _))
  XSPerfAccumulate("issue_datasource_bypass", io.deq.map{ deq =>
    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.bypass && !SrcType.isNotReg(deq.bits.srcType(j)) })
  }.reduce(_ +& _))
  XSPerfAccumulate("issue_datasource_forward", io.deq.map{ deq =>
    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.forward && !SrcType.isNotReg(deq.bits.srcType(j)) })
  }.reduce(_ +& _))
  XSPerfAccumulate("issue_datasource_noreg", io.deq.map{ deq =>
    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && SrcType.isNotReg(deq.bits.srcType(j)) })
  }.reduce(_ +& _))

  XSPerfHistogram("issue_datasource_reg_hist", io.deq.map{ deq =>
    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.reg && !SrcType.isNotReg(deq.bits.srcType(j)) })
  }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc, 1)
  XSPerfHistogram("issue_datasource_bypass_hist", io.deq.map{ deq =>
    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.bypass && !SrcType.isNotReg(deq.bits.srcType(j)) })
  }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc, 1)
  XSPerfHistogram("issue_datasource_forward_hist", io.deq.map{ deq =>
    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.forward && !SrcType.isNotReg(deq.bits.srcType(j)) })
  }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc, 1)
  XSPerfHistogram("issue_datasource_noreg_hist", io.deq.map{ deq =>
    PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && SrcType.isNotReg(deq.bits.srcType(j)) })
  }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc, 1)

  // deq instr data source count for each futype
  for (t <- FuType.functionNameMap.keys) {
    val fuName = FuType.functionNameMap(t)
    if (params.getFuCfgs.map(_.fuType == t).reduce(_ | _)) {
      XSPerfAccumulate(s"issue_datasource_reg_futype_${fuName}", io.deq.map{ deq =>
        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.reg && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
      }.reduce(_ +& _))
      XSPerfAccumulate(s"issue_datasource_bypass_futype_${fuName}", io.deq.map{ deq =>
        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.bypass && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
      }.reduce(_ +& _))
      XSPerfAccumulate(s"issue_datasource_forward_futype_${fuName}", io.deq.map{ deq =>
        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.forward && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
      }.reduce(_ +& _))
      XSPerfAccumulate(s"issue_datasource_noreg_futype_${fuName}", io.deq.map{ deq =>
        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
      }.reduce(_ +& _))

      XSPerfHistogram(s"issue_datasource_reg_hist_futype_${fuName}", io.deq.map{ deq =>
        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.reg && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
      }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc, 1)
      XSPerfHistogram(s"issue_datasource_bypass_hist_futype_${fuName}", io.deq.map{ deq =>
        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.bypass && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
      }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc, 1)
      XSPerfHistogram(s"issue_datasource_forward_hist_futype_${fuName}", io.deq.map{ deq =>
        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && ds.value === DataSource.forward && !SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
      }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc, 1)
      XSPerfHistogram(s"issue_datasource_noreg_hist_futype_${fuName}", io.deq.map{ deq =>
        PopCount(deq.bits.common.dataSources.zipWithIndex.map{ case (ds, j) => deq.valid && SrcType.isNotReg(deq.bits.srcType(j)) && deq.bits.common.fuType === t.U })
      }.reduce(_ +& _), true.B, 0, params.numDeq * params.numRegSrc, 1)
    }
  }

  // cancel instr count
  if (params.hasIQWakeUp) {
    val cancelVec: Vec[Bool] = entries.io.cancel.get
    XSPerfAccumulate("cancel_instr_count", PopCount(validVec.zip(cancelVec).map(x => x._1 & x._2)))
    XSPerfHistogram("cancel_instr_hist", PopCount(validVec.zip(cancelVec).map(x => x._1 & x._2)), true.B, 0, params.numEntries, 1)
    for (t <- FuType.functionNameMap.keys) {
      val fuName = FuType.functionNameMap(t)
      if (params.getFuCfgs.map(_.fuType == t).reduce(_ | _)) {
        XSPerfAccumulate(s"cancel_instr_count_futype_${fuName}", PopCount(validVec.zip(cancelVec).zip(fuTypeVec).map{ case ((x, y), fu) => x & y & fu === t.U }))
        XSPerfHistogram(s"cancel_instr_hist_futype_${fuName}", PopCount(validVec.zip(cancelVec).zip(fuTypeVec).map{ case ((x, y), fu) => x & y & fu === t.U }), true.B, 0, params.numEntries, 1)
      }
    }
  }
}

class IssueQueueJumpBundle extends Bundle {
  val pc = UInt(VAddrData().dataWidth.W)
  val target = UInt(VAddrData().dataWidth.W)
}

class IssueQueueLoadBundle(implicit p: Parameters) extends XSBundle {
  val fastMatch = UInt(backendParams.LduCnt.W)
  val fastImm = UInt(12.W)
}

class IssueQueueIntIO()(implicit p: Parameters, params: IssueBlockParams) extends IssueQueueIO {
  val enqJmp = if(params.numPcReadPort > 0) Some(Input(Vec(params.numPcReadPort, new IssueQueueJumpBundle))) else None
}

class IssueQueueIntImp(override val wrapper: IssueQueue)(implicit p: Parameters, iqParams: IssueBlockParams)
  extends IssueQueueImp(wrapper)
{
  io.suggestName("none")
  override lazy val io = IO(new IssueQueueIntIO).suggestName("io")

  if(params.needPc) {
    entries.io.enq.zipWithIndex.foreach { case (entriesEnq, i) =>
      entriesEnq.bits.status.pc.foreach(_ := io.enq(i).bits.pc)
      entriesEnq.bits.status.target.foreach(_ := io.enqJmp.get(i).target)
    }
  }

  io.deq.zipWithIndex.foreach{ case (deq, i) => {
    deq.bits.jmp.foreach((deqJmp: IssueQueueJumpBundle) => {
      deqJmp.pc := deqEntryVec(i).bits.status.pc.get
      deqJmp.target := deqEntryVec(i).bits.status.target.get
    })
    deq.bits.common.preDecode.foreach(_ := deqEntryVec(i).bits.payload.preDecodeInfo)
    deq.bits.common.ftqIdx.foreach(_ := deqEntryVec(i).bits.payload.ftqPtr)
    deq.bits.common.ftqOffset.foreach(_ := deqEntryVec(i).bits.payload.ftqOffset)
    deq.bits.common.predictInfo.foreach(x => {
      x.target := deqEntryVec(i).bits.status.target.get
      x.taken := deqEntryVec(i).bits.payload.pred_taken
    })
    // for std
    deq.bits.common.sqIdx.foreach(_ := deqEntryVec(i).bits.payload.sqIdx)
    // for i2f
    deq.bits.common.fpu.foreach(_ := deqEntryVec(i).bits.payload.fpu)
  }}
}

class IssueQueueVfImp(override val wrapper: IssueQueue)(implicit p: Parameters, iqParams: IssueBlockParams)
  extends IssueQueueImp(wrapper)
{
  s0_enqBits.foreach{ x =>
    x.srcType(3) := SrcType.vp // v0: mask src
    x.srcType(4) := SrcType.vp // vl&vtype
  }
  io.deq.zipWithIndex.foreach{ case (deq, i) => {
    deq.bits.common.fpu.foreach(_ := deqEntryVec(i).bits.payload.fpu)
    deq.bits.common.vpu.foreach(_ := deqEntryVec(i).bits.payload.vpu)
    deq.bits.common.vpu.foreach(_.vuopIdx := deqEntryVec(i).bits.payload.uopIdx)
  }}
}

class IssueQueueMemBundle(implicit p: Parameters, params: IssueBlockParams) extends Bundle {
  val feedbackIO = Flipped(Vec(params.numDeq, new MemRSFeedbackIO))
  val checkWait = new Bundle {
    val stIssuePtr = Input(new SqPtr)
    val memWaitUpdateReq = Flipped(new MemWaitUpdateReq)
  }
  val loadFastMatch = Output(Vec(params.LduCnt, new IssueQueueLoadBundle))
}

class IssueQueueMemIO(implicit p: Parameters, params: IssueBlockParams) extends IssueQueueIO {
  val memIO = Some(new IssueQueueMemBundle)
}

class IssueQueueMemAddrImp(override val wrapper: IssueQueue)(implicit p: Parameters, params: IssueBlockParams)
  extends IssueQueueImp(wrapper) with HasCircularQueuePtrHelper {

  require(params.StdCnt == 0 && (params.LduCnt + params.StaCnt + params.VlduCnt) > 0, "IssueQueueMemAddrImp can only be an instance of a MemAddr IQ")

  io.suggestName("none")
  override lazy val io = IO(new IssueQueueMemIO).suggestName("io")
  private val memIO = io.memIO.get

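  // An enqueueing load keeps its loadWaitBit (i.e. stays ordered behind a store) only while
  // older stores have not all issued (its sqIdx is still after stIssuePtr) and the store it
  // waits for is not issuing its address this cycle via memWaitUpdateReq (unless
  // loadWaitStrict is set).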
  for (i <- io.enq.indices) {
    val blockNotReleased = isAfter(io.enq(i).bits.sqIdx, memIO.checkWait.stIssuePtr)
    val storeAddrWaitForIsIssuing = VecInit((0 until StorePipelineWidth).map(i => {
      memIO.checkWait.memWaitUpdateReq.staIssue(i).valid &&
        memIO.checkWait.memWaitUpdateReq.staIssue(i).bits.uop.robIdx.value === io.enq(i).bits.waitForRobIdx.value
    })).asUInt.orR && !io.enq(i).bits.loadWaitStrict // is waiting for store addr ready
    s0_enqBits(i).loadWaitBit := io.enq(i).bits.loadWaitBit && !storeAddrWaitForIsIssuing && blockNotReleased
  }

  for (i <- entries.io.enq.indices) {
    entries.io.enq(i).bits.status match { case enqData =>
      enqData.blocked := false.B // s0_enqBits(i).loadWaitBit
      enqData.mem.get.strictWait := s0_enqBits(i).loadWaitStrict
      enqData.mem.get.waitForStd := false.B
      enqData.mem.get.waitForRobIdx := s0_enqBits(i).waitForRobIdx
      enqData.mem.get.waitForSqIdx := 0.U.asTypeOf(enqData.mem.get.waitForSqIdx) // generated by sq, will be updated later
      enqData.mem.get.sqIdx := s0_enqBits(i).sqIdx
    }

    entries.io.fromMem.get.slowResp.zipWithIndex.foreach { case (slowResp, i) =>
      slowResp.valid                 := memIO.feedbackIO(i).feedbackSlow.valid
      slowResp.bits.robIdx           := memIO.feedbackIO(i).feedbackSlow.bits.robIdx
      slowResp.bits.respType         := Mux(memIO.feedbackIO(i).feedbackSlow.bits.hit, RSFeedbackType.fuIdle, RSFeedbackType.feedbackInvalid)
      slowResp.bits.dataInvalidSqIdx := memIO.feedbackIO(i).feedbackSlow.bits.dataInvalidSqIdx
      slowResp.bits.rfWen := DontCare
      slowResp.bits.fuType := DontCare
    }

    entries.io.fromMem.get.fastResp.zipWithIndex.foreach { case (fastResp, i) =>
      fastResp.valid                 := memIO.feedbackIO(i).feedbackFast.valid
      fastResp.bits.robIdx           := memIO.feedbackIO(i).feedbackFast.bits.robIdx
      fastResp.bits.respType         := memIO.feedbackIO(i).feedbackFast.bits.sourceType
      fastResp.bits.dataInvalidSqIdx := 0.U.asTypeOf(fastResp.bits.dataInvalidSqIdx)
      fastResp.bits.rfWen := DontCare
      fastResp.bits.fuType := DontCare
    }

    entries.io.fromMem.get.memWaitUpdateReq := memIO.checkWait.memWaitUpdateReq
    entries.io.fromMem.get.stIssuePtr := memIO.checkWait.stIssuePtr
  }

  io.deq.zipWithIndex.foreach { case (deq, i) =>
    deq.bits.common.sqIdx.get := deqEntryVec(i).bits.payload.sqIdx
    deq.bits.common.lqIdx.get := deqEntryVec(i).bits.payload.lqIdx
    if (params.isLdAddrIQ) {
      deq.bits.common.ftqIdx.get := deqEntryVec(i).bits.payload.ftqPtr
      deq.bits.common.ftqOffset.get := deqEntryVec(i).bits.payload.ftqOffset
    }
  }
}