xref: /XiangShan/src/main/scala/xiangshan/frontend/FTB.scala (revision 30f35717e23156cb95b30a36db530384545b48a4)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import scala.{Tuple2 => &}
import utility._
import utility.mbist.MbistPipeline
import utility.sram.SplittedSRAMTemplate
import xiangshan._

trait FTBParams extends HasXSParameter with HasBPUConst {
  val numEntries = FtbSize
  val numWays    = FtbWays
  val numSets    = numEntries / numWays // 512
  val tagLength  = FtbTagLength

  val TAR_STAT_SZ = 2
  def TAR_FIT     = 0.U(TAR_STAT_SZ.W)
  def TAR_OVF     = 1.U(TAR_STAT_SZ.W)
  def TAR_UDF     = 2.U(TAR_STAT_SZ.W)

  def BR_OFFSET_LEN  = 12
  def JMP_OFFSET_LEN = 20

  def FTBCLOSE_THRESHOLD_SZ = log2Ceil(500)
  def FTBCLOSE_THRESHOLD    = 500.U(FTBCLOSE_THRESHOLD_SZ.W) // can be modified
}
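
// How targets are compressed: a slot does not store a full target address, only
// target(offsetLen, 1) in `lower` plus a 2-bit `tarStat` that records how the target's
// high bits relate to the pc's high bits: TAR_FIT (equal), TAR_OVF (pc_higher + 1,
// overflow into the next region), TAR_UDF (pc_higher - 1, underflow). A minimal sketch
// of the decode step, mirroring the Mux1H in FtbSlot.getTarget below:
//
//   val higher = Mux1H(Seq(
//     (tarStat === TAR_FIT, pc_higher),
//     (tarStat === TAR_OVF, pc_higher + 1.U),
//     (tarStat === TAR_UDF, pc_higher - 1.U)
//   ))
//   val target = Cat(higher, lower, 0.U(1.W)) // bit 0 of a target is always zero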

class FtbSlot_FtqMem(implicit p: Parameters) extends XSBundle with FTBParams {
  val offset  = UInt(log2Ceil(PredictWidth).W)
  val sharing = Bool()
  val valid   = Bool()
}

class FtbSlot(val offsetLen: Int, val subOffsetLen: Option[Int] = None)(implicit p: Parameters) extends FtbSlot_FtqMem
    with FTBParams {
  if (subOffsetLen.isDefined) {
    require(subOffsetLen.get <= offsetLen)
  }
  val lower   = UInt(offsetLen.W)
  val tarStat = UInt(TAR_STAT_SZ.W)

  def setLowerStatByTarget(pc: UInt, target: UInt, isShare: Boolean) = {
    def getTargetStatByHigher(pc_higher: UInt, target_higher: UInt) =
      Mux(target_higher > pc_higher, TAR_OVF, Mux(target_higher < pc_higher, TAR_UDF, TAR_FIT))
    def getLowerByTarget(target: UInt, offsetLen: Int) = target(offsetLen, 1)
    val offLen        = if (isShare) this.subOffsetLen.get else this.offsetLen
    val pc_higher     = pc(VAddrBits - 1, offLen + 1)
    val target_higher = target(VAddrBits - 1, offLen + 1)
    val stat          = getTargetStatByHigher(pc_higher, target_higher)
    val lower         = ZeroExt(getLowerByTarget(target, offLen), this.offsetLen)
    this.lower   := lower
    this.tarStat := stat
    this.sharing := isShare.B
  }
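  // Worked example (illustrative, assuming VAddrBits = 39 and offLen = BR_OFFSET_LEN = 12):
  // for pc = 0x80001234 and target = 0x80001800, pc(38, 13) equals target(38, 13), so
  // tarStat = TAR_FIT and lower = target(12, 1) = 0xc00. Had the branch crossed into the
  // previous 8 KiB region, target(38, 13) would be pc(38, 13) - 1 and tarStat = TAR_UDF.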

  def getTarget(pc: UInt, last_stage: Option[Tuple2[UInt, Bool]] = None) = {
    def getTarget(offLen: Int)(pc: UInt, lower: UInt, stat: UInt, last_stage: Option[Tuple2[UInt, Bool]] = None) = {
      val h                = pc(VAddrBits - 1, offLen + 1)
      val higher           = Wire(UInt((VAddrBits - offLen - 1).W))
      val higher_plus_one  = Wire(UInt((VAddrBits - offLen - 1).W))
      val higher_minus_one = Wire(UInt((VAddrBits - offLen - 1).W))

      // Switch between the previous-stage pc and the current-stage pc
      // to give flexibility for timing
      if (last_stage.isDefined) {
        val last_stage_pc   = last_stage.get._1
        val last_stage_pc_h = last_stage_pc(VAddrBits - 1, offLen + 1)
        val stage_en        = last_stage.get._2
        higher           := RegEnable(last_stage_pc_h, stage_en)
        higher_plus_one  := RegEnable(last_stage_pc_h + 1.U, stage_en)
        higher_minus_one := RegEnable(last_stage_pc_h - 1.U, stage_en)
      } else {
        higher           := h
        higher_plus_one  := h + 1.U
        higher_minus_one := h - 1.U
      }
      val target =
        Cat(
          Mux1H(Seq(
            (stat === TAR_OVF, higher_plus_one),
            (stat === TAR_UDF, higher_minus_one),
            (stat === TAR_FIT, higher)
          )),
          lower(offLen - 1, 0),
          0.U(1.W)
        )
      require(target.getWidth == VAddrBits)
      require(offLen != 0)
      target
    }
    if (subOffsetLen.isDefined)
      Mux(
        sharing,
        getTarget(subOffsetLen.get)(pc, lower, tarStat, last_stage),
        getTarget(offsetLen)(pc, lower, tarStat, last_stage)
      )
    else
      getTarget(offsetLen)(pc, lower, tarStat, last_stage)
  }
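
  // When `last_stage` is provided, the three candidate high parts (h, h + 1, h - 1) are
  // computed from the previous stage's pc and registered with RegEnable, so the current
  // stage only pays for a Mux1H and a Cat; the adders move off the critical path.
  // Sketch of the two usages (hypothetical signal names):
  //
  //   val t_s1 = slot.getTarget(s1_pc)                        // adders in this stage
  //   val t_s2 = slot.getTarget(s2_pc, Some(s1_pc, s1_fire))  // adders in s1, registered into s2
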
  def fromAnotherSlot(that: FtbSlot) = {
    require(
      this.offsetLen > that.offsetLen && this.subOffsetLen.map(_ == that.offsetLen).getOrElse(true) ||
        this.offsetLen == that.offsetLen
    )
    this.offset  := that.offset
    this.tarStat := that.tarStat
    this.sharing := (this.offsetLen > that.offsetLen && that.offsetLen == this.subOffsetLen.get).B
    this.valid   := that.valid
    this.lower   := ZeroExt(that.lower, this.offsetLen)
  }

  def slotConsistent(that: FtbSlot) =
    VecInit(
      this.offset === that.offset,
      this.lower === that.lower,
      this.tarStat === that.tarStat,
      this.sharing === that.sharing,
      this.valid === that.valid
    ).reduce(_ && _)

}

class FTBEntry_part(implicit p: Parameters) extends XSBundle with FTBParams with BPUUtils {
  val isCall = Bool()
  val isRet  = Bool()
  val isJalr = Bool()

  def isJal = !isJalr
}

class FTBEntry_FtqMem(implicit p: Parameters) extends FTBEntry_part with FTBParams with BPUUtils {

  val brSlots  = Vec(numBrSlot, new FtbSlot_FtqMem)
  val tailSlot = new FtbSlot_FtqMem

  def jmpValid =
    tailSlot.valid && !tailSlot.sharing

  def getBrRecordedVec(offset: UInt) =
    VecInit(
      brSlots.map(s => s.valid && s.offset === offset) :+
        (tailSlot.valid && tailSlot.offset === offset && tailSlot.sharing)
    )

  def brIsSaved(offset: UInt) = getBrRecordedVec(offset).reduce(_ || _)

  def getBrMaskByOffset(offset: UInt) =
    brSlots.map { s =>
      s.valid && s.offset <= offset
    } :+
      (tailSlot.valid && tailSlot.offset <= offset && tailSlot.sharing)

  def newBrCanNotInsert(offset: UInt) = {
    val lastSlotForBr = tailSlot
    lastSlotForBr.valid && lastSlotForBr.offset < offset
  }

}

class FTBEntry(implicit p: Parameters) extends FTBEntry_part with FTBParams with BPUUtils {

  val valid = Bool()

  val brSlots = Vec(numBrSlot, new FtbSlot(BR_OFFSET_LEN))

  val tailSlot = new FtbSlot(JMP_OFFSET_LEN, Some(BR_OFFSET_LEN))

  // Partial Fall-Through Address
  val pftAddr = UInt(log2Up(PredictWidth).W)
  val carry   = Bool()

  val last_may_be_rvi_call = Bool()

  // Marks a conditional branch taken on its first occurrence and a jalr seen for the
  // first time; while strong_bias is true, TAGE/ITTAGE are trained but their results
  // are not used.
  val strong_bias = Vec(numBr, Bool())

  def getSlotForBr(idx: Int): FtbSlot = {
    require(idx <= numBr - 1)
    (idx, numBr) match {
      case (i, n) if i == n - 1 => this.tailSlot
      case _                    => this.brSlots(idx)
    }
  }
  def allSlotsForBr =
    (0 until numBr).map(getSlotForBr(_))
  def setByBrTarget(brIdx: Int, pc: UInt, target: UInt) = {
    val slot = getSlotForBr(brIdx)
    slot.setLowerStatByTarget(pc, target, brIdx == numBr - 1)
  }
  def setByJmpTarget(pc: UInt, target: UInt) =
    this.tailSlot.setLowerStatByTarget(pc, target, false)

  def getTargetVec(pc: UInt, last_stage: Option[Tuple2[UInt, Bool]] = None) = {
    /*
    Previous design: use FtbSlot.getTarget to compute the three targets separately.
    That generated nine sets of registers to hold higher, higher + 1 and higher - 1
    for each slot.
    Current design: reuse the parts those nine register sets duplicate. Compute the
    high bits shared by brtarget and jmptarget (last_stage_pc_higher) and the middle
    bits that need their own increment/decrement (last_stage_pc_middle), then
    concatenate them according to the carry to obtain brtarget and jmptarget.
     */
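    // Illustrative bit layout (assuming the typical Sv39 config: VAddrBits = 39,
    // BR_OFFSET_LEN = 12, JMP_OFFSET_LEN = 20):
    //   pc = | higher: pc(38, 21), 18 bits | middle: pc(20, 13), 8 bits | low bits |
    // jmp targets need higher and higher +- 1; br targets need Cat(higher, middle)
    // and its +- 1 neighbors. Instead of dedicated 26-bit adders, middle +- 1 is
    // computed with one extra carry bit, and that carry selects between higher and
    // higher +- 1 when concatenating.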
    val h_br                  = pc(VAddrBits - 1, BR_OFFSET_LEN + 1)
    val higher_br             = Wire(UInt((VAddrBits - BR_OFFSET_LEN - 1).W))
    val higher_plus_one_br    = Wire(UInt((VAddrBits - BR_OFFSET_LEN - 1).W))
    val higher_minus_one_br   = Wire(UInt((VAddrBits - BR_OFFSET_LEN - 1).W))
    val h_tail                = pc(VAddrBits - 1, JMP_OFFSET_LEN + 1)
    val higher_tail           = Wire(UInt((VAddrBits - JMP_OFFSET_LEN - 1).W))
    val higher_plus_one_tail  = Wire(UInt((VAddrBits - JMP_OFFSET_LEN - 1).W))
    val higher_minus_one_tail = Wire(UInt((VAddrBits - JMP_OFFSET_LEN - 1).W))
    if (last_stage.isDefined) {
      val last_stage_pc                  = last_stage.get._1
      val stage_en                       = last_stage.get._2
      val last_stage_pc_higher           = RegEnable(last_stage_pc(VAddrBits - 1, JMP_OFFSET_LEN + 1), stage_en)
      val last_stage_pc_middle           = RegEnable(last_stage_pc(JMP_OFFSET_LEN, BR_OFFSET_LEN + 1), stage_en)
      val last_stage_pc_higher_plus_one  = RegEnable(last_stage_pc(VAddrBits - 1, JMP_OFFSET_LEN + 1) + 1.U, stage_en)
      val last_stage_pc_higher_minus_one = RegEnable(last_stage_pc(VAddrBits - 1, JMP_OFFSET_LEN + 1) - 1.U, stage_en)
      val last_stage_pc_middle_plus_one =
        RegEnable(Cat(0.U(1.W), last_stage_pc(JMP_OFFSET_LEN, BR_OFFSET_LEN + 1)) + 1.U, stage_en)
      val last_stage_pc_middle_minus_one =
        RegEnable(Cat(0.U(1.W), last_stage_pc(JMP_OFFSET_LEN, BR_OFFSET_LEN + 1)) - 1.U, stage_en)

      higher_br := Cat(last_stage_pc_higher, last_stage_pc_middle)
      higher_plus_one_br := Mux(
        last_stage_pc_middle_plus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN),
        Cat(last_stage_pc_higher_plus_one, last_stage_pc_middle_plus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0)),
        Cat(last_stage_pc_higher, last_stage_pc_middle_plus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0))
      )
      higher_minus_one_br := Mux(
        last_stage_pc_middle_minus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN),
        Cat(last_stage_pc_higher_minus_one, last_stage_pc_middle_minus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0)),
        Cat(last_stage_pc_higher, last_stage_pc_middle_minus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0))
      )

      higher_tail           := last_stage_pc_higher
      higher_plus_one_tail  := last_stage_pc_higher_plus_one
      higher_minus_one_tail := last_stage_pc_higher_minus_one
    } else {
      higher_br             := h_br
      higher_plus_one_br    := h_br + 1.U
      higher_minus_one_br   := h_br - 1.U
      higher_tail           := h_tail
      higher_plus_one_tail  := h_tail + 1.U
      higher_minus_one_tail := h_tail - 1.U
    }
    val br_slots_targets = VecInit(brSlots.map(s =>
      Cat(
        Mux1H(Seq(
          (s.tarStat === TAR_OVF, higher_plus_one_br),
          (s.tarStat === TAR_UDF, higher_minus_one_br),
          (s.tarStat === TAR_FIT, higher_br)
        )),
        s.lower(s.offsetLen - 1, 0),
        0.U(1.W)
      )
    ))
    val tail_target = Wire(UInt(VAddrBits.W))
    if (tailSlot.subOffsetLen.isDefined) {
      tail_target := Mux(
        tailSlot.sharing,
        Cat(
          Mux1H(Seq(
            (tailSlot.tarStat === TAR_OVF, higher_plus_one_br),
            (tailSlot.tarStat === TAR_UDF, higher_minus_one_br),
            (tailSlot.tarStat === TAR_FIT, higher_br)
          )),
          tailSlot.lower(tailSlot.subOffsetLen.get - 1, 0),
          0.U(1.W)
        ),
        Cat(
          Mux1H(Seq(
            (tailSlot.tarStat === TAR_OVF, higher_plus_one_tail),
            (tailSlot.tarStat === TAR_UDF, higher_minus_one_tail),
            (tailSlot.tarStat === TAR_FIT, higher_tail)
          )),
          tailSlot.lower(tailSlot.offsetLen - 1, 0),
          0.U(1.W)
        )
      )
    } else {
      tail_target := Cat(
        Mux1H(Seq(
          (tailSlot.tarStat === TAR_OVF, higher_plus_one_tail),
          (tailSlot.tarStat === TAR_UDF, higher_minus_one_tail),
          (tailSlot.tarStat === TAR_FIT, higher_tail)
        )),
        tailSlot.lower(tailSlot.offsetLen - 1, 0),
        0.U(1.W)
      )
    }

    br_slots_targets.map(t => require(t.getWidth == VAddrBits))
    require(tail_target.getWidth == VAddrBits)
    val targets = VecInit(br_slots_targets :+ tail_target)
    targets
  }

  def getOffsetVec = VecInit(brSlots.map(_.offset) :+ tailSlot.offset)
  def getFallThrough(pc: UInt, last_stage_entry: Option[Tuple2[FTBEntry, Bool]] = None) =
    if (last_stage_entry.isDefined) {
      val stashed_carry = RegEnable(last_stage_entry.get._1.carry, last_stage_entry.get._2)
      getFallThroughAddr(pc, stashed_carry, pftAddr)
    } else {
      getFallThroughAddr(pc, carry, pftAddr)
    }
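
  // Note: pftAddr holds only the low bits of the fall-through address within the
  // fetch-aligned region, and `carry` records whether the fall-through crossed into the
  // next region; getFallThroughAddr (from BPUUtils) rebuilds the full address, roughly
  // Cat(pc_high + carry, pftAddr, 0.U(instOffsetBits.W)).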

  def hasBr(offset: UInt) =
    brSlots.map(s => s.valid && s.offset <= offset).reduce(_ || _) ||
      (tailSlot.valid && tailSlot.offset <= offset && tailSlot.sharing)

  def getBrMaskByOffset(offset: UInt) =
    brSlots.map { s =>
      s.valid && s.offset <= offset
    } :+
      (tailSlot.valid && tailSlot.offset <= offset && tailSlot.sharing)

  def getBrRecordedVec(offset: UInt) =
    VecInit(
      brSlots.map(s => s.valid && s.offset === offset) :+
        (tailSlot.valid && tailSlot.offset === offset && tailSlot.sharing)
    )

  def brIsSaved(offset: UInt) = getBrRecordedVec(offset).reduce(_ || _)

  def brValids =
    VecInit(
      brSlots.map(_.valid) :+ (tailSlot.valid && tailSlot.sharing)
    )

  def noEmptySlotForNewBr =
    VecInit(brSlots.map(_.valid) :+ tailSlot.valid).reduce(_ && _)

  def newBrCanNotInsert(offset: UInt) = {
    val lastSlotForBr = tailSlot
    lastSlotForBr.valid && lastSlotForBr.offset < offset
  }

  def jmpValid =
    tailSlot.valid && !tailSlot.sharing

  def brOffset =
    VecInit(brSlots.map(_.offset) :+ tailSlot.offset)

  def entryConsistent(that: FTBEntry) = {
    val validDiff = this.valid === that.valid
    val brSlotsDiffSeq: IndexedSeq[Bool] =
      this.brSlots.zip(that.brSlots).map {
        case (x, y) => x.slotConsistent(y)
      }
    val tailSlotDiff         = this.tailSlot.slotConsistent(that.tailSlot)
    val pftAddrDiff          = this.pftAddr === that.pftAddr
    val carryDiff            = this.carry === that.carry
    val isCallDiff           = this.isCall === that.isCall
    val isRetDiff            = this.isRet === that.isRet
    val isJalrDiff           = this.isJalr === that.isJalr
    val lastMayBeRviCallDiff = this.last_may_be_rvi_call === that.last_may_be_rvi_call
    val alwaysTakenDiff: IndexedSeq[Bool] =
      this.strong_bias.zip(that.strong_bias).map {
        case (x, y) => x === y
      }
    VecInit(
      validDiff,
      brSlotsDiffSeq.reduce(_ && _),
      tailSlotDiff,
      pftAddrDiff,
      carryDiff,
      isCallDiff,
      isRetDiff,
      isJalrDiff,
      lastMayBeRviCallDiff,
      alwaysTakenDiff.reduce(_ && _)
    ).reduce(_ && _)
  }

  def display(cond: Bool): Unit = {
    XSDebug(cond, p"-----------FTB entry----------- \n")
    XSDebug(cond, p"v=${valid}\n")
    for (i <- 0 until numBr) {
      XSDebug(
        cond,
        p"[br$i]: v=${allSlotsForBr(i).valid}, offset=${allSlotsForBr(i).offset}," +
          p"lower=${Hexadecimal(allSlotsForBr(i).lower)}\n"
      )
    }
    XSDebug(
      cond,
      p"[tailSlot]: v=${tailSlot.valid}, offset=${tailSlot.offset}," +
        p"lower=${Hexadecimal(tailSlot.lower)}, sharing=${tailSlot.sharing}\n"
    )
    XSDebug(cond, p"pftAddr=${Hexadecimal(pftAddr)}, carry=$carry\n")
    XSDebug(cond, p"isCall=$isCall, isRet=$isRet, isJalr=$isJalr\n")
    XSDebug(cond, p"last_may_be_rvi_call=$last_may_be_rvi_call\n")
    XSDebug(cond, p"------------------------------- \n")
  }

}

class FTBEntryWithTag(implicit p: Parameters) extends XSBundle with FTBParams with BPUUtils {
  val entry = new FTBEntry
  val tag   = UInt(tagLength.W)
  def display(cond: Bool): Unit = {
    entry.display(cond)
    XSDebug(cond, p"tag is ${Hexadecimal(tag)}\n------------------------------- \n")
  }
}

class FTBMeta(implicit p: Parameters) extends XSBundle with FTBParams {
  val writeWay   = UInt(log2Ceil(numWays).W)
  val hit        = Bool()
  val pred_cycle = if (!env.FPGAPlatform) Some(UInt(64.W)) else None
}

object FTBMeta {
  def apply(writeWay: UInt, hit: Bool, pred_cycle: UInt)(implicit p: Parameters): FTBMeta = {
    val e = Wire(new FTBMeta)
    e.writeWay := writeWay
    e.hit      := hit
    e.pred_cycle.map(_ := pred_cycle)
    e
  }
}

// class UpdateQueueEntry(implicit p: Parameters) extends XSBundle with FTBParams {
//   val pc = UInt(VAddrBits.W)
//   val ftb_entry = new FTBEntry
//   val hit = Bool()
//   val hit_way = UInt(log2Ceil(numWays).W)
// }
//
// object UpdateQueueEntry {
//   def apply(pc: UInt, fe: FTBEntry, hit: Bool, hit_way: UInt)(implicit p: Parameters): UpdateQueueEntry = {
//     val e = Wire(new UpdateQueueEntry)
//     e.pc := pc
//     e.ftb_entry := fe
//     e.hit := hit
//     e.hit_way := hit_way
//     e
//   }
// }

class FTBTableAddr(val idxBits: Int, val banks: Int, val skewedBits: Int)(implicit p: Parameters) extends XSBundle {
  val addr = new TableAddr(idxBits, banks)
  def getIdx(x: UInt) = addr.getIdx(x) ^ Cat(addr.getTag(x), addr.getIdx(x))(idxBits + skewedBits - 1, skewedBits)
  def getTag(x: UInt) = addr.getTag(x)
}
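
// Note on the skewed index: getIdx above XORs the plain set index with a window of
// higher address bits. With the instantiation below (idxBits = log2Up(numSets),
// banks = 1, skewedBits = 3) this is roughly
//
//   idx = pc_idx ^ Cat(pc_tag, pc_idx)(idxBits + 2, 3)
//
// so tag bits shifted down by skewedBits fold into the index, spreading pathological
// strides across different sets.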

class FTB(implicit p: Parameters) extends BasePredictor with FTBParams with BPUUtils
    with HasCircularQueuePtrHelper with HasPerfEvents {
  override val meta_size = WireInit(0.U.asTypeOf(new FTBMeta)).getWidth

  val ftbAddr = new FTBTableAddr(log2Up(numSets), 1, 3)

  class FTBBank(val numSets: Int, val nWays: Int) extends XSModule with BPUUtils {
    val io = IO(new Bundle {
      val s1_fire = Input(Bool())

      // when the ftb hits, read_hits.valid is true and read_hits.bits is the one-hot of the hit way
      // when the ftb misses, read_hits.valid is false and read_hits.bits is the one-hot of allocWay
      // val read_hits = Valid(Vec(numWays, Bool()))
      val req_pc    = Flipped(DecoupledIO(UInt(VAddrBits.W)))
      val read_resp = Output(new FTBEntry)
      val read_hits = Valid(UInt(log2Ceil(numWays).W))

      val read_multi_entry = Output(new FTBEntry)
      val read_multi_hits  = Valid(UInt(log2Ceil(numWays).W))

      val u_req_pc      = Flipped(DecoupledIO(UInt(VAddrBits.W)))
      val update_hits   = Valid(UInt(log2Ceil(numWays).W))
      val update_access = Input(Bool())

      val update_pc          = Input(UInt(VAddrBits.W))
      val update_write_data  = Flipped(Valid(new FTBEntryWithTag))
      val update_write_way   = Input(UInt(log2Ceil(numWays).W))
      val update_write_alloc = Input(Bool())
    })

    // Extract holdRead logic to fix the bug where an update read overrides the predict read result
    val ftb = Module(new SplittedSRAMTemplate(
      new FTBEntryWithTag,
      set = numSets,
      way = numWays,
      dataSplit = 8,
      shouldReset = true,
      holdRead = false,
      singlePort = true,
      withClockGate = true,
      hasMbist = hasMbist,
      hasSramCtl = hasSramCtl
    ))
    private val mbistPl = MbistPipeline.PlaceMbistPipeline(1, "MbistPipeFtb", hasMbist)
    val ftb_r_entries   = ftb.io.r.resp.data.map(_.entry)

    val pred_rdata = HoldUnless(
      ftb.io.r.resp.data,
      RegNext(io.req_pc.valid && !io.update_access),
      init = Some(VecInit.fill(numWays)(0.U.asTypeOf(new FTBEntryWithTag)))
    ) // rdata carries ftb_entry.valid, so it should be reset
    ftb.io.r.req.valid := io.req_pc.valid || io.u_req_pc.valid // io.s0_fire
    ftb.io.r.req.bits.setIdx := Mux(
      io.u_req_pc.valid,
      ftbAddr.getIdx(io.u_req_pc.bits),
      ftbAddr.getIdx(io.req_pc.bits)
    ) // s0_idx

    assert(!(io.req_pc.valid && io.u_req_pc.valid))

    io.req_pc.ready   := ftb.io.r.req.ready
    io.u_req_pc.ready := ftb.io.r.req.ready

    val req_tag = RegEnable(ftbAddr.getTag(io.req_pc.bits)(tagLength - 1, 0), io.req_pc.valid)
    val req_idx = RegEnable(ftbAddr.getIdx(io.req_pc.bits), io.req_pc.valid)

    val u_req_tag = RegEnable(ftbAddr.getTag(io.u_req_pc.bits)(tagLength - 1, 0), io.u_req_pc.valid)

    val read_entries = pred_rdata.map(_.entry)
    val read_tags    = pred_rdata.map(_.tag)

    val total_hits =
      VecInit((0 until numWays).map(b => read_tags(b) === req_tag && read_entries(b).valid && io.s1_fire))
    val hit = total_hits.reduce(_ || _)
    // val hit_way_1h = VecInit(PriorityEncoderOH(total_hits))
    val hit_way = OHToUInt(total_hits)

    // There may be two hits among the ways of the ftbBank, in which case OHToUInt gives a wrong way.
    // If there is a redirect in s2 at that moment, the wrong FTBEntry is used to calculate the target,
    // resulting in an address error and hurting performance.
    // The solution is to select one of the hit entries during a multi-hit as the entry for s2.
    // Considering timing, this entry is used in s3 and triggers an s3 redirect.
    val total_hits_reg   = RegEnable(total_hits, io.s1_fire)
    val read_entries_reg = read_entries.map(w => RegEnable(w, io.s1_fire))

    val multi_hit = VecInit((0 until numWays).map {
      i =>
        (0 until numWays).map { j =>
          if (i < j) total_hits_reg(i) && total_hits_reg(j)
          else false.B
        }.reduce(_ || _)
    }).reduce(_ || _)
    val multi_way = PriorityMux(Seq.tabulate(numWays)(i => (total_hits_reg(i)) -> i.asUInt(log2Ceil(numWays).W)))
    val multi_hit_selectEntry = PriorityMux(Seq.tabulate(numWays)(i => (total_hits_reg(i)) -> read_entries_reg(i)))
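    // Example (illustrative, numWays = 4): if ways 1 and 3 both hit, total_hits_reg is
    // b1010, the pairwise check above raises multi_hit, and the PriorityMuxes pick the
    // lowest hitting way (way 1) and its entry as the s3 fallback.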

    // Check if the entry read by ftbBank is legal.
    for (n <- 0 until numWays) {
      val req_pc_reg       = RegEnable(io.req_pc.bits, 0.U.asTypeOf(io.req_pc.bits), io.req_pc.valid)
      val req_pc_reg_lower = Cat(0.U(1.W), req_pc_reg(instOffsetBits + log2Ceil(PredictWidth) - 1, instOffsetBits))
      val ftbEntryEndLowerwithCarry = Cat(read_entries(n).carry, read_entries(n).pftAddr)
      val fallThroughErr            = req_pc_reg_lower + PredictWidth.U >= ftbEntryEndLowerwithCarry
      when(read_entries(n).valid && total_hits(n) && io.s1_fire) {
        assert(fallThroughErr, s"FTB read sram entry in way${n} fallThrough address error!")
      }
    }

    val u_total_hits = VecInit((0 until numWays).map(b =>
      ftb.io.r.resp.data(b).tag === u_req_tag && ftb.io.r.resp.data(b).entry.valid && RegNext(io.update_access)
    ))
    val u_hit = u_total_hits.reduce(_ || _)
    // val hit_way_1h = VecInit(PriorityEncoderOH(total_hits))
    val u_hit_way = OHToUInt(u_total_hits)

    // assert(PopCount(total_hits) === 1.U || PopCount(total_hits) === 0.U)
    // assert(PopCount(u_total_hits) === 1.U || PopCount(u_total_hits) === 0.U)
    for (n <- 1 to numWays) {
      XSPerfAccumulate(f"ftb_pred_${n}_way_hit", PopCount(total_hits) === n.U)
      XSPerfAccumulate(f"ftb_update_${n}_way_hit", PopCount(u_total_hits) === n.U)
    }

    val replacer = ReplacementPolicy.fromString(Some("setplru"), numWays, numSets)
    // val allocWriteWay = replacer.way(req_idx)

    val touch_set = Seq.fill(1)(Wire(UInt(log2Ceil(numSets).W)))
    val touch_way = Seq.fill(1)(Wire(Valid(UInt(log2Ceil(numWays).W))))

    val write_set = Wire(UInt(log2Ceil(numSets).W))
    val write_way = Wire(Valid(UInt(log2Ceil(numWays).W)))

    val read_set = Wire(UInt(log2Ceil(numSets).W))
    val read_way = Wire(Valid(UInt(log2Ceil(numWays).W)))

    read_set       := req_idx
    read_way.valid := hit
    read_way.bits  := hit_way

    // Read replacer access is postponed for 1 cycle
    // this helps timing
    touch_set(0)       := Mux(write_way.valid, write_set, RegNext(read_set))
    touch_way(0).valid := write_way.valid || RegNext(read_way.valid)
    touch_way(0).bits  := Mux(write_way.valid, write_way.bits, RegNext(read_way.bits))

    replacer.access(touch_set, touch_way)

    // Select the way to allocate on update
    // Selection logic:
    //    1. if any entry within the same index is invalid, select it
    //    2. if all entries are valid, use the replacer
    def allocWay(valids: UInt, idx: UInt): UInt =
      if (numWays > 1) {
        val w     = Wire(UInt(log2Up(numWays).W))
        val valid = WireInit(valids.andR)
        w := Mux(valid, replacer.way(idx), PriorityEncoder(~valids))
        w
      } else {
        val w = WireInit(0.U(log2Up(numWays).W))
        w
      }
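
    // Example (illustrative): with valids = b0111, way 3 is free, so
    // PriorityEncoder(~valids) allocates way 3; with valids = b1111 the set-PLRU
    // replacer chooses the victim instead.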

    io.read_resp       := Mux1H(total_hits, read_entries) // Mux1H
    io.read_hits.valid := hit
    io.read_hits.bits  := hit_way

    io.read_multi_entry      := multi_hit_selectEntry
    io.read_multi_hits.valid := multi_hit
    io.read_multi_hits.bits  := multi_way

    io.update_hits.valid := u_hit
    io.update_hits.bits  := u_hit_way

    // Update logic
    val u_valid       = io.update_write_data.valid
    val u_data        = io.update_write_data.bits
    val u_idx         = ftbAddr.getIdx(io.update_pc)
    val allocWriteWay = allocWay(RegNext(VecInit(ftb_r_entries.map(_.valid))).asUInt, u_idx)
    val u_way         = Mux(io.update_write_alloc, allocWriteWay, io.update_write_way)
    val u_mask        = UIntToOH(u_way)

    for (i <- 0 until numWays) {
      XSPerfAccumulate(f"ftb_replace_way$i", u_valid && io.update_write_alloc && u_way === i.U)
      XSPerfAccumulate(
        f"ftb_replace_way${i}_has_empty",
        u_valid && io.update_write_alloc && !ftb_r_entries.map(_.valid).reduce(_ && _) && u_way === i.U
      )
      XSPerfAccumulate(f"ftb_hit_way$i", hit && !io.update_access && hit_way === i.U)
    }

    ftb.io.w.apply(u_valid, u_data, u_idx, u_mask)

    // for replacer
    write_set       := u_idx
    write_way.valid := u_valid
    write_way.bits  := Mux(io.update_write_alloc, allocWriteWay, io.update_write_way)

    // print hit entry info
    Mux1H(total_hits, ftb.io.r.resp.data).display(true.B)
  } // FTBBank

  // FTB switch register & temporary storage of fauftb prediction results
  val s0_close_ftb_req            = RegInit(false.B)
  val s1_close_ftb_req            = RegEnable(s0_close_ftb_req, false.B, io.s0_fire(0))
  val s2_close_ftb_req            = RegEnable(s1_close_ftb_req, false.B, io.s1_fire(0))
  val s2_fauftb_ftb_entry_dup     = io.s1_fire.map(f => RegEnable(io.fauftb_entry_in, f))
  val s2_fauftb_ftb_entry_hit_dup = io.s1_fire.map(f => RegEnable(io.fauftb_entry_hit_in, f))

  val ftbBank = Module(new FTBBank(numSets, numWays))

  // gate the read request while the FTB is closed
  ftbBank.io.req_pc.valid := io.s0_fire(0) && !s0_close_ftb_req
  ftbBank.io.req_pc.bits  := s0_pc_dup(0)

  val s2_multi_hit        = ftbBank.io.read_multi_hits.valid && io.s2_fire(0)
  val s2_multi_hit_way    = ftbBank.io.read_multi_hits.bits
  val s2_multi_hit_entry  = ftbBank.io.read_multi_entry
  val s2_multi_hit_enable = s2_multi_hit && !s2_close_ftb_req
  XSPerfAccumulate("ftb_s2_multi_hit", s2_multi_hit)
  XSPerfAccumulate("ftb_s2_multi_hit_enable", s2_multi_hit_enable)

  // After the FTB is closed, the entry output at s2 is the FauFTB entry cached in s1
  val btb_enable_dup   = dup(RegNext(io.ctrl.btb_enable))
  val s1_read_resp     = Mux(s1_close_ftb_req, io.fauftb_entry_in, ftbBank.io.read_resp)
  val s2_ftbBank_dup   = io.s1_fire.map(f => RegEnable(ftbBank.io.read_resp, f))
  val s2_ftb_entry_dup = dup(0.U.asTypeOf(new FTBEntry))
  for (
    ((s2_fauftb_entry, s2_ftbBank_entry), s2_ftb_entry) <-
      s2_fauftb_ftb_entry_dup zip s2_ftbBank_dup zip s2_ftb_entry_dup
  ) {
    s2_ftb_entry := Mux(s2_close_ftb_req, s2_fauftb_entry, s2_ftbBank_entry)
  }
  val s3_ftb_entry_dup = io.s2_fire.zip(s2_ftb_entry_dup).map { case (f, e) =>
    RegEnable(Mux(s2_multi_hit_enable, s2_multi_hit_entry, e), f)
  }
  val real_s2_ftb_entry         = Mux(s2_multi_hit_enable, s2_multi_hit_entry, s2_ftb_entry_dup(0))
  val real_s2_pc                = s2_pc_dup(0).getAddr()
  val real_s2_startLower        = Cat(0.U(1.W), real_s2_pc(instOffsetBits + log2Ceil(PredictWidth) - 1, instOffsetBits))
  val real_s2_endLowerwithCarry = Cat(real_s2_ftb_entry.carry, real_s2_ftb_entry.pftAddr)
  val real_s2_fallThroughErr =
    real_s2_startLower >= real_s2_endLowerwithCarry || real_s2_endLowerwithCarry > (real_s2_startLower + PredictWidth.U)
  val real_s3_fallThroughErr_dup = io.s2_fire.map(f => RegEnable(real_s2_fallThroughErr, f))

  // After the FTB is closed, the hit output at s2 is the FauFTB hit cached in s1.
  // s1_hit is the ftbBank hit.
  val s1_hit         = Mux(s1_close_ftb_req, false.B, ftbBank.io.read_hits.valid && io.ctrl.btb_enable)
  val s2_ftb_hit_dup = io.s1_fire.map(f => RegEnable(s1_hit, 0.B, f))
  val s2_hit_dup     = dup(0.U.asTypeOf(Bool()))
  for (
    ((s2_fauftb_hit, s2_ftb_hit), s2_hit) <-
      s2_fauftb_ftb_entry_hit_dup zip s2_ftb_hit_dup zip s2_hit_dup
  ) {
    s2_hit := Mux(s2_close_ftb_req, s2_fauftb_hit, s2_ftb_hit)
  }
  val s3_hit_dup = io.s2_fire.zip(s2_hit_dup).map { case (f, h) =>
    RegEnable(Mux(s2_multi_hit_enable, s2_multi_hit, h), 0.B, f)
  }
  val s3_multi_hit_dup  = io.s2_fire.map(f => RegEnable(s2_multi_hit_enable, f))
  val writeWay          = Mux(s1_close_ftb_req, 0.U, ftbBank.io.read_hits.bits)
  val s2_ftb_meta       = RegEnable(FTBMeta(writeWay.asUInt, s1_hit, GTimer()).asUInt, io.s1_fire(0))
  val s2_multi_hit_meta = FTBMeta(s2_multi_hit_way.asUInt, s2_multi_hit, GTimer()).asUInt

  // Count consecutive predictions on which the fauftb and ftb entries are consistent
  val fauftb_ftb_entry_consistent_counter = RegInit(0.U(FTBCLOSE_THRESHOLD_SZ.W))
  val fauftb_ftb_entry_consistent         = s2_fauftb_ftb_entry_dup(0).entryConsistent(s2_ftbBank_dup(0))

  // while ftb_req is closed, the counter must keep its value
  when(io.s2_fire(0) && s2_fauftb_ftb_entry_hit_dup(0) && s2_ftb_hit_dup(0)) {
    fauftb_ftb_entry_consistent_counter := Mux(
      fauftb_ftb_entry_consistent,
      fauftb_ftb_entry_consistent_counter + 1.U,
      0.U
    )
  }.elsewhen(io.s2_fire(0) && !s2_fauftb_ftb_entry_hit_dup(0) && s2_ftb_hit_dup(0)) {
    fauftb_ftb_entry_consistent_counter := 0.U
  }

  when((fauftb_ftb_entry_consistent_counter >= FTBCLOSE_THRESHOLD) && io.s0_fire(0)) {
    s0_close_ftb_req := true.B
  }
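
  // In short: once the FauFTB and FTB entries have agreed on FTBCLOSE_THRESHOLD (500)
  // consecutive predictions in which both structures hit, s0_close_ftb_req is raised and
  // FTB SRAM reads are gated off to save power; a false hit or an IFU redirect (handled
  // below) clears the counter and reopens the FTB.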

  val update_valid = RegNext(io.update.valid, init = false.B)
  val update       = Wire(new BranchPredictionUpdate)
  update := RegEnable(io.update.bits, io.update.valid)

  // The pc register has been moved outside of the predictor, so the pc field of the update
  // bundle and the other update data are not in the same stage;
  // io.update.bits.pc is therefore used directly here
  val update_pc = io.update.bits.pc

  // To improve Clock Gating Efficiency
  update.meta := RegEnable(io.update.bits.meta, io.update.valid && !io.update.bits.old_entry)

  // Clear counter during false_hit or ifuRedirect
  val ftb_false_hit = WireInit(false.B)
  val needReopen    = s0_close_ftb_req && (ftb_false_hit || io.redirectFromIFU)
  ftb_false_hit := update_valid && update.false_hit
  when(needReopen) {
    fauftb_ftb_entry_consistent_counter := 0.U
    s0_close_ftb_req                    := false.B
  }

  val s2_close_consistent     = s2_fauftb_ftb_entry_dup(0).entryConsistent(s2_ftb_entry_dup(0))
  val s2_not_close_consistent = s2_ftbBank_dup(0).entryConsistent(s2_ftb_entry_dup(0))

  when(s2_close_ftb_req && io.s2_fire(0)) {
    assert(s2_close_consistent, s"Entry inconsistency after ftb req is closed!")
  }.elsewhen(!s2_close_ftb_req && io.s2_fire(0)) {
    assert(s2_not_close_consistent, s"Entry inconsistency after ftb req is not closed!")
  }

  val reopenCounter         = !s1_close_ftb_req && s2_close_ftb_req && io.s2_fire(0)
  val falseHitReopenCounter = ftb_false_hit && s1_close_ftb_req
  XSPerfAccumulate("ftb_req_reopen_counter", reopenCounter)
  XSPerfAccumulate("false_hit_reopen_counter", falseHitReopenCounter)
  XSPerfAccumulate("ifuRedirect_needReopen", s1_close_ftb_req && io.redirectFromIFU)
  XSPerfAccumulate("this_cycle_is_close", s2_close_ftb_req && io.s2_fire(0))
  XSPerfAccumulate("this_cycle_is_open", !s2_close_ftb_req && io.s2_fire(0))

  // io.out.bits.resp := RegEnable(io.in.bits.resp_in(0), 0.U.asTypeOf(new BranchPredictionResp), io.s1_fire)
  io.out := io.in.bits.resp_in(0)

  io.out.s2.full_pred.map { case fp => fp.multiHit := false.B }

  io.out.s2.full_pred.zip(s2_hit_dup).map { case (fp, h) => fp.hit := h }
  for (
    full_pred & s2_ftb_entry & s2_pc & s1_pc & s1_fire <-
      io.out.s2.full_pred zip s2_ftb_entry_dup zip s2_pc_dup zip s1_pc_dup zip io.s1_fire
  ) {
    full_pred.fromFtbEntry(
      s2_ftb_entry,
      s2_pc.getAddr(),
      // Previous stage meta for better timing
      Some(s1_pc, s1_fire),
      Some(s1_read_resp, s1_fire)
    )
  }

  io.out.s3.full_pred.zip(s3_hit_dup).map { case (fp, h) => fp.hit := h }
  io.out.s3.full_pred.zip(s3_multi_hit_dup).map { case (fp, m) => fp.multiHit := m }
  for (
    full_pred & s3_ftb_entry & s3_pc & s2_pc & s2_fire <-
      io.out.s3.full_pred zip s3_ftb_entry_dup zip s3_pc_dup zip s2_pc_dup zip io.s2_fire
  )
    full_pred.fromFtbEntry(s3_ftb_entry, s3_pc.getAddr(), Some((s2_pc.getAddr(), s2_fire)))

  // Overwrite the fallThroughErr value
  io.out.s3.full_pred.zipWithIndex.map { case (fp, i) => fp.fallThroughErr := real_s3_fallThroughErr_dup(i) }

  io.out.last_stage_ftb_entry := s3_ftb_entry_dup(0)
  io.out.last_stage_meta      := RegEnable(Mux(s2_multi_hit_enable, s2_multi_hit_meta, s2_ftb_meta), io.s2_fire(0))
  io.out.s1_ftbCloseReq       := s1_close_ftb_req
  io.out.s1_uftbHit           := io.fauftb_entry_hit_in
  val s1_uftbHasIndirect = io.fauftb_entry_in.jmpValid &&
    io.fauftb_entry_in.isJalr && !io.fauftb_entry_in.isRet // uFTB determines that it is a real JALR; RET and JAL are excluded
  io.out.s1_uftbHasIndirect := s1_uftbHasIndirect

  // always taken logic
  for (i <- 0 until numBr) {
    for (
      out_fp & in_fp & s2_hit & s2_ftb_entry <-
        io.out.s2.full_pred zip io.in.bits.resp_in(0).s2.full_pred zip s2_hit_dup zip s2_ftb_entry_dup
    )
      out_fp.br_taken_mask(i) := in_fp.br_taken_mask(i) || s2_hit && s2_ftb_entry.strong_bias(i)
    for (
      out_fp & in_fp & s3_hit & s3_ftb_entry <-
        io.out.s3.full_pred zip io.in.bits.resp_in(0).s3.full_pred zip s3_hit_dup zip s3_ftb_entry_dup
    )
      out_fp.br_taken_mask(i) := in_fp.br_taken_mask(i) || s3_hit && s3_ftb_entry.strong_bias(i)
  }

  val s3_pc_diff       = s3_pc_dup(0).getAddr()
  val s3_pc_startLower = Cat(0.U(1.W), s3_pc_diff(instOffsetBits + log2Ceil(PredictWidth) - 1, instOffsetBits))
  val s3_ftb_entry_endLowerwithCarry = Cat(s3_ftb_entry_dup(0).carry, s3_ftb_entry_dup(0).pftAddr)
  val fallThroughErr =
    s3_pc_startLower >= s3_ftb_entry_endLowerwithCarry || s3_ftb_entry_endLowerwithCarry > (s3_pc_startLower + PredictWidth.U)
  XSError(
    s3_ftb_entry_dup(0).valid && s3_hit_dup(0) && io.s3_fire(0) && fallThroughErr,
    "FTB read sram entry in s3 fallThrough address error!"
  )

  // Update logic
  val u_meta  = update.meta.asTypeOf(new FTBMeta)
  val u_valid = update_valid && !update.old_entry && !s0_close_ftb_req

  val (_, delay2_pc)    = DelayNWithValid(update_pc, u_valid, 2)
  val (_, delay2_entry) = DelayNWithValid(update.ftb_entry, u_valid, 2)

  val update_now       = u_valid && u_meta.hit
  val update_need_read = u_valid && !u_meta.hit
  // stall one more cycle because a whole cycle is used for the update-read tag match
  io.s1_ready := ftbBank.io.req_pc.ready && !update_need_read && !RegNext(update_need_read)

  ftbBank.io.u_req_pc.valid := update_need_read
  ftbBank.io.u_req_pc.bits  := update_pc

  val ftb_write = Wire(new FTBEntryWithTag)
  ftb_write.entry := Mux(update_now, update.ftb_entry, delay2_entry)
  ftb_write.tag   := ftbAddr.getTag(Mux(update_now, update_pc, delay2_pc))(tagLength - 1, 0)

  val write_valid = update_now || DelayN(u_valid && !u_meta.hit, 2)
  val write_pc    = Mux(update_now, update_pc, delay2_pc)

  ftbBank.io.update_write_data.valid := write_valid
  ftbBank.io.update_write_data.bits  := ftb_write
  ftbBank.io.update_pc               := write_pc
  ftbBank.io.update_write_way := Mux(
    update_now,
    u_meta.writeWay,
    RegNext(ftbBank.io.update_hits.bits)
  ) // use it one cycle later
  ftbBank.io.update_write_alloc := Mux(
    update_now,
    false.B,
    RegNext(!ftbBank.io.update_hits.valid)
  ) // use it one cycle later
  ftbBank.io.update_access := u_valid && !u_meta.hit
  ftbBank.io.s1_fire       := io.s1_fire(0)

  val ftb_write_fallThrough = ftb_write.entry.getFallThrough(write_pc)
  when(write_valid) {
    assert(write_pc + (FetchWidth * 4).U >= ftb_write_fallThrough, s"FTB write_entry fallThrough address error!")
  }
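
  // Update-path summary: an update that already knows its way (u_meta.hit) writes
  // immediately (update_now); otherwise the update pc first replays through the read
  // port to find the hit way or an allocation candidate, and the write is issued two
  // cycles later via delay2_pc/delay2_entry, with io.s1_ready stalled in between so the
  // single-ported SRAM is not contended.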

  XSDebug("req_v=%b, req_pc=%x, ready=%b (resp at next cycle)\n", io.s0_fire(0), s0_pc_dup(0), ftbBank.io.req_pc.ready)
  XSDebug("s2_hit=%b, hit_way=%b\n", s2_hit_dup(0), writeWay.asUInt)
  XSDebug(
    "s2_br_taken_mask=%b, s2_real_taken_mask=%b\n",
    io.in.bits.resp_in(0).s2.full_pred(0).br_taken_mask.asUInt,
    io.out.s2.full_pred(0).real_slot_taken_mask().asUInt
  )
  XSDebug("s2_target=%x\n", io.out.s2.getTarget(0))

  s2_ftb_entry_dup(0).display(true.B)

  XSPerfAccumulate("ftb_read_hits", RegNext(io.s0_fire(0)) && s1_hit)
  XSPerfAccumulate("ftb_read_misses", RegNext(io.s0_fire(0)) && !s1_hit)

  XSPerfAccumulate("ftb_commit_hits", update_valid && u_meta.hit)
  XSPerfAccumulate("ftb_commit_misses", update_valid && !u_meta.hit)

  XSPerfAccumulate("ftb_update_req", update_valid)
  XSPerfAccumulate("ftb_update_ignored", update_valid && update.old_entry)
  XSPerfAccumulate("ftb_updated", u_valid)
  XSPerfAccumulate("ftb_closing_update_counter", s0_close_ftb_req && u_valid)

  override val perfEvents = Seq(
    ("ftb_commit_hits            ", update_valid && u_meta.hit),
    ("ftb_commit_misses          ", update_valid && !u_meta.hit)
  )
  generatePerfEvent()
}