FTB.scala (cf7d6b7a1a781c73aeb87de112de2e7fe5ea3b7c)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import os.copy
import scala.{Tuple2 => &}
import scala.math.min
import utility._
import utils._
import xiangshan._

trait FTBParams extends HasXSParameter with HasBPUConst {
  val numEntries = FtbSize
  val numWays = FtbWays
  val numSets = numEntries / numWays // 512
  val tagSize = 20

  val TAR_STAT_SZ = 2
  def TAR_FIT = 0.U(TAR_STAT_SZ.W)
  def TAR_OVF = 1.U(TAR_STAT_SZ.W)
  def TAR_UDF = 2.U(TAR_STAT_SZ.W)

  def BR_OFFSET_LEN = 12
  def JMP_OFFSET_LEN = 20

  def FTBCLOSE_THRESHOLD_SZ = log2Ceil(500)
  def FTBCLOSE_THRESHOLD = 500.U(FTBCLOSE_THRESHOLD_SZ.W) // can be modified
}
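
// A worked illustration of the tarStat encoding above (a sketch; the concrete
// numbers are assumptions for illustration, not taken from this file): a stored
// target keeps only its low offsetLen bits plus a 2-bit tarStat recording how
// the high bits of the target relate to the high bits of the pc:
//   TAR_FIT: target high bits == pc high bits      -> reuse the pc high bits
//   TAR_OVF: target high bits == pc high bits + 1  -> use pc high bits + 1
//   TAR_UDF: target high bits == pc high bits - 1  -> use pc high bits - 1
// so a full VAddrBits-wide target can be rebuilt from pc, lower, and tarStat alone.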
class FtbSlot_FtqMem(implicit p: Parameters) extends XSBundle with FTBParams {
  val offset = UInt(log2Ceil(PredictWidth).W)
  val sharing = Bool()
  val valid = Bool()
}

class FtbSlot(val offsetLen: Int, val subOffsetLen: Option[Int] = None)(implicit p: Parameters) extends FtbSlot_FtqMem
    with FTBParams {
  if (subOffsetLen.isDefined) {
    require(subOffsetLen.get <= offsetLen)
  }
  val lower = UInt(offsetLen.W)
  val tarStat = UInt(TAR_STAT_SZ.W)

  def setLowerStatByTarget(pc: UInt, target: UInt, isShare: Boolean) = {
    def getTargetStatByHigher(pc_higher: UInt, target_higher: UInt) =
      Mux(target_higher > pc_higher, TAR_OVF, Mux(target_higher < pc_higher, TAR_UDF, TAR_FIT))
    def getLowerByTarget(target: UInt, offsetLen: Int) = target(offsetLen, 1)
    val offLen = if (isShare) this.subOffsetLen.get else this.offsetLen
    val pc_higher = pc(VAddrBits - 1, offLen + 1)
    val target_higher = target(VAddrBits - 1, offLen + 1)
    val stat = getTargetStatByHigher(pc_higher, target_higher)
    val lower = ZeroExt(getLowerByTarget(target, offLen), this.offsetLen)
    this.lower := lower
    this.tarStat := stat
    this.sharing := isShare.B
  }

  def getTarget(pc: UInt, last_stage: Option[Tuple2[UInt, Bool]] = None) = {
    def getTarget(offLen: Int)(pc: UInt, lower: UInt, stat: UInt, last_stage: Option[Tuple2[UInt, Bool]] = None) = {
      val h = pc(VAddrBits - 1, offLen + 1)
      val higher = Wire(UInt((VAddrBits - offLen - 1).W))
      val higher_plus_one = Wire(UInt((VAddrBits - offLen - 1).W))
      val higher_minus_one = Wire(UInt((VAddrBits - offLen - 1).W))

      // Switch between previous stage pc and current stage pc
      // Give flexibility for timing
      if (last_stage.isDefined) {
        val last_stage_pc = last_stage.get._1
        val last_stage_pc_h = last_stage_pc(VAddrBits - 1, offLen + 1)
        val stage_en = last_stage.get._2
        higher := RegEnable(last_stage_pc_h, stage_en)
        higher_plus_one := RegEnable(last_stage_pc_h + 1.U, stage_en)
        higher_minus_one := RegEnable(last_stage_pc_h - 1.U, stage_en)
      } else {
        higher := h
        higher_plus_one := h + 1.U
        higher_minus_one := h - 1.U
      }
      val target =
        Cat(
          Mux1H(Seq(
            (stat === TAR_OVF, higher_plus_one),
            (stat === TAR_UDF, higher_minus_one),
            (stat === TAR_FIT, higher)
          )),
          lower(offLen - 1, 0),
          0.U(1.W)
        )
      require(target.getWidth == VAddrBits)
      require(offLen != 0)
      target
    }
    if (subOffsetLen.isDefined)
      Mux(
        sharing,
        getTarget(subOffsetLen.get)(pc, lower, tarStat, last_stage),
        getTarget(offsetLen)(pc, lower, tarStat, last_stage)
      )
    else
      getTarget(offsetLen)(pc, lower, tarStat, last_stage)
  }
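
  // Numeric sketch of getTarget (assumed values, not from this file): with
  // offsetLen = 12, pc = 0x80001000 and target = 0x80002004,
  //   pc(VAddrBits-1, 13)     = 0x40000 (h)
  //   target(VAddrBits-1, 13) = 0x40001 = h + 1  -> tarStat = TAR_OVF
  //   lower = target(12, 1)   = 0x002
  // and getTarget re-forms Cat(h + 1, lower, 0.U(1.W)) = 0x80002004.
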
  def fromAnotherSlot(that: FtbSlot) = {
    require(
      this.offsetLen > that.offsetLen && this.subOffsetLen.map(_ == that.offsetLen).getOrElse(true) ||
        this.offsetLen == that.offsetLen
    )
    this.offset := that.offset
    this.tarStat := that.tarStat
    this.sharing := (this.offsetLen > that.offsetLen && that.offsetLen == this.subOffsetLen.get).B
    this.valid := that.valid
    this.lower := ZeroExt(that.lower, this.offsetLen)
  }

  def slotConsistent(that: FtbSlot) =
    VecInit(
      this.offset === that.offset,
      this.lower === that.lower,
      this.tarStat === that.tarStat,
      this.sharing === that.sharing,
      this.valid === that.valid
    ).reduce(_ && _)

}

class FTBEntry_part(implicit p: Parameters) extends XSBundle with FTBParams with BPUUtils {
  val isCall = Bool()
  val isRet = Bool()
  val isJalr = Bool()

  def isJal = !isJalr
}

class FTBEntry_FtqMem(implicit p: Parameters) extends FTBEntry_part with FTBParams with BPUUtils {

  val brSlots = Vec(numBrSlot, new FtbSlot_FtqMem)
  val tailSlot = new FtbSlot_FtqMem

  def jmpValid =
    tailSlot.valid && !tailSlot.sharing

  def getBrRecordedVec(offset: UInt) =
    VecInit(
      brSlots.map(s => s.valid && s.offset === offset) :+
        (tailSlot.valid && tailSlot.offset === offset && tailSlot.sharing)
    )

  def brIsSaved(offset: UInt) = getBrRecordedVec(offset).reduce(_ || _)

  def getBrMaskByOffset(offset: UInt) =
    brSlots.map { s =>
      s.valid && s.offset <= offset
    } :+
      (tailSlot.valid && tailSlot.offset <= offset && tailSlot.sharing)

  def newBrCanNotInsert(offset: UInt) = {
    val lastSlotForBr = tailSlot
    lastSlotForBr.valid && lastSlotForBr.offset < offset
  }

}

class FTBEntry(implicit p: Parameters) extends FTBEntry_part with FTBParams with BPUUtils {

  val valid = Bool()

  val brSlots = Vec(numBrSlot, new FtbSlot(BR_OFFSET_LEN))

  val tailSlot = new FtbSlot(JMP_OFFSET_LEN, Some(BR_OFFSET_LEN))

  // Partial Fall-Through Address
  val pftAddr = UInt(log2Up(PredictWidth).W)
  val carry = Bool()

  val last_may_be_rvi_call = Bool()

  val always_taken = Vec(numBr, Bool())

  def getSlotForBr(idx: Int): FtbSlot = {
    require(idx <= numBr - 1)
    (idx, numBr) match {
      case (i, n) if i == n - 1 => this.tailSlot
      case _ => this.brSlots(idx)
    }
  }
  def allSlotsForBr =
    (0 until numBr).map(getSlotForBr(_))
  def setByBrTarget(brIdx: Int, pc: UInt, target: UInt) = {
    val slot = getSlotForBr(brIdx)
    slot.setLowerStatByTarget(pc, target, brIdx == numBr - 1)
  }
  def setByJmpTarget(pc: UInt, target: UInt) =
    this.tailSlot.setLowerStatByTarget(pc, target, false)

  def getTargetVec(pc: UInt, last_stage: Option[Tuple2[UInt, Bool]] = None) = {
    /*
      Previous design: use FtbSlot.getTarget to compute the three targets separately.
      That generates nine sets of registers, holding higher, higher + 1, and higher - 1 for each slot.
      Current design: reuse the parts those nine registers duplicate. Register the high bits
      (last_stage_pc_higher) shared by brtarget and jmptarget, plus the middle bits
      (last_stage_pc_middle) that the +1/-1 may carry into, then concatenate them according to
      the carry-out to obtain brtarget and jmptarget.
     */
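    // Illustrative decomposition (widths assumed for clarity): the pc splits as
    //   pc = higher ++ middle ++ low bits, where higher = pc(VAddrBits-1, JMP_OFFSET_LEN+1)
    //   and middle = pc(JMP_OFFSET_LEN, BR_OFFSET_LEN+1).
    // Only higher, higher +/- 1, and middle +/- 1 are registered (six values instead
    // of nine); higher_plus_one_br, for example, is Cat(higher or higher + 1,
    // chosen by the carry-out of middle + 1, followed by the low bits of middle + 1),
    // so the br targets and the jmp target share the same registered high bits.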
    val h_br = pc(VAddrBits - 1, BR_OFFSET_LEN + 1)
    val higher_br = Wire(UInt((VAddrBits - BR_OFFSET_LEN - 1).W))
    val higher_plus_one_br = Wire(UInt((VAddrBits - BR_OFFSET_LEN - 1).W))
    val higher_minus_one_br = Wire(UInt((VAddrBits - BR_OFFSET_LEN - 1).W))
    val h_tail = pc(VAddrBits - 1, JMP_OFFSET_LEN + 1)
    val higher_tail = Wire(UInt((VAddrBits - JMP_OFFSET_LEN - 1).W))
    val higher_plus_one_tail = Wire(UInt((VAddrBits - JMP_OFFSET_LEN - 1).W))
    val higher_minus_one_tail = Wire(UInt((VAddrBits - JMP_OFFSET_LEN - 1).W))
    if (last_stage.isDefined) {
      val last_stage_pc = last_stage.get._1
      val stage_en = last_stage.get._2
      val last_stage_pc_higher = RegEnable(last_stage_pc(VAddrBits - 1, JMP_OFFSET_LEN + 1), stage_en)
      val last_stage_pc_middle = RegEnable(last_stage_pc(JMP_OFFSET_LEN, BR_OFFSET_LEN + 1), stage_en)
      val last_stage_pc_higher_plus_one = RegEnable(last_stage_pc(VAddrBits - 1, JMP_OFFSET_LEN + 1) + 1.U, stage_en)
      val last_stage_pc_higher_minus_one = RegEnable(last_stage_pc(VAddrBits - 1, JMP_OFFSET_LEN + 1) - 1.U, stage_en)
      val last_stage_pc_middle_plus_one =
        RegEnable(Cat(0.U(1.W), last_stage_pc(JMP_OFFSET_LEN, BR_OFFSET_LEN + 1)) + 1.U, stage_en)
      val last_stage_pc_middle_minus_one =
        RegEnable(Cat(0.U(1.W), last_stage_pc(JMP_OFFSET_LEN, BR_OFFSET_LEN + 1)) - 1.U, stage_en)

      higher_br := Cat(last_stage_pc_higher, last_stage_pc_middle)
      higher_plus_one_br := Mux(
        last_stage_pc_middle_plus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN),
        Cat(last_stage_pc_higher_plus_one, last_stage_pc_middle_plus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0)),
        Cat(last_stage_pc_higher, last_stage_pc_middle_plus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0))
      )
      higher_minus_one_br := Mux(
        last_stage_pc_middle_minus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN),
        Cat(last_stage_pc_higher_minus_one, last_stage_pc_middle_minus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0)),
        Cat(last_stage_pc_higher, last_stage_pc_middle_minus_one(JMP_OFFSET_LEN - BR_OFFSET_LEN - 1, 0))
      )

      higher_tail := last_stage_pc_higher
      higher_plus_one_tail := last_stage_pc_higher_plus_one
      higher_minus_one_tail := last_stage_pc_higher_minus_one
    } else {
      higher_br := h_br
      higher_plus_one_br := h_br + 1.U
      higher_minus_one_br := h_br - 1.U
      higher_tail := h_tail
      higher_plus_one_tail := h_tail + 1.U
      higher_minus_one_tail := h_tail - 1.U
    }
    val br_slots_targets = VecInit(brSlots.map(s =>
      Cat(
        Mux1H(Seq(
          (s.tarStat === TAR_OVF, higher_plus_one_br),
          (s.tarStat === TAR_UDF, higher_minus_one_br),
          (s.tarStat === TAR_FIT, higher_br)
        )),
        s.lower(s.offsetLen - 1, 0),
        0.U(1.W)
      )
    ))
    val tail_target = Wire(UInt(VAddrBits.W))
    if (tailSlot.subOffsetLen.isDefined) {
      tail_target := Mux(
        tailSlot.sharing,
        Cat(
          Mux1H(Seq(
            (tailSlot.tarStat === TAR_OVF, higher_plus_one_br),
            (tailSlot.tarStat === TAR_UDF, higher_minus_one_br),
            (tailSlot.tarStat === TAR_FIT, higher_br)
          )),
          tailSlot.lower(tailSlot.subOffsetLen.get - 1, 0),
          0.U(1.W)
        ),
        Cat(
          Mux1H(Seq(
            (tailSlot.tarStat === TAR_OVF, higher_plus_one_tail),
            (tailSlot.tarStat === TAR_UDF, higher_minus_one_tail),
            (tailSlot.tarStat === TAR_FIT, higher_tail)
          )),
          tailSlot.lower(tailSlot.offsetLen - 1, 0),
          0.U(1.W)
        )
      )
    } else {
      tail_target := Cat(
        Mux1H(Seq(
          (tailSlot.tarStat === TAR_OVF, higher_plus_one_tail),
          (tailSlot.tarStat === TAR_UDF, higher_minus_one_tail),
          (tailSlot.tarStat === TAR_FIT, higher_tail)
        )),
        tailSlot.lower(tailSlot.offsetLen - 1, 0),
        0.U(1.W)
      )
    }

    br_slots_targets.map(t => require(t.getWidth == VAddrBits))
    require(tail_target.getWidth == VAddrBits)
    val targets = VecInit(br_slots_targets :+ tail_target)
    targets
  }

  def getOffsetVec = VecInit(brSlots.map(_.offset) :+ tailSlot.offset)
  def getFallThrough(pc: UInt, last_stage_entry: Option[Tuple2[FTBEntry, Bool]] = None) =
    if (last_stage_entry.isDefined) {
      val stashed_carry = RegEnable(last_stage_entry.get._1.carry, last_stage_entry.get._2)
      getFallThroughAddr(pc, stashed_carry, pftAddr)
    } else {
      getFallThroughAddr(pc, carry, pftAddr)
    }

  def hasBr(offset: UInt) =
    brSlots.map(s => s.valid && s.offset <= offset).reduce(_ || _) ||
      (tailSlot.valid && tailSlot.offset <= offset && tailSlot.sharing)

  def getBrMaskByOffset(offset: UInt) =
    brSlots.map { s =>
      s.valid && s.offset <= offset
    } :+
      (tailSlot.valid && tailSlot.offset <= offset && tailSlot.sharing)

  def getBrRecordedVec(offset: UInt) =
    VecInit(
      brSlots.map(s => s.valid && s.offset === offset) :+
        (tailSlot.valid && tailSlot.offset === offset && tailSlot.sharing)
    )

  def brIsSaved(offset: UInt) = getBrRecordedVec(offset).reduce(_ || _)

  def brValids =
    VecInit(
      brSlots.map(_.valid) :+ (tailSlot.valid && tailSlot.sharing)
    )

  def noEmptySlotForNewBr =
    VecInit(brSlots.map(_.valid) :+ tailSlot.valid).reduce(_ && _)

  def newBrCanNotInsert(offset: UInt) = {
    val lastSlotForBr = tailSlot
    lastSlotForBr.valid && lastSlotForBr.offset < offset
  }

  def jmpValid =
    tailSlot.valid && !tailSlot.sharing

  def brOffset =
    VecInit(brSlots.map(_.offset) :+ tailSlot.offset)

  def entryConsistent(that: FTBEntry) = {
    val validDiff = this.valid === that.valid
    val brSlotsDiffSeq: IndexedSeq[Bool] =
      this.brSlots.zip(that.brSlots).map {
        case (x, y) => x.slotConsistent(y)
      }
    val tailSlotDiff = this.tailSlot.slotConsistent(that.tailSlot)
    val pftAddrDiff = this.pftAddr === that.pftAddr
    val carryDiff = this.carry === that.carry
    val isCallDiff = this.isCall === that.isCall
    val isRetDiff = this.isRet === that.isRet
    val isJalrDiff = this.isJalr === that.isJalr
    val lastMayBeRviCallDiff = this.last_may_be_rvi_call === that.last_may_be_rvi_call
    val alwaysTakenDiff: IndexedSeq[Bool] =
      this.always_taken.zip(that.always_taken).map {
        case (x, y) => x === y
      }
    VecInit(
      validDiff,
      brSlotsDiffSeq.reduce(_ && _),
      tailSlotDiff,
      pftAddrDiff,
      carryDiff,
      isCallDiff,
      isRetDiff,
      isJalrDiff,
      lastMayBeRviCallDiff,
      alwaysTakenDiff.reduce(_ && _)
    ).reduce(_ && _)
  }

  def display(cond: Bool): Unit = {
    XSDebug(cond, p"-----------FTB entry----------- \n")
    XSDebug(cond, p"v=${valid}\n")
    for (i <- 0 until numBr) {
      XSDebug(
        cond,
        p"[br$i]: v=${allSlotsForBr(i).valid}, offset=${allSlotsForBr(i).offset}," +
          p"lower=${Hexadecimal(allSlotsForBr(i).lower)}\n"
      )
    }
    XSDebug(
      cond,
      p"[tailSlot]: v=${tailSlot.valid}, offset=${tailSlot.offset}," +
        p"lower=${Hexadecimal(tailSlot.lower)}, sharing=${tailSlot.sharing}\n"
    )
    XSDebug(cond, p"pftAddr=${Hexadecimal(pftAddr)}, carry=$carry\n")
    XSDebug(cond, p"isCall=$isCall, isRet=$isRet, isJalr=$isJalr\n")
    XSDebug(cond, p"last_may_be_rvi_call=$last_may_be_rvi_call\n")
    XSDebug(cond, p"------------------------------- \n")
  }

}

class FTBEntryWithTag(implicit p: Parameters) extends XSBundle with FTBParams with BPUUtils {
  val entry = new FTBEntry
  val tag = UInt(tagSize.W)
  def display(cond: Bool): Unit = {
    entry.display(cond)
    XSDebug(cond, p"tag is ${Hexadecimal(tag)}\n------------------------------- \n")
  }
}

class FTBMeta(implicit p: Parameters) extends XSBundle with FTBParams {
  val writeWay = UInt(log2Ceil(numWays).W)
  val hit = Bool()
  val pred_cycle = if (!env.FPGAPlatform) Some(UInt(64.W)) else None
}

object FTBMeta {
  def apply(writeWay: UInt, hit: Bool, pred_cycle: UInt)(implicit p: Parameters): FTBMeta = {
    val e = Wire(new FTBMeta)
    e.writeWay := writeWay
    e.hit := hit
    e.pred_cycle.map(_ := pred_cycle)
    e
  }
}

// class UpdateQueueEntry(implicit p: Parameters) extends XSBundle with FTBParams {
//   val pc = UInt(VAddrBits.W)
//   val ftb_entry = new FTBEntry
// --- 7 unchanged lines hidden ---
//     e.pc := pc
//     e.ftb_entry := fe
//     e.hit := hit
//     e.hit_way := hit_way
//     e
//   }
// }

class FTBTableAddr(val idxBits: Int, val banks: Int, val skewedBits: Int)(implicit p: Parameters) extends XSBundle {
  val addr = new TableAddr(idxBits, banks)
  def getIdx(x: UInt) = addr.getIdx(x) ^ Cat(addr.getTag(x), addr.getIdx(x))(idxBits + skewedBits - 1, skewedBits)
  def getTag(x: UInt) = addr.getTag(x)
}

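// Skewed-index sketch (illustrative, assuming idxBits = 9 and skewedBits = 3):
// getIdx XORs the plain set index with bits (11, 3) of Cat(tag, idx), i.e. a
// slice shifted down by skewedBits, so pcs that share a raw set index but
// differ in tag tend to land in different sets, reducing conflict misses.
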
class FTB(implicit p: Parameters) extends BasePredictor with FTBParams with BPUUtils
    with HasCircularQueuePtrHelper with HasPerfEvents {
  override val meta_size = WireInit(0.U.asTypeOf(new FTBMeta)).getWidth

  val ftbAddr = new FTBTableAddr(log2Up(numSets), 1, 3)

  class FTBBank(val numSets: Int, val nWays: Int) extends XSModule with BPUUtils {
    val io = IO(new Bundle {
      val s1_fire = Input(Bool())

      // when ftb hit, read_hits.valid is true, and read_hits.bits is OH of hit way
      // when ftb not hit, read_hits.valid is false, and read_hits is OH of allocWay
      // val read_hits = Valid(Vec(numWays, Bool()))
      val req_pc = Flipped(DecoupledIO(UInt(VAddrBits.W)))
      val read_resp = Output(new FTBEntry)
      val read_hits = Valid(UInt(log2Ceil(numWays).W))

      val read_multi_entry = Output(new FTBEntry)
      val read_multi_hits = Valid(UInt(log2Ceil(numWays).W))

      val u_req_pc = Flipped(DecoupledIO(UInt(VAddrBits.W)))
      val update_hits = Valid(UInt(log2Ceil(numWays).W))
      val update_access = Input(Bool())

      val update_pc = Input(UInt(VAddrBits.W))
      val update_write_data = Flipped(Valid(new FTBEntryWithTag))
      val update_write_way = Input(UInt(log2Ceil(numWays).W))
      val update_write_alloc = Input(Bool())
    })

    // The holdRead logic is extracted here to fix the bug where an update read
    // overrides the predict read result
    val ftb = Module(new SRAMTemplate(
      new FTBEntryWithTag,
      set = numSets,
      way = numWays,
      shouldReset = true,
      holdRead = false,
      singlePort = true
    ))
    val ftb_r_entries = ftb.io.r.resp.data.map(_.entry)

    val pred_rdata = HoldUnless(ftb.io.r.resp.data, RegNext(io.req_pc.valid && !io.update_access))
    ftb.io.r.req.valid := io.req_pc.valid || io.u_req_pc.valid // io.s0_fire
    ftb.io.r.req.bits.setIdx := Mux(
      io.u_req_pc.valid,
      ftbAddr.getIdx(io.u_req_pc.bits),
      ftbAddr.getIdx(io.req_pc.bits)
    ) // s0_idx

    assert(!(io.req_pc.valid && io.u_req_pc.valid))

    io.req_pc.ready := ftb.io.r.req.ready
    io.u_req_pc.ready := ftb.io.r.req.ready

    val req_tag = RegEnable(ftbAddr.getTag(io.req_pc.bits)(tagSize - 1, 0), io.req_pc.valid)
    val req_idx = RegEnable(ftbAddr.getIdx(io.req_pc.bits), io.req_pc.valid)

    val u_req_tag = RegEnable(ftbAddr.getTag(io.u_req_pc.bits)(tagSize - 1, 0), io.u_req_pc.valid)

    val read_entries = pred_rdata.map(_.entry)
    val read_tags = pred_rdata.map(_.tag)

    val total_hits =
      VecInit((0 until numWays).map(b => read_tags(b) === req_tag && read_entries(b).valid && io.s1_fire))
    val hit = total_hits.reduce(_ || _)
    // val hit_way_1h = VecInit(PriorityEncoderOH(total_hits))
    val hit_way = OHToUInt(total_hits)

    // There may be two hits among the four ways of the ftbBank, in which case OHToUInt gives a wrong way.
    // If there is a redirect in s2 at this time, the wrong FTBEntry will be used to calculate the target,
    // resulting in an address error and hurting performance.
    // The solution is to select one of the hit entries as the entry for s2 on a multi hit.
    // Considering timing, this entry is used in s3 and triggers an s3 redirect.
    val total_hits_reg = RegEnable(total_hits, io.s1_fire)
    val read_entries_reg = read_entries.map(w => RegEnable(w, io.s1_fire))

    val multi_hit = VecInit((0 until numWays).map {
      i =>
        (0 until numWays).map { j =>
          if (i < j) total_hits_reg(i) && total_hits_reg(j)
          else false.B
        }.reduce(_ || _)
    }).reduce(_ || _)
    val multi_way = PriorityMux(Seq.tabulate(numWays)(i => (total_hits_reg(i)) -> i.asUInt(log2Ceil(numWays).W)))
    val multi_hit_selectEntry = PriorityMux(Seq.tabulate(numWays)(i => (total_hits_reg(i)) -> read_entries_reg(i)))

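    // Why OHToUInt is not enough on a multi hit (illustrative, assuming numWays = 4):
    // OHToUInt assumes a one-hot input; if ways 1 and 2 both hit, total_hits is
    // b0110 and OHToUInt effectively ORs the set-bit indices, yielding 3, a way
    // that did not hit at all. The PriorityMux pair above instead picks the
    // lowest hitting way deterministically.
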
    // Check if the entry read by ftbBank is legal.
    for (n <- 0 to numWays - 1) {
      val req_pc_reg = RegEnable(io.req_pc.bits, 0.U.asTypeOf(io.req_pc.bits), io.req_pc.valid)
      val req_pc_reg_lower = Cat(0.U(1.W), req_pc_reg(instOffsetBits + log2Ceil(PredictWidth) - 1, instOffsetBits))
      val ftbEntryEndLowerwithCarry = Cat(read_entries(n).carry, read_entries(n).pftAddr)
      val fallThroughErr = req_pc_reg_lower + PredictWidth.U >= ftbEntryEndLowerwithCarry
      when(read_entries(n).valid && total_hits(n) && io.s1_fire) {
        assert(fallThroughErr, s"FTB read sram entry in way${n} fallThrough address error!")
      }
    }

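    // Legality sketch (numbers assumed for illustration): with PredictWidth = 16,
    // a fetch block starting at pc ends within the next 16 slots, so the recorded
    // end (carry ++ pftAddr) must satisfy end <= start + 16. A hit entry violating
    // this indicates a corrupted or mis-tagged SRAM entry, hence the assertion.
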
    val u_total_hits = VecInit((0 until numWays).map(b =>
      ftb.io.r.resp.data(b).tag === u_req_tag && ftb.io.r.resp.data(b).entry.valid && RegNext(io.update_access)
    ))
    val u_hit = u_total_hits.reduce(_ || _)
    // val hit_way_1h = VecInit(PriorityEncoderOH(total_hits))
    val u_hit_way = OHToUInt(u_total_hits)

    // assert(PopCount(total_hits) === 1.U || PopCount(total_hits) === 0.U)
    // assert(PopCount(u_total_hits) === 1.U || PopCount(u_total_hits) === 0.U)
    for (n <- 1 to numWays) {
      XSPerfAccumulate(f"ftb_pred_${n}_way_hit", PopCount(total_hits) === n.U)
      XSPerfAccumulate(f"ftb_update_${n}_way_hit", PopCount(u_total_hits) === n.U)

    // --- 6 unchanged lines hidden ---

    val touch_way = Seq.fill(1)(Wire(Valid(UInt(log2Ceil(numWays).W))))

    val write_set = Wire(UInt(log2Ceil(numSets).W))
    val write_way = Wire(Valid(UInt(log2Ceil(numWays).W)))

    val read_set = Wire(UInt(log2Ceil(numSets).W))
    val read_way = Wire(Valid(UInt(log2Ceil(numWays).W)))

    read_set := req_idx
    read_way.valid := hit
    read_way.bits := hit_way

    // Read replacer access is postponed for 1 cycle
    // this helps timing
    touch_set(0) := Mux(write_way.valid, write_set, RegNext(read_set))
    touch_way(0).valid := write_way.valid || RegNext(read_way.valid)
    touch_way(0).bits := Mux(write_way.valid, write_way.bits, RegNext(read_way.bits))

    replacer.access(touch_set, touch_way)

    // Select the update allocate way
    // Selection logic:
    //    1. if any entry within the same index is not valid, select it
    //    2. if all entries are valid, use the replacer
    def allocWay(valids: UInt, idx: UInt): UInt =
      if (numWays > 1) {
        val w = Wire(UInt(log2Up(numWays).W))
        val valid = WireInit(valids.andR)
        w := Mux(valid, replacer.way(idx), PriorityEncoder(~valids))
        w
      } else {
        val w = WireInit(0.U(log2Up(numWays).W))
        w
      }

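    // allocWay sketch (illustrative, assuming numWays = 4): with valids = b1011,
    // ~valids = b0100 and PriorityEncoder selects the empty way 2; once
    // valids = b1111, the replacer supplies the victim way for this set instead.
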
    io.read_resp := Mux1H(total_hits, read_entries) // Mux1H
    io.read_hits.valid := hit
    io.read_hits.bits := hit_way

    io.read_multi_entry := multi_hit_selectEntry
    io.read_multi_hits.valid := multi_hit
    io.read_multi_hits.bits := multi_way

    io.update_hits.valid := u_hit
    io.update_hits.bits := u_hit_way

    // Update logic
    val u_valid = io.update_write_data.valid
    val u_data = io.update_write_data.bits
    val u_idx = ftbAddr.getIdx(io.update_pc)
    val allocWriteWay = allocWay(RegNext(VecInit(ftb_r_entries.map(_.valid))).asUInt, u_idx)
    val u_way = Mux(io.update_write_alloc, allocWriteWay, io.update_write_way)
    val u_mask = UIntToOH(u_way)

    for (i <- 0 until numWays) {
      XSPerfAccumulate(f"ftb_replace_way$i", u_valid && io.update_write_alloc && u_way === i.U)
      XSPerfAccumulate(
        f"ftb_replace_way${i}_has_empty",
        u_valid && io.update_write_alloc && !ftb_r_entries.map(_.valid).reduce(_ && _) && u_way === i.U
      )
      XSPerfAccumulate(f"ftb_hit_way$i", hit && !io.update_access && hit_way === i.U)
    }

    ftb.io.w.apply(u_valid, u_data, u_idx, u_mask)

    // for replacer
    write_set := u_idx
    write_way.valid := u_valid
    write_way.bits := Mux(io.update_write_alloc, allocWriteWay, io.update_write_way)

    // print hit entry info
    Mux1H(total_hits, ftb.io.r.resp.data).display(true.B)
  } // FTBBank

  // FTB switch register & temporary storage of fauftb prediction results
  val s0_close_ftb_req = RegInit(false.B)
  val s1_close_ftb_req = RegEnable(s0_close_ftb_req, false.B, io.s0_fire(0))
  val s2_close_ftb_req = RegEnable(s1_close_ftb_req, false.B, io.s1_fire(0))
  val s2_fauftb_ftb_entry_dup = io.s1_fire.map(f => RegEnable(io.fauftb_entry_in, f))
  val s2_fauftb_ftb_entry_hit_dup = io.s1_fire.map(f => RegEnable(io.fauftb_entry_hit_in, f))

  val ftbBank = Module(new FTBBank(numSets, numWays))

  // for close ftb read_req
  ftbBank.io.req_pc.valid := io.s0_fire(0) && !s0_close_ftb_req
  ftbBank.io.req_pc.bits := s0_pc_dup(0)

  val s2_multi_hit = ftbBank.io.read_multi_hits.valid && io.s2_fire(0)
  val s2_multi_hit_way = ftbBank.io.read_multi_hits.bits
  val s2_multi_hit_entry = ftbBank.io.read_multi_entry
  val s2_multi_hit_enable = s2_multi_hit && !s2_close_ftb_req
  XSPerfAccumulate("ftb_s2_multi_hit", s2_multi_hit)
  XSPerfAccumulate("ftb_s2_multi_hit_enable", s2_multi_hit_enable)

  // After closing ftb, the entry output from s2 is the FauFTB entry cached in s1
  val btb_enable_dup = dup(RegNext(io.ctrl.btb_enable))
  val s1_read_resp = Mux(s1_close_ftb_req, io.fauftb_entry_in, ftbBank.io.read_resp)
  val s2_ftbBank_dup = io.s1_fire.map(f => RegEnable(ftbBank.io.read_resp, f))
  val s2_ftb_entry_dup = dup(0.U.asTypeOf(new FTBEntry))
  for (
    ((s2_fauftb_entry, s2_ftbBank_entry), s2_ftb_entry) <-
      s2_fauftb_ftb_entry_dup zip s2_ftbBank_dup zip s2_ftb_entry_dup
  ) {
    s2_ftb_entry := Mux(s2_close_ftb_req, s2_fauftb_entry, s2_ftbBank_entry)
  }
  val s3_ftb_entry_dup = io.s2_fire.zip(s2_ftb_entry_dup).map { case (f, e) =>
    RegEnable(Mux(s2_multi_hit_enable, s2_multi_hit_entry, e), f)
  }
  val real_s2_ftb_entry = Mux(s2_multi_hit_enable, s2_multi_hit_entry, s2_ftb_entry_dup(0))
  val real_s2_pc = s2_pc_dup(0).getAddr()
  val real_s2_startLower = Cat(0.U(1.W), real_s2_pc(instOffsetBits + log2Ceil(PredictWidth) - 1, instOffsetBits))
  val real_s2_endLowerwithCarry = Cat(real_s2_ftb_entry.carry, real_s2_ftb_entry.pftAddr)
  val real_s2_fallThroughErr =
    real_s2_startLower >= real_s2_endLowerwithCarry || real_s2_endLowerwithCarry > (real_s2_startLower + PredictWidth.U)
  val real_s3_fallThroughErr_dup = io.s2_fire.map(f => RegEnable(real_s2_fallThroughErr, f))

  // After closing ftb, the hit output from s2 is the FauFTB hit cached in s1.
  // s1_hit is the ftbBank hit.
  val s1_hit = Mux(s1_close_ftb_req, false.B, ftbBank.io.read_hits.valid && io.ctrl.btb_enable)
  val s2_ftb_hit_dup = io.s1_fire.map(f => RegEnable(s1_hit, 0.B, f))
  val s2_hit_dup = dup(0.U.asTypeOf(Bool()))
  for (
    ((s2_fauftb_hit, s2_ftb_hit), s2_hit) <-
      s2_fauftb_ftb_entry_hit_dup zip s2_ftb_hit_dup zip s2_hit_dup
  ) {
    s2_hit := Mux(s2_close_ftb_req, s2_fauftb_hit, s2_ftb_hit)
  }
  val s3_hit_dup = io.s2_fire.zip(s2_hit_dup).map { case (f, h) =>
    RegEnable(Mux(s2_multi_hit_enable, s2_multi_hit, h), 0.B, f)
  }
  val s3_multi_hit_dup = io.s2_fire.map(f => RegEnable(s2_multi_hit_enable, f))
  val writeWay = Mux(s1_close_ftb_req, 0.U, ftbBank.io.read_hits.bits)
  val s2_ftb_meta = RegEnable(FTBMeta(writeWay.asUInt, s1_hit, GTimer()).asUInt, io.s1_fire(0))
  val s2_multi_hit_meta = FTBMeta(s2_multi_hit_way.asUInt, s2_multi_hit, GTimer()).asUInt

687 //Consistent count of entries for fauftb and ftb
714 // Consistent count of entries for fauftb and ftb
688 val fauftb_ftb_entry_consistent_counter = RegInit(0.U(FTBCLOSE_THRESHOLD_SZ.W))
715 val fauftb_ftb_entry_consistent_counter = RegInit(0.U(FTBCLOSE_THRESHOLD_SZ.W))
689 val fauftb_ftb_entry_consistent = s2_fauftb_ftb_entry_dup(0).entryConsistent(s2_ftbBank_dup(0))
716 val fauftb_ftb_entry_consistent = s2_fauftb_ftb_entry_dup(0).entryConsistent(s2_ftbBank_dup(0))
690
717
691 //if close ftb_req, the counter need keep
692 when(io.s2_fire(0) && s2_fauftb_ftb_entry_hit_dup(0) && s2_ftb_hit_dup(0) ){
693 fauftb_ftb_entry_consistent_counter := Mux(fauftb_ftb_entry_consistent, fauftb_ftb_entry_consistent_counter + 1.U, 0.U)
694 } .elsewhen(io.s2_fire(0) && !s2_fauftb_ftb_entry_hit_dup(0) && s2_ftb_hit_dup(0) ){
718 // if close ftb_req, the counter need keep
719 when(io.s2_fire(0) && s2_fauftb_ftb_entry_hit_dup(0) && s2_ftb_hit_dup(0)) {
720 fauftb_ftb_entry_consistent_counter := Mux(
721 fauftb_ftb_entry_consistent,
722 fauftb_ftb_entry_consistent_counter + 1.U,
723 0.U
724 )
725 }.elsewhen(io.s2_fire(0) && !s2_fauftb_ftb_entry_hit_dup(0) && s2_ftb_hit_dup(0)) {
695 fauftb_ftb_entry_consistent_counter := 0.U
696 }
697
726 fauftb_ftb_entry_consistent_counter := 0.U
727 }
728
698 when((fauftb_ftb_entry_consistent_counter >= FTBCLOSE_THRESHOLD) && io.s0_fire(0)){
729 when((fauftb_ftb_entry_consistent_counter >= FTBCLOSE_THRESHOLD) && io.s0_fire(0)) {
699 s0_close_ftb_req := true.B
700 }
701
730 s0_close_ftb_req := true.B
731 }
732
  // Clear the counter and reopen the FTB on a false hit or an IFU redirect
  val ftb_false_hit = WireInit(false.B)
  val needReopen = s0_close_ftb_req && (ftb_false_hit || io.redirectFromIFU)
  ftb_false_hit := io.update.valid && io.update.bits.false_hit
  when(needReopen) {
    fauftb_ftb_entry_consistent_counter := 0.U
    s0_close_ftb_req := false.B
  }

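  // A false hit reported by the update path, or a redirect from the IFU,
  // means the uFTB entry can no longer be trusted to mirror the FTB, so the
  // FTB is reopened and confidence is rebuilt from zero.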
  val s2_close_consistent = s2_fauftb_ftb_entry_dup(0).entryConsistent(s2_ftb_entry_dup(0))
  val s2_not_close_consistent = s2_ftbBank_dup(0).entryConsistent(s2_ftb_entry_dup(0))

  when(s2_close_ftb_req && io.s2_fire(0)) {
    assert(s2_close_consistent, "Entry inconsistency after ftb req is closed!")
  }.elsewhen(!s2_close_ftb_req && io.s2_fire(0)) {
    assert(s2_not_close_consistent, "Entry inconsistency after ftb req is not closed!")
  }

  val reopenCounter = !s1_close_ftb_req && s2_close_ftb_req && io.s2_fire(0)
  val falseHitReopenCounter = ftb_false_hit && s1_close_ftb_req
  XSPerfAccumulate("ftb_req_reopen_counter", reopenCounter)
  XSPerfAccumulate("false_hit_reopen_counter", falseHitReopenCounter)
  XSPerfAccumulate("ifuRedirect_needReopen", s1_close_ftb_req && io.redirectFromIFU)
  XSPerfAccumulate("this_cycle_is_close", s2_close_ftb_req && io.s2_fire(0))
  XSPerfAccumulate("this_cycle_is_open", !s2_close_ftb_req && io.s2_fire(0))

  // io.out.bits.resp := RegEnable(io.in.bits.resp_in(0), 0.U.asTypeOf(new BranchPredictionResp), io.s1_fire)
  io.out := io.in.bits.resp_in(0)

  io.out.s2.full_pred.foreach(fp => fp.multiHit := false.B)

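  // In the zipped for-comprehensions below, `&` is the Tuple2 alias imported
  // at the top of the file: it lets nested zip results be matched as a flat
  // `a & b & c` pattern instead of ((a, b), c).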
  io.out.s2.full_pred.zip(s2_hit_dup).foreach { case (fp, h) => fp.hit := h }
  for (
    full_pred & s2_ftb_entry & s2_pc & s1_pc & s1_fire <-
      io.out.s2.full_pred zip s2_ftb_entry_dup zip s2_pc_dup zip s1_pc_dup zip io.s1_fire
  ) {
    full_pred.fromFtbEntry(
      s2_ftb_entry,
      s2_pc.getAddr(),
      // Previous stage meta for better timing
      Some(s1_pc, s1_fire),
      Some(s1_read_resp, s1_fire)
    )
  }

  io.out.s3.full_pred.zip(s3_hit_dup).foreach { case (fp, h) => fp.hit := h }
  io.out.s3.full_pred.zip(s3_multi_hit_dup).foreach { case (fp, m) => fp.multiHit := m }
  for (
    full_pred & s3_ftb_entry & s3_pc & s2_pc & s2_fire <-
      io.out.s3.full_pred zip s3_ftb_entry_dup zip s3_pc_dup zip s2_pc_dup zip io.s2_fire
  )
    full_pred.fromFtbEntry(s3_ftb_entry, s3_pc.getAddr(), Some((s2_pc.getAddr(), s2_fire)))

  // Overwrite the fallThroughErr value
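  // fallThroughErr marks an entry whose recorded fall-through address fails
  // the sanity check against the fetch PC; the value recomputed in s3
  // (real_s3_fallThroughErr_dup) overrides whatever the entry carried.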
  io.out.s3.full_pred.zipWithIndex.foreach { case (fp, i) => fp.fallThroughErr := real_s3_fallThroughErr_dup(i) }

  io.out.last_stage_ftb_entry := s3_ftb_entry_dup(0)
  io.out.last_stage_meta := RegEnable(Mux(s2_multi_hit_enable, s2_multi_hit_meta, s2_ftb_meta), io.s2_fire(0))
  io.out.s1_ftbCloseReq := s1_close_ftb_req
  io.out.s1_uftbHit := io.fauftb_entry_hit_in
  // the uFTB entry holds a genuine JALR: RET and JAL are excluded
  val s1_uftbHasIndirect = io.fauftb_entry_in.jmpValid &&
    io.fauftb_entry_in.isJalr && !io.fauftb_entry_in.isRet
  io.out.s1_uftbHasIndirect := s1_uftbHasIndirect

  // always-taken logic
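  // always_taken(i) stays set while a branch slot has been taken on every
  // update since the entry was allocated; such branches are predicted taken
  // here regardless of the direction predictor's output.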
  for (i <- 0 until numBr) {
    for (
      out_fp & in_fp & s2_hit & s2_ftb_entry <-
        io.out.s2.full_pred zip io.in.bits.resp_in(0).s2.full_pred zip s2_hit_dup zip s2_ftb_entry_dup
    )
      out_fp.br_taken_mask(i) := in_fp.br_taken_mask(i) || s2_hit && s2_ftb_entry.always_taken(i)
    for (
      out_fp & in_fp & s3_hit & s3_ftb_entry <-
        io.out.s3.full_pred zip io.in.bits.resp_in(0).s3.full_pred zip s3_hit_dup zip s3_ftb_entry_dup
    )
      out_fp.br_taken_mask(i) := in_fp.br_taken_mask(i) || s3_hit && s3_ftb_entry.always_taken(i)
  }

  // Update logic
  val update = io.update.bits

  val u_meta = update.meta.asTypeOf(new FTBMeta)
  val u_valid = io.update.valid && !io.update.bits.old_entry

  val (_, delay2_pc) = DelayNWithValid(update.pc, u_valid, 2)
  val (_, delay2_entry) = DelayNWithValid(update.ftb_entry, u_valid, 2)
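
  // The two-cycle delays above match the latency of the update read issued
  // below on a meta miss, so the delayed pc/entry arrive together with the
  // read's hit result when the write is finally performed.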
  val update_now = u_valid && u_meta.hit
  val update_need_read = u_valid && !u_meta.hit
  // an update that missed in meta spends a whole cycle reading the FTB bank
  // for a tag match, so prediction requests stall one extra cycle
  io.s1_ready := ftbBank.io.req_pc.ready && !update_need_read && !RegNext(update_need_read)

  ftbBank.io.u_req_pc.valid := update_need_read
  ftbBank.io.u_req_pc.bits := update.pc

  val ftb_write = Wire(new FTBEntryWithTag)
  ftb_write.entry := Mux(update_now, update.ftb_entry, delay2_entry)
  ftb_write.tag := ftbAddr.getTag(Mux(update_now, update.pc, delay2_pc))(tagSize - 1, 0)

  val write_valid = update_now || DelayN(u_valid && !u_meta.hit, 2)
  val write_pc = Mux(update_now, update.pc, delay2_pc)

  ftbBank.io.update_write_data.valid := write_valid
  ftbBank.io.update_write_data.bits := ftb_write
  ftbBank.io.update_pc := write_pc
  ftbBank.io.update_write_way := Mux(
    update_now,
    u_meta.writeWay,
    RegNext(ftbBank.io.update_hits.bits)
  ) // use it one cycle later
  ftbBank.io.update_write_alloc := Mux(
    update_now,
    false.B,
    RegNext(!ftbBank.io.update_hits.valid)
  ) // use it one cycle later
  ftbBank.io.update_access := u_valid && !u_meta.hit
  ftbBank.io.s1_fire := io.s1_fire(0)

  val ftb_write_fallThrough = ftb_write.entry.getFallThrough(write_pc)
  when(write_valid) {
    assert(write_pc + (FetchWidth * 4).U >= ftb_write_fallThrough, "FTB write_entry fallThrough address error!")
  }

  XSDebug("req_v=%b, req_pc=%x, ready=%b (resp at next cycle)\n", io.s0_fire(0), s0_pc_dup(0), ftbBank.io.req_pc.ready)
  XSDebug("s2_hit=%b, hit_way=%b\n", s2_hit_dup(0), writeWay.asUInt)
  XSDebug(
    "s2_br_taken_mask=%b, s2_real_taken_mask=%b\n",
    io.in.bits.resp_in(0).s2.full_pred(0).br_taken_mask.asUInt,
    io.out.s2.full_pred(0).real_slot_taken_mask().asUInt
  )
  XSDebug("s2_target=%x\n", io.out.s2.getTarget(0))

  s2_ftb_entry_dup(0).display(true.B)

  XSPerfAccumulate("ftb_read_hits", RegNext(io.s0_fire(0)) && s1_hit)
  XSPerfAccumulate("ftb_read_misses", RegNext(io.s0_fire(0)) && !s1_hit)

  XSPerfAccumulate("ftb_commit_hits", io.update.valid && u_meta.hit)
  XSPerfAccumulate("ftb_commit_misses", io.update.valid && !u_meta.hit)

  XSPerfAccumulate("ftb_update_req", io.update.valid)
  XSPerfAccumulate("ftb_update_ignored", io.update.valid && io.update.bits.old_entry)
  XSPerfAccumulate("ftb_updated", u_valid)

  override val perfEvents = Seq(
830 ("ftb_commit_hits ", io.update.valid && u_meta.hit),
831 ("ftb_commit_misses ", io.update.valid && !u_meta.hit),
878 ("ftb_commit_hits ", io.update.valid && u_meta.hit),
879 ("ftb_commit_misses ", io.update.valid && !u_meta.hit)
832 )
833 generatePerfEvent()
834}
880 )
881 generatePerfEvent()
882}