/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import scala.math.min
import utility._
import xiangshan._

trait HasBPUConst extends HasXSParameter {
  val MaxMetaBaseLength = if (!env.FPGAPlatform) 512 else 256 // TODO: Reduce meta length
  val MaxMetaLength = if (HasHExtension) MaxMetaBaseLength + 4 else MaxMetaBaseLength
  val MaxBasicBlockSize = 32
  val LHistoryLength = 32
  // val numBr = 2
  val useBPD = true
  val useLHist = true
  val numBrSlot = numBr - 1
  val totalSlot = numBrSlot + 1

  val numDup = 4

  // Used to gate the higher bits of the PC
  val pcSegments = Seq(VAddrBits - 24, 12, 12)

  def BP_STAGES = (0 until 3).map(_.U(2.W))
  def BP_S1 = BP_STAGES(0)
  def BP_S2 = BP_STAGES(1)
  def BP_S3 = BP_STAGES(2)

  def dup_seq[T](src: T, num: Int = numDup) = Seq.tabulate(num)(n => src)
  def dup[T <: Data](src: T, num: Int = numDup) = VecInit(Seq.tabulate(num)(n => src))
  def dup_wire[T <: Data](src: T, num: Int = numDup) = Wire(Vec(num, src.cloneType))
  def dup_idx = Seq.tabulate(numDup)(n => n.toString())
  val numBpStages = BP_STAGES.length

  val debug = true
  // TODO: Replace log2Up by log2Ceil
}

trait HasBPUParameter extends HasXSParameter with HasBPUConst {
  val BPUDebug = true && !env.FPGAPlatform && env.EnablePerfDebug
  val EnableCFICommitLog = true
  val EnableCFIPredLog = true
  val EnableBPUTimeRecord = (EnableCFICommitLog || EnableCFIPredLog) && !env.FPGAPlatform
  val EnableCommit = false
}

class BPUCtrl(implicit p: Parameters) extends XSBundle {
  val ubtb_enable = Bool()
  val btb_enable = Bool()
  val bim_enable = Bool()
  val tage_enable = Bool()
  val sc_enable = Bool()
  val ras_enable = Bool()
  val loop_enable = Bool()
}

trait BPUUtils extends HasXSParameter {
  // circular shifting
  def circularShiftLeft(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << shamt
    val lower = source >> (len.U - shamt)
    res := higher | lower
    res
  }

  def circularShiftRight(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << (len.U - shamt)
    val lower = source >> shamt
    res := higher | lower
    res
  }

  // To be verified
  def satUpdate(old: UInt, len: Int, taken: Bool): UInt = {
    val oldSatTaken = old === ((1 << len) - 1).U
    val oldSatNotTaken = old === 0.U
    Mux(oldSatTaken && taken, ((1 << len) - 1).U, Mux(oldSatNotTaken && !taken, 0.U, Mux(taken, old + 1.U, old - 1.U)))
  }
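
  // Worked example (illustrative only): with len = 2, satUpdate is the classic
  // 2-bit saturating counter used for branch direction, confined to [0, 3]:
  //   satUpdate(1.U, 2, true.B)  // -> 2.U, strengthen towards "taken"
  //   satUpdate(3.U, 2, true.B)  // -> 3.U, already saturated at the top
  //   satUpdate(0.U, 2, false.B) // -> 0.U, already saturated at the bottom
  // signedSatUpdate below is the two's-complement analogue, saturating at
  // -(1 << (len - 1)) and (1 << (len - 1)) - 1.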
  def signedSatUpdate(old: SInt, len: Int, taken: Bool): SInt = {
    val oldSatTaken = old === ((1 << (len - 1)) - 1).S
    val oldSatNotTaken = old === (-(1 << (len - 1))).S
    Mux(
      oldSatTaken && taken,
      ((1 << (len - 1)) - 1).S,
      Mux(oldSatNotTaken && !taken, (-(1 << (len - 1))).S, Mux(taken, old + 1.S, old - 1.S))
    )
  }

  def getFallThroughAddr(start: UInt, carry: Bool, pft: UInt) = {
    val higher = start.head(VAddrBits - log2Ceil(PredictWidth) - instOffsetBits)
    Cat(Mux(carry, higher + 1.U, higher), pft, 0.U(instOffsetBits.W))
  }
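
  // Illustrative note on foldTag below (example values, not from the original
  // source): a 20-bit tag folded with l = 8 is split into chunks tag(7,0),
  // tag(15,8) and tag(19,16), which are XOR-reduced into one value, so every
  // tag bit still influences the folded tag at a fraction of the width.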
  def foldTag(tag: UInt, l: Int): UInt = {
    val nChunks = (tag.getWidth + l - 1) / l
    val chunks = (0 until nChunks).map(i => tag(min((i + 1) * l, tag.getWidth) - 1, i * l))
    ParallelXOR(chunks)
  }
}

class BasePredictorInput(implicit p: Parameters) extends XSBundle with HasBPUConst {
  def nInputs = 1

  val s0_pc = Vec(numDup, UInt(VAddrBits.W))

  val folded_hist = Vec(numDup, new AllFoldedHistories(foldedGHistInfos))
  val s1_folded_hist = Vec(numDup, new AllFoldedHistories(foldedGHistInfos))
  val ghist = UInt(HistoryLength.W)

  val resp_in = Vec(nInputs, new BranchPredictionResp)

  // val final_preds = Vec(numBpStages, new)
  // val toFtq_fire = Bool()

  // val s0_all_ready = Bool()
}

class BasePredictorOutput(implicit p: Parameters) extends BranchPredictionResp {}

class BasePredictorIO(implicit p: Parameters) extends XSBundle with HasBPUConst {
  val reset_vector = Input(UInt(PAddrBits.W))
  val in = Flipped(DecoupledIO(new BasePredictorInput)) // TODO: Remove DecoupledIO
  // val out = DecoupledIO(new BasePredictorOutput)
  val out = Output(new BasePredictorOutput)
  // val flush_out = Valid(UInt(VAddrBits.W))

  val fauftb_entry_in = Input(new FTBEntry)
  val fauftb_entry_hit_in = Input(Bool())
  val fauftb_entry_out = Output(new FTBEntry)
  val fauftb_entry_hit_out = Output(Bool())

  val ctrl = Input(new BPUCtrl)

  val s0_fire = Input(Vec(numDup, Bool()))
  val s1_fire = Input(Vec(numDup, Bool()))
  val s2_fire = Input(Vec(numDup, Bool()))
  val s3_fire = Input(Vec(numDup, Bool()))

  val s2_redirect = Input(Vec(numDup, Bool()))
  val s3_redirect = Input(Vec(numDup, Bool()))

  val s1_ready = Output(Bool())
  val s2_ready = Output(Bool())
  val s3_ready = Output(Bool())

  val update = Flipped(Valid(new BranchPredictionUpdate))
  val redirect = Flipped(Valid(new BranchPredictionRedirect))
  val redirectFromIFU = Input(Bool())
}

abstract class BasePredictor(implicit p: Parameters) extends XSModule
    with HasBPUConst with BPUUtils with HasPerfEvents {
  val meta_size = 0
  val spec_meta_size = 0
  val is_fast_pred = false
  val io = IO(new BasePredictorIO())

  io.out := io.in.bits.resp_in(0)

  io.fauftb_entry_out := io.fauftb_entry_in
  io.fauftb_entry_hit_out := io.fauftb_entry_hit_in

  io.out.last_stage_meta := 0.U

  io.in.ready := !io.redirect.valid

  io.s1_ready := true.B
  io.s2_ready := true.B
  io.s3_ready := true.B

  val s0_pc_dup = WireInit(io.in.bits.s0_pc) // fetchIdx(io.f0_pc)
  val s1_pc_dup = s0_pc_dup.zip(io.s0_fire).map { case (s0_pc, s0_fire) => RegEnable(s0_pc, s0_fire) }
  val s2_pc_dup = s1_pc_dup.zip(io.s1_fire).map { case (s1_pc, s1_fire) =>
    SegmentedAddrNext(s1_pc, pcSegments, s1_fire, Some("s2_pc"))
  }
  val s3_pc_dup = s2_pc_dup.zip(io.s2_fire).map { case (s2_pc, s2_fire) =>
    SegmentedAddrNext(s2_pc, s2_fire, Some("s3_pc"))
  }

  // Inject the reset vector into the s1 PC shortly after reset deasserts
  when(RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s1_pc_dup.map { case s1_pc => s1_pc := io.reset_vector }
  }

  io.out.s1.pc := s1_pc_dup
  io.out.s2.pc := s2_pc_dup.map(_.getAddr())
  io.out.s3.pc := s3_pc_dup.map(_.getAddr())

  val perfEvents: Seq[(String, UInt)] = Seq()

  def getFoldedHistoryInfo: Option[Set[FoldedHistoryInfo]] = None
}

class FakePredictor(implicit p: Parameters) extends BasePredictor {
  io.in.ready := true.B
  io.out.last_stage_meta := 0.U
  io.out := io.in.bits.resp_in(0)
}

class BpuToFtqIO(implicit p: Parameters) extends XSBundle {
  val resp = DecoupledIO(new BpuToFtqBundle())
}

class PredictorIO(implicit p: Parameters) extends XSBundle {
  val bpu_to_ftq = new BpuToFtqIO()
  val ftq_to_bpu = Flipped(new FtqToBpuIO)
  val ctrl = Input(new BPUCtrl)
  val reset_vector = Input(UInt(PAddrBits.W))
}

class Predictor(implicit p: Parameters) extends XSModule with HasBPUConst with HasPerfEvents
    with HasCircularQueuePtrHelper {
  val io = IO(new PredictorIO)

  val ctrl = DelayN(io.ctrl, 1)
  val predictors = Module(if (useBPD) new Composer else new FakePredictor)

  def numOfStage = 3
  require(numOfStage > 1, "BPU numOfStage must be greater than 1")
  val topdown_stages = RegInit(VecInit(Seq.fill(numOfStage)(0.U.asTypeOf(new FrontendTopDownBundle))))

  // the following bubbles can only happen on s1
  val controlRedirectBubble = Wire(Bool())
  val ControlBTBMissBubble = Wire(Bool())
  val TAGEMissBubble = Wire(Bool())
  val SCMissBubble = Wire(Bool())
  val ITTAGEMissBubble = Wire(Bool())
  val RASMissBubble = Wire(Bool())

  val memVioRedirectBubble = Wire(Bool())
  val otherRedirectBubble = Wire(Bool())
  val btbMissBubble = Wire(Bool())
  otherRedirectBubble := false.B
  memVioRedirectBubble := false.B

  // an override can happen between s1-s2 and s2-s3
  val overrideBubble = Wire(Vec(numOfStage - 1, Bool()))
  def overrideStage = 1
  // an ftq update block can happen on s1, s2 and s3
  val ftqUpdateBubble = Wire(Vec(numOfStage, Bool()))
  def ftqUpdateStage = 0
  // an ftq full stall only happens on s3 (the last stage)
  val ftqFullStall = Wire(Bool())

  // by default, no bubble event
  topdown_stages(0) := 0.U.asTypeOf(new FrontendTopDownBundle)
  // event movement driven by clock only
  for (i <- 0 until numOfStage - 1) {
    topdown_stages(i + 1) := topdown_stages(i)
  }

  // ctrl signal
  predictors.io.ctrl := ctrl
  predictors.io.reset_vector := io.reset_vector

  val s0_stall_dup = dup_wire(Bool()) // s0 stalled for some reason, usually FTQ full
  val s0_fire_dup, s1_fire_dup, s2_fire_dup, s3_fire_dup = dup_wire(Bool())
  val s1_valid_dup, s2_valid_dup, s3_valid_dup = dup_seq(RegInit(false.B))
  val s1_ready_dup, s2_ready_dup, s3_ready_dup = dup_wire(Bool())
  val s1_components_ready_dup, s2_components_ready_dup, s3_components_ready_dup = dup_wire(Bool())

  val s0_pc_dup = dup(WireInit(0.U.asTypeOf(UInt(VAddrBits.W))))
  val s0_pc_reg_dup = s0_pc_dup.zip(s0_stall_dup).map { case (s0_pc, s0_stall) => RegEnable(s0_pc, !s0_stall) }
  when(RegNext(RegNext(reset.asBool) && !reset.asBool)) {
    s0_pc_reg_dup.map { case s0_pc => s0_pc := io.reset_vector }
  }
  val s1_pc = RegEnable(s0_pc_dup(0), s0_fire_dup(0))
  val s2_pc = RegEnable(s1_pc, s1_fire_dup(0))
  val s3_pc = RegEnable(s2_pc, s2_fire_dup(0))
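
  // Note on the *_dup suffix: these signals carry numDup logically identical copies,
  // presumably so that high-fanout state can be driven from separate physical
  // registers for timing; the full_pred_diff XSError further below checks that the
  // copies never diverge.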
  val s0_folded_gh_dup = dup_wire(new AllFoldedHistories(foldedGHistInfos))
  val s0_folded_gh_reg_dup = s0_folded_gh_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(s0_folded_gh_dup(0)), !s0_stall)
  }
  val s1_folded_gh_dup = RegEnable(s0_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s0_fire_dup(1))
  val s2_folded_gh_dup = RegEnable(s1_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s1_fire_dup(1))
  val s3_folded_gh_dup = RegEnable(s2_folded_gh_dup, 0.U.asTypeOf(s0_folded_gh_dup), s2_fire_dup(1))

  val s0_last_br_num_oh_dup = dup_wire(UInt((numBr + 1).W))
  val s0_last_br_num_oh_reg_dup = s0_last_br_num_oh_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U, !s0_stall)
  }
  val s1_last_br_num_oh_dup = RegEnable(s0_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s0_fire_dup(1))
  val s2_last_br_num_oh_dup = RegEnable(s1_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s1_fire_dup(1))
  val s3_last_br_num_oh_dup = RegEnable(s2_last_br_num_oh_dup, 0.U.asTypeOf(s0_last_br_num_oh_dup), s2_fire_dup(1))

  val s0_ahead_fh_oldest_bits_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  val s0_ahead_fh_oldest_bits_reg_dup = s0_ahead_fh_oldest_bits_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup(0)), !s0_stall)
  }
  val s1_ahead_fh_oldest_bits_dup =
    RegEnable(s0_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s0_fire_dup(1))
  val s2_ahead_fh_oldest_bits_dup =
    RegEnable(s1_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s1_fire_dup(1))
  val s3_ahead_fh_oldest_bits_dup =
    RegEnable(s2_ahead_fh_oldest_bits_dup, 0.U.asTypeOf(s0_ahead_fh_oldest_bits_dup), s2_fire_dup(1))

  val npcGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val foldedGhGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllFoldedHistories])
  val ghistPtrGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[CGHPtr])
  val lastBrNumOHGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[UInt])
  val aheadFhObGen_dup = Seq.tabulate(numDup)(n => new PhyPriorityMuxGenerator[AllAheadFoldedHistoryOldestBits])

  val ghvBitWriteGens = Seq.tabulate(HistoryLength)(n => new PhyPriorityMuxGenerator[Bool])
  // val ghistGen = new PhyPriorityMuxGenerator[UInt]

  val ghv = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
  val ghv_wire = WireInit(ghv)

  val s0_ghist = WireInit(0.U.asTypeOf(UInt(HistoryLength.W)))

  println(f"history buffer length ${HistoryLength}")
  val ghv_write_datas = Wire(Vec(HistoryLength, Bool()))
  val ghv_wens = Wire(Vec(HistoryLength, Bool()))

  val s0_ghist_ptr_dup = dup_wire(new CGHPtr)
  val s0_ghist_ptr_reg_dup = s0_ghist_ptr_dup.zip(s0_stall_dup).map {
    case (x, s0_stall) => RegEnable(x, 0.U.asTypeOf(new CGHPtr), !s0_stall)
  }
  val s1_ghist_ptr_dup = RegEnable(s0_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s0_fire_dup(1))
  val s2_ghist_ptr_dup = RegEnable(s1_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s1_fire_dup(1))
  val s3_ghist_ptr_dup = RegEnable(s2_ghist_ptr_dup, 0.U.asTypeOf(s0_ghist_ptr_dup), s2_fire_dup(1))

  def getHist(ptr: CGHPtr): UInt = (Cat(ghv_wire.asUInt, ghv_wire.asUInt) >> (ptr.value + 1.U))(HistoryLength - 1, 0)
  s0_ghist := getHist(s0_ghist_ptr_dup(0))
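
  // getHist reads the circular global history vector: concatenating ghv_wire with
  // itself and shifting right by (ptr + 1) rotates the buffer so that bit (ptr + 1)
  // of the vector becomes bit 0 of the result. Illustrative reading with
  // HistoryLength = 8 and ptr.value = 2: the returned bits, LSB first, are
  // ghv(3), ghv(4), ..., ghv(7), ghv(0), ghv(1), ghv(2).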

  val resp = predictors.io.out

  val toFtq_fire = io.bpu_to_ftq.resp.valid && io.bpu_to_ftq.resp.ready

  val s1_flush_dup, s2_flush_dup, s3_flush_dup = dup_wire(Bool())
  val s2_redirect_dup, s3_redirect_dup = dup_wire(Bool())

  // predictors.io := DontCare
  predictors.io.in.valid := s0_fire_dup(0)
  predictors.io.in.bits.s0_pc := s0_pc_dup
  predictors.io.in.bits.ghist := s0_ghist
  predictors.io.in.bits.folded_hist := s0_folded_gh_dup
  predictors.io.in.bits.s1_folded_hist := s1_folded_gh_dup
  predictors.io.in.bits.resp_in(0) := 0.U.asTypeOf(new BranchPredictionResp)
  predictors.io.fauftb_entry_in := 0.U.asTypeOf(new FTBEntry)
  predictors.io.fauftb_entry_hit_in := false.B
  predictors.io.redirectFromIFU := RegNext(io.ftq_to_bpu.redirctFromIFU, init = false.B)
  // predictors.io.in.bits.resp_in(0).s1.pc := s0_pc
  // predictors.io.in.bits.toFtq_fire := toFtq_fire

  // predictors.io.out.ready := io.bpu_to_ftq.resp.ready

  val redirect_req = io.ftq_to_bpu.redirect
  val do_redirect_dup = dup_seq(RegNextWithEnable(redirect_req))

  // Pipeline logic
  s2_redirect_dup.map(_ := false.B)
  s3_redirect_dup.map(_ := false.B)

  s3_flush_dup.map(_ := redirect_req.valid) // flush when a redirect comes
  for (((s2_flush, s3_flush), s3_redirect) <- s2_flush_dup zip s3_flush_dup zip s3_redirect_dup)
    s2_flush := s3_flush || s3_redirect
  for (((s1_flush, s2_flush), s2_redirect) <- s1_flush_dup zip s2_flush_dup zip s2_redirect_dup)
    s1_flush := s2_flush || s2_redirect

  // a stage is ready when it is empty or its content leaves this cycle
  s1_components_ready_dup.map(_ := predictors.io.s1_ready)
  for (((s1_ready, s1_fire), s1_valid) <- s1_ready_dup zip s1_fire_dup zip s1_valid_dup)
    s1_ready := s1_fire || !s1_valid
  for (((s0_fire, s1_components_ready), s1_ready) <- s0_fire_dup zip s1_components_ready_dup zip s1_ready_dup)
    s0_fire := s1_components_ready && s1_ready
  predictors.io.s0_fire := s0_fire_dup

  s2_components_ready_dup.map(_ := predictors.io.s2_ready)
  for (((s2_ready, s2_fire), s2_valid) <- s2_ready_dup zip s2_fire_dup zip s2_valid_dup)
    s2_ready := s2_fire || !s2_valid
  for (
    (((s1_fire, s2_components_ready), s2_ready), s1_valid) <-
      s1_fire_dup zip s2_components_ready_dup zip s2_ready_dup zip s1_valid_dup
  )
    s1_fire := s1_valid && s2_components_ready && s2_ready && io.bpu_to_ftq.resp.ready

  s3_components_ready_dup.map(_ := predictors.io.s3_ready)
  for (((s3_ready, s3_fire), s3_valid) <- s3_ready_dup zip s3_fire_dup zip s3_valid_dup)
    s3_ready := s3_fire || !s3_valid
  for (
    (((s2_fire, s3_components_ready), s3_ready), s2_valid) <-
      s2_fire_dup zip s3_components_ready_dup zip s3_ready_dup zip s2_valid_dup
  )
    s2_fire := s2_valid && s3_components_ready && s3_ready

  // s1_valid: cleared by redirect or flush, set by s0_fire, drained by s1_fire
  for ((((s0_fire, s1_flush), s1_fire), s1_valid) <- s0_fire_dup zip s1_flush_dup zip s1_fire_dup zip s1_valid_dup) {
    when(redirect_req.valid)(s1_valid := false.B)
      .elsewhen(s0_fire)(s1_valid := true.B)
      .elsewhen(s1_flush)(s1_valid := false.B)
      .elsewhen(s1_fire)(s1_valid := false.B)
  }
  predictors.io.s1_fire := s1_fire_dup

  for (
    ((((s1_fire, s2_flush), s2_fire), s2_valid), s1_flush) <-
      s1_fire_dup zip s2_flush_dup zip s2_fire_dup zip s2_valid_dup zip s1_flush_dup
  ) {
    when(s2_flush)(s2_valid := false.B)
      .elsewhen(s1_fire)(s2_valid := !s1_flush)
      .elsewhen(s2_fire)(s2_valid := false.B)
  }

  predictors.io.s2_fire := s2_fire_dup
  predictors.io.s2_redirect := s2_redirect_dup

  s3_fire_dup := s3_valid_dup

  for (
    ((((s2_fire, s3_flush), s3_fire), s3_valid), s2_flush) <-
      s2_fire_dup zip s3_flush_dup zip s3_fire_dup zip s3_valid_dup zip s2_flush_dup
  ) {
    when(s3_flush)(s3_valid := false.B)
      .elsewhen(s2_fire)(s3_valid := !s2_flush)
      .elsewhen(s3_fire)(s3_valid := false.B)
  }

  predictors.io.s3_fire := s3_fire_dup
  predictors.io.s3_redirect := s3_redirect_dup

  io.bpu_to_ftq.resp.valid :=
    s1_valid_dup(2) && s2_components_ready_dup(2) && s2_ready_dup(2) ||
      s2_fire_dup(2) && s2_redirect_dup(2) ||
      s3_fire_dup(2) && s3_redirect_dup(2)
  io.bpu_to_ftq.resp.bits := predictors.io.out
  io.bpu_to_ftq.resp.bits.last_stage_spec_info.histPtr := s3_ghist_ptr_dup(2)

  val full_pred_diff = WireInit(false.B)
  val full_pred_diff_stage = WireInit(0.U)
  val full_pred_diff_offset = WireInit(0.U)
  for (i <- 0 until numDup - 1) {
    val s1_pred_diff = io.bpu_to_ftq.resp.bits.s1.full_pred(i).asTypeOf(UInt()) =/=
      io.bpu_to_ftq.resp.bits.s1.full_pred(i + 1).asTypeOf(UInt())
    val s2_pred_diff = io.bpu_to_ftq.resp.bits.s2.full_pred(i).asTypeOf(UInt()) =/=
      io.bpu_to_ftq.resp.bits.s2.full_pred(i + 1).asTypeOf(UInt())
    val s3_pred_diff = io.bpu_to_ftq.resp.bits.s3.full_pred(i).asTypeOf(UInt()) =/=
      io.bpu_to_ftq.resp.bits.s3.full_pred(i + 1).asTypeOf(UInt())
    when(io.bpu_to_ftq.resp.valid &&
      ((s1_pred_diff && io.bpu_to_ftq.resp.bits.s1.full_pred(i).hit) ||
        (s2_pred_diff && io.bpu_to_ftq.resp.bits.s2.full_pred(i).hit) ||
        (s3_pred_diff && io.bpu_to_ftq.resp.bits.s3.full_pred(i).hit))) {
      full_pred_diff := true.B
      full_pred_diff_offset := i.U
      when(s1_pred_diff) {
        full_pred_diff_stage := 1.U
      }.elsewhen(s2_pred_diff) {
        full_pred_diff_stage := 2.U
      }.otherwise {
        full_pred_diff_stage := 3.U
      }
    }
  }
  XSError(full_pred_diff, "Full prediction difference detected!")

  // s0_stall should be exclusive with any other PC source
  s0_stall_dup.zip(s1_valid_dup).zip(s2_redirect_dup).zip(s3_redirect_dup).zip(do_redirect_dup).foreach {
    case ((((s0_stall, s1_valid), s2_redirect), s3_redirect), do_redirect) =>
      s0_stall := !(s1_valid || s2_redirect || s3_redirect || do_redirect.valid)
  }

  // Power-on reset
  val powerOnResetState = RegInit(true.B)
  when(s0_fire_dup(0)) {
    // once the BPU pipeline fires for the first time, power-on reset is considered done
    powerOnResetState := false.B
  }
  XSError(
    !powerOnResetState && s0_stall_dup(0) && s0_pc_dup(0) =/= s0_pc_reg_dup(0),
    "s0_stall but s0_pc is different from s0_pc_reg"
  )

  npcGen_dup.zip(s0_pc_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallPC"), 0)
  }
  foldedGhGen_dup.zip(s0_folded_gh_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallFGH"), 0)
  }
  ghistPtrGen_dup.zip(s0_ghist_ptr_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallGHPtr"), 0)
  }
  lastBrNumOHGen_dup.zip(s0_last_br_num_oh_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallBrNumOH"), 0)
  }
  aheadFhObGen_dup.zip(s0_ahead_fh_oldest_bits_reg_dup).map { case (gen, reg) =>
    gen.register(true.B, reg, Some("stallAFHOB"), 0)
  }
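
  // Every producer of next-cycle s0 state goes through these PhyPriorityMuxGenerator
  // instances. The "stall" entries registered above use a constant-true enable, so
  // they serve as the default source that simply holds the current s0 state whenever
  // no other producer (s1/s2/s3/redirect, registered further below) fires.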

  // assign pred cycle for profiling
  io.bpu_to_ftq.resp.bits.s1.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s2.full_pred.map(_.predCycle.map(_ := GTimer()))
  io.bpu_to_ftq.resp.bits.s3.full_pred.map(_.predCycle.map(_ := GTimer()))

  // History management
  // s1
  val s1_possible_predicted_ghist_ptrs_dup = s1_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s1_predicted_ghist_ptr_dup = s1_possible_predicted_ghist_ptrs_dup.zip(resp.s1.lastBrPosOH).map {
    case (ptr, oh) => Mux1H(oh, ptr)
  }
  val s1_possible_predicted_fhs_dup =
    for (
      ((((fgh, afh), br_num_oh), t), br_pos_oh) <-
        s1_folded_gh_dup zip s1_ahead_fh_oldest_bits_dup zip s1_last_br_num_oh_dup zip resp.s1.brTaken zip resp.s1.lastBrPosOH
    )
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, t & br_pos_oh(i))
      )
  val s1_predicted_fh_dup = resp.s1.lastBrPosOH.zip(s1_possible_predicted_fhs_dup).map { case (oh, fh) =>
    Mux1H(oh, fh)
  }

  val s1_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s1_ahead_fh_ob_src_dup.zip(s1_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s1_predicted_ghist = WireInit(getHist(s1_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(resp.s1.shouldShiftVec(0)(i)) {
        s1_predicted_ghist(i) := resp.s1.brTaken(0) && (i == 0).B
      }
    }
    when(s1_valid_dup(0)) {
      s0_ghist := s1_predicted_ghist.asUInt
    }
  }

  // GHV bit n is written when n == ptr - b for some slot b that shifts,
  // i.e. the positions at and just below the current pointer
  val s1_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      s1_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value &&
        resp.s1.shouldShiftVec(0)(b) && s1_valid_dup(0)
    )
  )
  val s1_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b =>
        (
          s1_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s1.shouldShiftVec(0)(b),
          resp.s1.brTaken(0) && resp.s1.lastBrPosOH(0)(b + 1)
        )
      )
    )
  )

  for (((npcGen, s1_valid), s1_target) <- npcGen_dup zip s1_valid_dup zip resp.s1.getTarget)
    npcGen.register(s1_valid, s1_target, Some("s1_target"), 4)
  for (((foldedGhGen, s1_valid), s1_predicted_fh) <- foldedGhGen_dup zip s1_valid_dup zip s1_predicted_fh_dup)
    foldedGhGen.register(s1_valid, s1_predicted_fh, Some("s1_FGH"), 4)
  for (
    ((ghistPtrGen, s1_valid), s1_predicted_ghist_ptr) <- ghistPtrGen_dup zip s1_valid_dup zip s1_predicted_ghist_ptr_dup
  )
    ghistPtrGen.register(s1_valid, s1_predicted_ghist_ptr, Some("s1_GHPtr"), 4)
  for (
    ((lastBrNumOHGen, s1_valid), s1_brPosOH) <-
      lastBrNumOHGen_dup zip s1_valid_dup zip resp.s1.lastBrPosOH.map(_.asUInt)
  )
    lastBrNumOHGen.register(s1_valid, s1_brPosOH, Some("s1_BrNumOH"), 4)
  for (((aheadFhObGen, s1_valid), s1_ahead_fh_ob_src) <- aheadFhObGen_dup zip s1_valid_dup zip s1_ahead_fh_ob_src_dup)
    aheadFhObGen.register(s1_valid, s1_ahead_fh_ob_src, Some("s1_AFHOB"), 4)
  ghvBitWriteGens.zip(s1_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s1_ghv_wdatas(i), Some(s"s1_new_bit_$i"), 4)
  }

  class PreviousPredInfo extends Bundle {
    val hit = Vec(numDup, Bool())
    val target = Vec(numDup, UInt(VAddrBits.W))
    val lastBrPosOH = Vec(numDup, Vec(numBr + 1, Bool()))
    val taken = Vec(numDup, Bool())
    val takenMask = Vec(numDup, Vec(numBr, Bool()))
    val cfiIndex = Vec(numDup, UInt(log2Ceil(PredictWidth).W))
  }
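
  // PreviousPredInfo snapshots exactly the fields of an s1 prediction that the s2
  // consistency check needs; an instance is captured on s1_fire below and compared
  // against resp.s2 by preds_needs_redirect_vec_dup.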

  def preds_needs_redirect_vec_dup(x: PreviousPredInfo, y: BranchPredictionBundle) = {
    // Timing optimization:
    // We first compare every candidate target with the previous stage's target,
    // then select the difference by taken & hit.
    // The target is usually generated earlier than taken, so comparing targets
    // before selecting helps timing.
    val targetDiffVec: IndexedSeq[Vec[Bool]] =
      x.target.zip(y.getAllTargets).map {
        case (xTarget, yAllTarget) => VecInit(yAllTarget.map(_ =/= xTarget))
      } // [numDup][all target comparisons]
    val targetDiff: IndexedSeq[Bool] =
      targetDiffVec.zip(x.hit).zip(x.takenMask).map {
        case ((diff, hit), takenMask) => selectByTaken(takenMask, hit, diff)
      } // [numDup]

    val lastBrPosOHDiff: IndexedSeq[Bool] = x.lastBrPosOH.zip(y.lastBrPosOH).map { case (oh1, oh2) =>
      oh1.asUInt =/= oh2.asUInt
    }
    val takenDiff: IndexedSeq[Bool] = x.taken.zip(y.taken).map { case (t1, t2) => t1 =/= t2 }
    val takenOffsetDiff: IndexedSeq[Bool] = x.cfiIndex.zip(y.cfiIndex).zip(x.taken).zip(y.taken).map {
      case (((i1, i2), xt), yt) => xt && yt && i1 =/= i2.bits
    }
    VecInit(
      for (
        (((tgtd, lbpohd), tkd), tod) <-
          targetDiff zip lastBrPosOHDiff zip takenDiff zip takenOffsetDiff
      )
        yield VecInit(tgtd, lbpohd, tkd, tod)
      // x.shouldShiftVec.asUInt =/= y.shouldShiftVec.asUInt,
      // x.brTaken =/= y.brTaken
    )
  }

  // s2
  val s2_possible_predicted_ghist_ptrs_dup = s2_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s2_predicted_ghist_ptr_dup = s2_possible_predicted_ghist_ptrs_dup.zip(resp.s2.lastBrPosOH).map {
    case (ptr, oh) => Mux1H(oh, ptr)
  }

  val s2_possible_predicted_fhs_dup =
    for (
      (((fgh, afh), br_num_oh), full_pred) <-
        s2_folded_gh_dup zip s2_ahead_fh_oldest_bits_dup zip s2_last_br_num_oh_dup zip resp.s2.full_pred
    )
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i - 1) else false.B)
      )
  val s2_predicted_fh_dup = resp.s2.lastBrPosOH.zip(s2_possible_predicted_fhs_dup).map { case (oh, fh) =>
    Mux1H(oh, fh)
  }

  val s2_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s2_ahead_fh_ob_src_dup.zip(s2_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s2_predicted_ghist = WireInit(getHist(s2_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(resp.s2.shouldShiftVec(0)(i)) {
        s2_predicted_ghist(i) := resp.s2.brTaken(0) && (i == 0).B
      }
    }
    when(s2_redirect_dup(0)) {
      s0_ghist := s2_predicted_ghist.asUInt
    }
  }

  val s2_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      s2_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value &&
        resp.s2.shouldShiftVec(0)(b) && s2_redirect_dup(0)
    )
  )
  val s2_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b =>
        (
          s2_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s2.shouldShiftVec(0)(b),
          resp.s2.full_pred(0).real_br_taken_mask()(b)
        )
      )
    )
  )

  val s1_pred_info = Wire(new PreviousPredInfo)
  s1_pred_info.hit := resp.s1.full_pred.map(_.hit)
  s1_pred_info.target := resp.s1.getTarget
  s1_pred_info.lastBrPosOH := resp.s1.lastBrPosOH
  s1_pred_info.taken := resp.s1.taken
  s1_pred_info.takenMask := resp.s1.full_pred.map(_.taken_mask_on_slot)
  s1_pred_info.cfiIndex := resp.s1.cfiIndex.map { case x => x.bits }

  val previous_s1_pred_info = RegEnable(s1_pred_info, 0.U.asTypeOf(new PreviousPredInfo), s1_fire_dup(0))
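
  // Compare the s1 prediction captured last cycle against this cycle's s2 result;
  // any difference in target, last branch position, direction, or taken CFI offset
  // forces an s2-stage redirect.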
  val s2_redirect_s1_last_pred_vec_dup = preds_needs_redirect_vec_dup(previous_s1_pred_info, resp.s2)

  for (
    ((s2_redirect, s2_fire), s2_redirect_s1_last_pred_vec) <-
      s2_redirect_dup zip s2_fire_dup zip s2_redirect_s1_last_pred_vec_dup
  )
    s2_redirect := s2_fire && s2_redirect_s1_last_pred_vec.reduce(_ || _)

  for (((npcGen, s2_redirect), s2_target) <- npcGen_dup zip s2_redirect_dup zip resp.s2.getTarget)
    npcGen.register(s2_redirect, s2_target, Some("s2_target"), 5)
  for (((foldedGhGen, s2_redirect), s2_predicted_fh) <- foldedGhGen_dup zip s2_redirect_dup zip s2_predicted_fh_dup)
    foldedGhGen.register(s2_redirect, s2_predicted_fh, Some("s2_FGH"), 5)
  for (
    ((ghistPtrGen, s2_redirect), s2_predicted_ghist_ptr) <-
      ghistPtrGen_dup zip s2_redirect_dup zip s2_predicted_ghist_ptr_dup
  )
    ghistPtrGen.register(s2_redirect, s2_predicted_ghist_ptr, Some("s2_GHPtr"), 5)
  for (
    ((lastBrNumOHGen, s2_redirect), s2_brPosOH) <-
      lastBrNumOHGen_dup zip s2_redirect_dup zip resp.s2.lastBrPosOH.map(_.asUInt)
  )
    lastBrNumOHGen.register(s2_redirect, s2_brPosOH, Some("s2_BrNumOH"), 5)
  for (
    ((aheadFhObGen, s2_redirect), s2_ahead_fh_ob_src) <- aheadFhObGen_dup zip s2_redirect_dup zip s2_ahead_fh_ob_src_dup
  )
    aheadFhObGen.register(s2_redirect, s2_ahead_fh_ob_src, Some("s2_AFHOB"), 5)
  ghvBitWriteGens.zip(s2_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s2_ghv_wdatas(i), Some(s"s2_new_bit_$i"), 5)
  }

  XSPerfAccumulate("s2_redirect_because_target_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(0))
  XSPerfAccumulate("s2_redirect_because_branch_num_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(1))
  XSPerfAccumulate("s2_redirect_because_direction_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(2))
  XSPerfAccumulate("s2_redirect_because_cfi_idx_diff", s2_fire_dup(0) && s2_redirect_s1_last_pred_vec_dup(0)(3))
  // XSPerfAccumulate("s2_redirect_because_shouldShiftVec_diff", s2_fire && s2_redirect_s1_last_pred_vec(4))
  // XSPerfAccumulate("s2_redirect_because_brTaken_diff", s2_fire && s2_redirect_s1_last_pred_vec(5))
  XSPerfAccumulate("s2_redirect_because_fallThroughError", s2_fire_dup(0) && resp.s2.fallThruError(0))

  XSPerfAccumulate("s2_redirect_when_taken", s2_redirect_dup(0) && resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_taken", s2_redirect_dup(0) && !resp.s2.taken(0) && resp.s2.full_pred(0).hit)
  XSPerfAccumulate("s2_redirect_when_not_hit", s2_redirect_dup(0) && !resp.s2.full_pred(0).hit)

  // s3
  val s3_possible_predicted_ghist_ptrs_dup = s3_ghist_ptr_dup.map(ptr => (0 to numBr).map(ptr - _.U))
  val s3_predicted_ghist_ptr_dup = s3_possible_predicted_ghist_ptrs_dup.zip(resp.s3.lastBrPosOH).map {
    case (ptr, oh) => Mux1H(oh, ptr)
  }

  val s3_possible_predicted_fhs_dup =
    for (
      (((fgh, afh), br_num_oh), full_pred) <-
        s3_folded_gh_dup zip s3_ahead_fh_oldest_bits_dup zip s3_last_br_num_oh_dup zip resp.s3.full_pred
    )
      yield (0 to numBr).map(i =>
        fgh.update(afh, br_num_oh, i, if (i > 0) full_pred.br_taken_mask(i - 1) else false.B)
      )
  val s3_predicted_fh_dup = resp.s3.lastBrPosOH.zip(s3_possible_predicted_fhs_dup).map { case (oh, fh) =>
    Mux1H(oh, fh)
  }

  val s3_ahead_fh_ob_src_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  s3_ahead_fh_ob_src_dup.zip(s3_ghist_ptr_dup).map { case (src, ptr) => src.read(ghv, ptr) }

  if (EnableGHistDiff) {
    val s3_predicted_ghist = WireInit(getHist(s3_predicted_ghist_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(resp.s3.shouldShiftVec(0)(i)) {
        s3_predicted_ghist(i) := resp.s3.brTaken(0) && (i == 0).B
      }
    }
    when(s3_redirect_dup(0)) {
      s0_ghist := s3_predicted_ghist.asUInt
    }
  }

  val s3_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      s3_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value &&
        resp.s3.shouldShiftVec(0)(b) && s3_redirect_dup(0)
    )
  )
  val s3_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b =>
        (
          s3_ghist_ptr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && resp.s3.shouldShiftVec(0)(b),
          resp.s3.full_pred(0).real_br_taken_mask()(b)
        )
      )
    )
  )

  // To optimize the clock gating efficiency of previous_s2_*
  val previous_s2_pred = Wire(new BranchPredictionBundle(isNotS3 = true))
  previous_s2_pred.pc := RegEnable(resp.s2.pc, 0.U.asTypeOf(resp.s2.pc), s2_fire_dup(0)).suggestName(
    s"previous_s2_pred_pc"
  )
  previous_s2_pred.valid := RegEnable(resp.s2.valid, 0.U.asTypeOf(resp.s2.valid), s2_fire_dup(0)).suggestName(
    s"previous_s2_pred_valid"
  )
  previous_s2_pred.hasRedirect := RegEnable(
    resp.s2.hasRedirect,
    0.U.asTypeOf(resp.s2.hasRedirect),
    s2_fire_dup(0)
  ).suggestName(s"previous_s2_pred_hasRedirect")
  previous_s2_pred.ftq_idx := RegEnable(resp.s2.ftq_idx, 0.U.asTypeOf(resp.s2.ftq_idx), s2_fire_dup(0)).suggestName(
    s"previous_s2_pred_ftq_idx"
  )
  previous_s2_pred.full_pred := RegEnable(
    resp.s2.full_pred,
    0.U.asTypeOf(resp.s2.full_pred),
    s2_fire_dup(0)
  ).suggestName(s"previous_s2_pred_full_pred")
  previous_s2_pred.full_pred.zip(resp.s2.full_pred.zipWithIndex).map { case (prev_fp, (new_fp, dupIdx)) =>
    prev_fp.targets.zip(new_fp.taken_mask_on_slot.zipWithIndex).map { case (target, (taken_mask, slotIdx)) =>
      // This enable signal can further improve CGE, but it may lead to timing violations:
      // s2_fire_dup(0) && !new_fp.taken_mask_on_slot.take(slotIdx).fold(false.B)(_||_) && taken_mask && new_fp.hit
      target := RegEnable(new_fp.targets(slotIdx), 0.U.asTypeOf(new_fp.targets(slotIdx)), s2_fire_dup(0) && taken_mask)
    }
    // This enable signal can further improve CGE, but it may lead to timing violations:
    // s2_fire_dup(0) && new_fp.hit && !new_fp.taken_mask_on_slot.reduce(_||_)
    prev_fp.fallThroughAddr := RegEnable(
      new_fp.fallThroughAddr,
      0.U.asTypeOf(new_fp.fallThroughAddr),
      s2_fire_dup(0) && resp.s2.full_pred(0).hit && !resp.s2.full_pred(0).taken_mask_on_slot(0)
    )
  }

  val s3_redirect_on_br_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) =>
    fp1.real_br_taken_mask().asUInt =/= fp2.real_br_taken_mask().asUInt
  }
  val s3_both_first_taken_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) =>
    fp1.real_br_taken_mask()(0) && fp2.real_br_taken_mask()(0)
  }
  val s3_redirect_on_target_dup = resp.s3.getTarget.zip(previous_s2_pred.getTarget).map { case (t1, t2) => t1 =/= t2 }
  val s3_redirect_on_jalr_target_dup = resp.s3.full_pred.zip(previous_s2_pred.full_pred).map { case (fp1, fp2) =>
    fp1.hit_taken_on_jalr && fp1.jalr_target =/= fp2.jalr_target
  }
  val s3_redirect_on_fall_thru_error_dup = resp.s3.fallThruError
  val s3_redirect_on_ftb_multi_hit_dup = resp.s3.ftbMultiHit
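
  // s3 overrides the earlier prediction when the real taken mask changed (unless
  // both s2 and s3 already take the first slot), the target changed, s3 detects a
  // fall-through error, or the FTB reports a multi-hit.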
  for (
    (
      (
        ((((s3_redirect, s3_fire), s3_redirect_on_br_taken), s3_redirect_on_target), s3_redirect_on_fall_thru_error),
        s3_redirect_on_ftb_multi_hit
      ),
      s3_both_first_taken
    ) <-
      s3_redirect_dup zip s3_fire_dup zip s3_redirect_on_br_taken_dup zip s3_redirect_on_target_dup zip s3_redirect_on_fall_thru_error_dup zip s3_redirect_on_ftb_multi_hit_dup zip s3_both_first_taken_dup
  ) {
    s3_redirect := s3_fire && (
      (s3_redirect_on_br_taken && !s3_both_first_taken) || s3_redirect_on_target ||
        s3_redirect_on_fall_thru_error || s3_redirect_on_ftb_multi_hit
    )
  }

  XSPerfAccumulate(f"s3_redirect_on_br_taken", s3_fire_dup(0) && s3_redirect_on_br_taken_dup(0))
  XSPerfAccumulate(f"s3_redirect_on_jalr_target", s3_fire_dup(0) && s3_redirect_on_jalr_target_dup(0))
  XSPerfAccumulate(
    f"s3_redirect_on_others",
    s3_redirect_dup(0) && !(s3_redirect_on_br_taken_dup(0) || s3_redirect_on_jalr_target_dup(0))
  )

  for (((npcGen, s3_redirect), s3_target) <- npcGen_dup zip s3_redirect_dup zip resp.s3.getTarget)
    npcGen.register(s3_redirect, s3_target, Some("s3_target"), 3)
  for (((foldedGhGen, s3_redirect), s3_predicted_fh) <- foldedGhGen_dup zip s3_redirect_dup zip s3_predicted_fh_dup)
    foldedGhGen.register(s3_redirect, s3_predicted_fh, Some("s3_FGH"), 3)
  for (
    ((ghistPtrGen, s3_redirect), s3_predicted_ghist_ptr) <-
      ghistPtrGen_dup zip s3_redirect_dup zip s3_predicted_ghist_ptr_dup
  )
    ghistPtrGen.register(s3_redirect, s3_predicted_ghist_ptr, Some("s3_GHPtr"), 3)
  for (
    ((lastBrNumOHGen, s3_redirect), s3_brPosOH) <-
      lastBrNumOHGen_dup zip s3_redirect_dup zip resp.s3.lastBrPosOH.map(_.asUInt)
  )
    lastBrNumOHGen.register(s3_redirect, s3_brPosOH, Some("s3_BrNumOH"), 3)
  for (
    ((aheadFhObGen, s3_redirect), s3_ahead_fh_ob_src) <- aheadFhObGen_dup zip s3_redirect_dup zip s3_ahead_fh_ob_src_dup
  )
    aheadFhObGen.register(s3_redirect, s3_ahead_fh_ob_src, Some("s3_AFHOB"), 3)
  ghvBitWriteGens.zip(s3_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), s3_ghv_wdatas(i), Some(s"s3_new_bit_$i"), 3)
  }

  // Send signals telling the FTQ to override
  val s2_ftq_idx = RegEnable(io.ftq_to_bpu.enq_ptr, s1_fire_dup(0))
  val s3_ftq_idx = RegEnable(s2_ftq_idx, s2_fire_dup(0))

  for (((to_ftq_s1_valid, s1_fire), s1_flush) <- io.bpu_to_ftq.resp.bits.s1.valid zip s1_fire_dup zip s1_flush_dup) {
    to_ftq_s1_valid := s1_fire && !s1_flush
  }
  io.bpu_to_ftq.resp.bits.s1.hasRedirect.map(_ := false.B)
  io.bpu_to_ftq.resp.bits.s1.ftq_idx := DontCare
  for (((to_ftq_s2_valid, s2_fire), s2_flush) <- io.bpu_to_ftq.resp.bits.s2.valid zip s2_fire_dup zip s2_flush_dup) {
    to_ftq_s2_valid := s2_fire && !s2_flush
  }
  io.bpu_to_ftq.resp.bits.s2.hasRedirect.zip(s2_redirect_dup).map { case (hr, r) => hr := r }
  io.bpu_to_ftq.resp.bits.s2.ftq_idx := s2_ftq_idx
  for (((to_ftq_s3_valid, s3_fire), s3_flush) <- io.bpu_to_ftq.resp.bits.s3.valid zip s3_fire_dup zip s3_flush_dup) {
    to_ftq_s3_valid := s3_fire && !s3_flush
  }
  io.bpu_to_ftq.resp.bits.s3.hasRedirect.zip(s3_redirect_dup).map { case (hr, r) => hr := r }
  io.bpu_to_ftq.resp.bits.s3.ftq_idx := s3_ftq_idx
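
  // Training path: update and redirect requests arrive from the FTQ. The global
  // history used for training is reconstructed from the recorded history pointer
  // via getHist instead of being carried through the pipeline.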
  predictors.io.update := io.ftq_to_bpu.update
  predictors.io.update.bits.ghist := getHist(io.ftq_to_bpu.update.bits.spec_info.histPtr)
  // Move the update pc registers out of the predictors.
  predictors.io.update.bits.pc := SegmentedAddrNext(
    io.ftq_to_bpu.update.bits.pc,
    pcSegments,
    io.ftq_to_bpu.update.valid,
    Some("predictors_io_update_pc")
  ).getAddr()

  val redirect_dup = do_redirect_dup.map(_.bits)
  predictors.io.redirect := do_redirect_dup(0)

  // Redirect logic
  val shift_dup = redirect_dup.map(_.cfiUpdate.shift)
  val addIntoHist_dup = redirect_dup.map(_.cfiUpdate.addIntoHist)
  // TODO: remove these below
  val shouldShiftVec_dup = shift_dup.map(shift =>
    Mux(
      shift === 0.U,
      VecInit(0.U((1 << (log2Ceil(numBr) + 1)).W).asBools),
      VecInit(LowerMask(1.U << (shift - 1.U)).asBools)
    )
  )
  // TODO end
  val afhob_dup = redirect_dup.map(_.cfiUpdate.afhob)
  val lastBrNumOH_dup = redirect_dup.map(_.cfiUpdate.lastBrNumOH)

  val isBr_dup = redirect_dup.map(_.cfiUpdate.pd.isBr)
  val taken_dup = redirect_dup.map(_.cfiUpdate.taken)
  val real_br_taken_mask_dup =
    for (((shift, taken), addIntoHist) <- shift_dup zip taken_dup zip addIntoHist_dup)
      yield (0 until numBr).map(i => shift === (i + 1).U && taken && addIntoHist)

  val oldPtr_dup = redirect_dup.map(_.cfiUpdate.histPtr)
  val updated_ptr_dup = oldPtr_dup.zip(shift_dup).map { case (oldPtr, shift) => oldPtr - shift }
  def computeFoldedHist(hist: UInt, compLen: Int)(histLen: Int): UInt =
    if (histLen > 0) {
      val nChunks = (histLen + compLen - 1) / compLen
      val hist_chunks = (0 until nChunks) map { i => hist(min((i + 1) * compLen, histLen) - 1, i * compLen) }
      ParallelXOR(hist_chunks)
    } else 0.U
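
  // Illustrative example (not in the original source): computeFoldedHist(hist, 8)(20)
  // splits the low 20 history bits into hist(7,0), hist(15,8), hist(19,16) and
  // XOR-reduces them into a single 8-bit folded history, the same folding the
  // commit-time checker below applies with compLen = log2Ceil(nRowsPerBr);
  // histLen = 0 yields 0.U for history-less tables.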

  val oldFh_dup = dup_seq(WireInit(0.U.asTypeOf(new AllFoldedHistories(foldedGHistInfos))))
  oldFh_dup.zip(oldPtr_dup).map { case (oldFh, oldPtr) =>
    foldedGHistInfos.foreach { case (histLen, compLen) =>
      oldFh.getHistWithInfo((histLen, compLen)).folded_hist := computeFoldedHist(getHist(oldPtr), compLen)(histLen)
    }
  }

  val updated_fh_dup =
    for (
      ((((oldFh, oldPtr), taken), addIntoHist), shift) <-
        oldFh_dup zip oldPtr_dup zip taken_dup zip addIntoHist_dup zip shift_dup
    )
      yield VecInit((0 to numBr).map(i => oldFh.update(ghv, oldPtr, i, taken && addIntoHist)))(shift)
  val thisBrNumOH_dup = shift_dup.map(shift => UIntToOH(shift, numBr + 1))
  val thisAheadFhOb_dup = dup_wire(new AllAheadFoldedHistoryOldestBits(foldedGHistInfos))
  thisAheadFhOb_dup.zip(oldPtr_dup).map { case (afhob, oldPtr) => afhob.read(ghv, oldPtr) }
  val redirect_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b =>
      oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b) && do_redirect_dup(0).valid
    )
  )
  val redirect_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => oldPtr_dup(0).value === (CGHPtr(false.B, n.U) + b.U).value && shouldShiftVec_dup(0)(b)),
      real_br_taken_mask_dup(0)
    )
  )

  if (EnableGHistDiff) {
    val updated_ghist = WireInit(getHist(updated_ptr_dup(0)).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when(shift_dup(0) >= (i + 1).U) {
        updated_ghist(i) := taken_dup(0) && addIntoHist_dup(0) && (i == 0).B
      }
    }
    when(do_redirect_dup(0).valid) {
      s0_ghist := updated_ghist.asUInt
    }
  }

  // Commit-time history checker
  if (EnableCommitGHistDiff) {
    val commitGHist = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
    val commitGHistPtr = RegInit(0.U.asTypeOf(new CGHPtr))
    def getCommitHist(ptr: CGHPtr): UInt =
      (Cat(commitGHist.asUInt, commitGHist.asUInt) >> (ptr.value + 1.U))(HistoryLength - 1, 0)

    val updateValid: Bool = io.ftq_to_bpu.update.valid
    val branchValidMask: UInt = io.ftq_to_bpu.update.bits.ftb_entry.brValids.asUInt
    val branchCommittedMask: Vec[Bool] = io.ftq_to_bpu.update.bits.br_committed
    val misPredictMask: UInt = io.ftq_to_bpu.update.bits.mispred_mask.asUInt
    val takenMask: UInt =
      io.ftq_to_bpu.update.bits.br_taken_mask.asUInt |
        io.ftq_to_bpu.update.bits.ftb_entry.strong_bias.asUInt // always-taken branches are recorded in the history
    val takenIdx: UInt = (PriorityEncoder(takenMask) + 1.U((log2Ceil(numBr) + 1).W)).asUInt
    val misPredictIdx: UInt = (PriorityEncoder(misPredictMask) + 1.U((log2Ceil(numBr) + 1).W)).asUInt
    val shouldShiftMask: UInt = Mux(takenMask.orR, LowerMask(takenIdx).asUInt, ((1 << numBr) - 1).asUInt) &
      Mux(misPredictMask.orR, LowerMask(misPredictIdx).asUInt, ((1 << numBr) - 1).asUInt) &
      branchCommittedMask.asUInt
    val updateShift: UInt =
      Mux(updateValid && branchValidMask.orR, PopCount(branchValidMask & shouldShiftMask), 0.U)

    // Maintain the commitGHist
    for (i <- 0 until numBr) {
      when(updateShift >= (i + 1).U) {
        val ptr: CGHPtr = commitGHistPtr - i.asUInt
        commitGHist(ptr.value) := takenMask(i)
      }
    }
    when(updateValid) {
      commitGHistPtr := commitGHistPtr - updateShift
    }

    // Calculate the true folded history with parallel XOR and diff it against
    // the predict-time folded history
    TageTableInfos.map {
      case (nRows, histLen, _) => {
        val nRowsPerBr = nRows / numBr
        val predictGHistPtr = io.ftq_to_bpu.update.bits.spec_info.histPtr
        val commitTrueHist: UInt = computeFoldedHist(getCommitHist(commitGHistPtr), log2Ceil(nRowsPerBr))(histLen)
        val predictFHist: UInt = computeFoldedHist(getHist(predictGHistPtr), log2Ceil(nRowsPerBr))(histLen)
        XSWarn(
          updateValid && predictFHist =/= commitTrueHist,
          p"predict time ghist: ${predictFHist} is different from commit time: ${commitTrueHist}\n"
        )
      }
    }
  }

  // val updatedGh = oldGh.update(shift, taken && addIntoHist)
  for ((npcGen, do_redirect) <- npcGen_dup zip do_redirect_dup)
    npcGen.register(do_redirect.valid, do_redirect.bits.cfiUpdate.target, Some("redirect_target"), 2)
  for (((foldedGhGen, do_redirect), updated_fh) <- foldedGhGen_dup zip do_redirect_dup zip updated_fh_dup)
    foldedGhGen.register(do_redirect.valid, updated_fh, Some("redirect_FGHT"), 2)
  for (((ghistPtrGen, do_redirect), updated_ptr) <- ghistPtrGen_dup zip do_redirect_dup zip updated_ptr_dup)
    ghistPtrGen.register(do_redirect.valid, updated_ptr, Some("redirect_GHPtr"), 2)
  for (((lastBrNumOHGen, do_redirect), thisBrNumOH) <- lastBrNumOHGen_dup zip do_redirect_dup zip thisBrNumOH_dup)
    lastBrNumOHGen.register(do_redirect.valid, thisBrNumOH, Some("redirect_BrNumOH"), 2)
  for (((aheadFhObGen, do_redirect), thisAheadFhOb) <- aheadFhObGen_dup zip do_redirect_dup zip thisAheadFhOb_dup)
    aheadFhObGen.register(do_redirect.valid, thisAheadFhOb, Some("redirect_AFHOB"), 2)
  ghvBitWriteGens.zip(redirect_ghv_wens).zipWithIndex.map { case ((b, w), i) =>
    b.register(w.reduce(_ || _), redirect_ghv_wdatas(i), Some(s"redirect_new_bit_$i"), 2)
  }
  // no need to assign s0_last_pred

  // val need_reset = RegNext(reset.asBool) && !reset.asBool

  // Reset
  // npcGen.register(need_reset, resetVector.U, Some("reset_pc"), 1)
  // foldedGhGen.register(need_reset, 0.U.asTypeOf(s0_folded_gh), Some("reset_FGH"), 1)
  // ghistPtrGen.register(need_reset, 0.U.asTypeOf(new CGHPtr), Some("reset_GHPtr"), 1)
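
  // Finally resolve the next-cycle s0 state: query each generator for its
  // arbitrated value and commit the per-bit GHV writes.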
  s0_pc_dup.zip(npcGen_dup).map { case (s0_pc, npcGen) => s0_pc := npcGen() }
  s0_folded_gh_dup.zip(foldedGhGen_dup).map { case (s0_folded_gh, foldedGhGen) => s0_folded_gh := foldedGhGen() }
  s0_ghist_ptr_dup.zip(ghistPtrGen_dup).map { case (s0_ghist_ptr, ghistPtrGen) => s0_ghist_ptr := ghistPtrGen() }
  s0_ahead_fh_oldest_bits_dup.zip(aheadFhObGen_dup).map { case (s0_ahead_fh_oldest_bits, aheadFhObGen) =>
    s0_ahead_fh_oldest_bits := aheadFhObGen()
  }
  s0_last_br_num_oh_dup.zip(lastBrNumOHGen_dup).map { case (s0_last_br_num_oh, lastBrNumOHGen) =>
    s0_last_br_num_oh := lastBrNumOHGen()
  }
  (ghv_write_datas zip ghvBitWriteGens).map { case (wd, d) => wd := d() }
  for (i <- 0 until HistoryLength) {
    ghv_wens(i) := Seq(s1_ghv_wens, s2_ghv_wens, s3_ghv_wens, redirect_ghv_wens).map(_(i).reduce(_ || _)).reduce(_ || _)
    when(ghv_wens(i)) {
      ghv(i) := ghv_write_datas(i)
    }
  }

  // TODO: signals for memVio and other Redirects
  controlRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.ControlRedirectBubble
  ControlBTBMissBubble := do_redirect_dup(0).bits.ControlBTBMissBubble
  TAGEMissBubble := do_redirect_dup(0).bits.TAGEMissBubble
  SCMissBubble := do_redirect_dup(0).bits.SCMissBubble
  ITTAGEMissBubble := do_redirect_dup(0).bits.ITTAGEMissBubble
  RASMissBubble := do_redirect_dup(0).bits.RASMissBubble

  memVioRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.MemVioRedirectBubble
  otherRedirectBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.OtherRedirectBubble
  btbMissBubble := do_redirect_dup(0).valid && do_redirect_dup(0).bits.BTBMissBubble
  overrideBubble(0) := s2_redirect_dup(0)
  overrideBubble(1) := s3_redirect_dup(0)
  ftqUpdateBubble(0) := !s1_components_ready_dup(0)
  ftqUpdateBubble(1) := !s2_components_ready_dup(0)
  ftqUpdateBubble(2) := !s3_components_ready_dup(0)
  ftqFullStall := !io.bpu_to_ftq.resp.ready
  io.bpu_to_ftq.resp.bits.topdown_info := topdown_stages(numOfStage - 1)

  // topdown handling logic here
  when(controlRedirectBubble) {
    /*
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
     */
    when(ControlBTBMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
    }.elsewhen(TAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.TAGEMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
    }.elsewhen(SCMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.SCMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.SCMissBubble.id) := true.B
    }.elsewhen(ITTAGEMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
    }.elsewhen(RASMissBubble) {
      for (i <- 0 until numOfStage)
        topdown_stages(i).reasons(TopDownCounters.RASMissBubble.id) := true.B
      io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.RASMissBubble.id) := true.B
    }
  }
  when(memVioRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
  }
  when(otherRedirectBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
  }
  when(btbMissBubble) {
    for (i <- 0 until numOfStage)
      topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    io.bpu_to_ftq.resp.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
  }

  for (i <- 0 until numOfStage) {
    if (i < numOfStage - overrideStage) {
      when(overrideBubble(i)) {
        for (j <- 0 to i)
          topdown_stages(j).reasons(TopDownCounters.OverrideBubble.id) := true.B
      }
    }
    if (i < numOfStage - ftqUpdateStage) {
      when(ftqUpdateBubble(i)) {
        topdown_stages(i).reasons(TopDownCounters.FtqUpdateBubble.id) := true.B
      }
    }
  }
  when(ftqFullStall) {
    topdown_stages(0).reasons(TopDownCounters.FtqFullStall.id) := true.B
  }

  XSError(
    isBefore(redirect_dup(0).cfiUpdate.histPtr, s3_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s3_ghist_ptr ${s3_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n"
  )
  XSError(
    isBefore(redirect_dup(0).cfiUpdate.histPtr, s2_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s2_ghist_ptr ${s2_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n"
  )
  XSError(
    isBefore(redirect_dup(0).cfiUpdate.histPtr, s1_ghist_ptr_dup(0)) && do_redirect_dup(0).valid,
    p"s1_ghist_ptr ${s1_ghist_ptr_dup(0)} exceeds redirect histPtr ${redirect_dup(0).cfiUpdate.histPtr}\n"
  )

  XSDebug(RegNext(reset.asBool) && !reset.asBool, "Resetting...\n")
  XSDebug(io.ftq_to_bpu.update.valid, p"Update from ftq\n")
  XSDebug(io.ftq_to_bpu.redirect.valid, p"Redirect from ftq\n")

  XSDebug("[BP0] fire=%d pc=%x\n", s0_fire_dup(0), s0_pc_dup(0))
  XSDebug(
    "[BP1] v=%d r=%d cr=%d fire=%d flush=%d pc=%x\n",
    s1_valid_dup(0),
    s1_ready_dup(0),
    s1_components_ready_dup(0),
    s1_fire_dup(0),
    s1_flush_dup(0),
    s1_pc
  )
  XSDebug(
    "[BP2] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s2_valid_dup(0),
    s2_ready_dup(0),
    s2_components_ready_dup(0),
    s2_fire_dup(0),
    s2_redirect_dup(0),
    s2_flush_dup(0),
    s2_pc
  )
  XSDebug(
    "[BP3] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s3_valid_dup(0),
    s3_ready_dup(0),
    s3_components_ready_dup(0),
    s3_fire_dup(0),
    s3_redirect_dup(0),
    s3_flush_dup(0),
    s3_pc
  )
  XSDebug("[FTQ] ready=%d\n", io.bpu_to_ftq.resp.ready)
  XSDebug("resp.s1.target=%x\n", resp.s1.getTarget(0))
  XSDebug("resp.s2.target=%x\n", resp.s2.getTarget(0))
  // XSDebug("s0_ghist: %b\n", s0_ghist.predHist)
  // XSDebug("s1_ghist: %b\n", s1_ghist.predHist)
  // XSDebug("s2_ghist: %b\n", s2_ghist.predHist)
  // XSDebug("s2_predicted_ghist: %b\n", s2_predicted_ghist.predHist)
XSDebug(p"s0_ghist_ptr: ${s0_ghist_ptr_dup(0)}\n") 1204 XSDebug(p"s1_ghist_ptr: ${s1_ghist_ptr_dup(0)}\n") 1205 XSDebug(p"s2_ghist_ptr: ${s2_ghist_ptr_dup(0)}\n") 1206 XSDebug(p"s3_ghist_ptr: ${s3_ghist_ptr_dup(0)}\n") 1207 1208 io.ftq_to_bpu.update.bits.display(io.ftq_to_bpu.update.valid) 1209 io.ftq_to_bpu.redirect.bits.display(io.ftq_to_bpu.redirect.valid) 1210 1211 XSPerfAccumulate("s2_redirect", s2_redirect_dup(0)) 1212 XSPerfAccumulate("s3_redirect", s3_redirect_dup(0)) 1213 XSPerfAccumulate("s1_not_valid", !s1_valid_dup(0)) 1214 1215 val perfEvents = predictors.asInstanceOf[Composer].getPerfEvents 1216 generatePerfEvent() 1217} 1218