package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import chisel3.experimental.chiselName

// A single RAS entry; ctr compresses recursive calls that keep pushing the
// same return address into one entry.
class RASEntry()(implicit p: Parameters) extends XSBundle {
  val retAddr = UInt(VAddrBits.W)
  val ctr = UInt(8.W) // layer of nested call functions
}

@chiselName
class RAS(implicit p: Parameters) extends BasePredictor
{
  class RASResp extends Resp
  {
    val target = UInt(VAddrBits.W)
  }

  class RASBranchInfo extends Meta
  {
    val rasSp = UInt(log2Up(RasSize).W)
    val rasTop = new RASEntry
  }

  class RASIO extends DefaultBasePredictorIO
  {
    val is_ret = Input(Bool())
    val callIdx = Flipped(ValidIO(UInt(log2Ceil(PredictWidth).W)))
    val isRVC = Input(Bool())
    val isLastHalfRVI = Input(Bool())
    val redirect = Flipped(ValidIO(new Redirect))
    val out = Output(new RASResp)
    val meta = Output(new RASBranchInfo)
  }

  def rasEntry() = new RASEntry

  object RASEntry {
    def apply(retAddr: UInt, ctr: UInt): RASEntry = {
      val e = Wire(rasEntry())
      e.retAddr := retAddr
      e.ctr := ctr
      e
    }
  }

  override val io = IO(new RASIO)
  override val debug = true

  // Speculative return address stack with counter compression: pushing a
  // return address that matches the current top only increments its counter.
  @chiselName
  class RASStack(val rasSize: Int) extends XSModule {
    val io = IO(new Bundle {
      val push_valid = Input(Bool())
      val pop_valid = Input(Bool())
      val spec_new_addr = Input(UInt(VAddrBits.W))

      val recover_sp = Input(UInt(log2Up(rasSize).W))
      val recover_top = Input(rasEntry())
      val recover_valid = Input(Bool())
      val recover_push = Input(Bool())
      val recover_pop = Input(Bool())
      val recover_new_addr = Input(UInt(VAddrBits.W))

      val sp = Output(UInt(log2Up(rasSize).W))
      val top = Output(rasEntry())
    })
    val debugIO = IO(new Bundle {
      val push_entry = Output(rasEntry())
      val alloc_new = Output(Bool())
      val sp = Output(UInt(log2Up(rasSize).W))
      val topRegister = Output(rasEntry())
      val out_mem = Output(Vec(RasSize, rasEntry()))
    })

    val stack = Mem(RasSize, new RASEntry)
    val sp = RegInit(0.U(log2Up(rasSize).W))
    val top = RegInit(0.U.asTypeOf(new RASEntry))
    val topPtr = RegInit(0.U(log2Up(rasSize).W))

    // circular increment/decrement of the stack pointers
    def ptrInc(ptr: UInt) = Mux(ptr === (rasSize - 1).U, 0.U, ptr + 1.U)
    def ptrDec(ptr: UInt) = Mux(ptr === 0.U, (rasSize - 1).U, ptr - 1.U)

    // allocate a new entry unless the pushed address matches the top entry
    // and its counter has not saturated
    val alloc_new = io.spec_new_addr =/= top.retAddr || top.ctr.andR
    val recover_alloc_new = io.recover_new_addr =/= io.recover_top.retAddr || io.recover_top.ctr.andR

    // TODO: fix overflow and underflow bugs
    def update(recover: Bool)(do_push: Bool, do_pop: Bool, do_alloc_new: Bool,
                              do_sp: UInt, do_top_ptr: UInt, do_new_addr: UInt,
                              do_top: RASEntry) = {
      when (do_push) {
        when (do_alloc_new) {
          sp := ptrInc(do_sp)
          topPtr := do_sp
          top.retAddr := do_new_addr
          top.ctr := 1.U
          stack.write(do_sp, RASEntry(do_new_addr, 1.U))
        }.otherwise {
          when (recover) {
            sp := do_sp
            topPtr := do_top_ptr
            top.retAddr := do_top.retAddr
          }
          top.ctr := do_top.ctr + 1.U
          stack.write(do_top_ptr, RASEntry(do_new_addr, do_top.ctr + 1.U))
        }
      }.elsewhen (do_pop) {
        when (do_top.ctr === 1.U) {
          sp := ptrDec(do_sp)
          topPtr := ptrDec(do_top_ptr)
          top := stack.read(ptrDec(do_top_ptr))
        }.otherwise {
          when (recover) {
            sp := do_sp
            topPtr := do_top_ptr
            top.retAddr := do_top.retAddr
          }
          top.ctr := do_top.ctr - 1.U
          stack.write(do_top_ptr, RASEntry(do_top.retAddr, do_top.ctr - 1.U))
        }
      }.otherwise {
        when (recover) {
          sp := do_sp
          topPtr := do_top_ptr
          top := do_top
          stack.write(do_top_ptr, do_top)
        }
      }
      XSPerfAccumulate("ras_overflow", do_push && do_alloc_new && ptrInc(do_sp) === 0.U)
      XSPerfAccumulate("ras_underflow", do_pop && do_top.ctr === 1.U && ptrDec(do_sp) === (rasSize - 1).U)
    }
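
    // One update path is shared by the speculative side and the redirect
    // recovery side: on a redirect the checkpointed sp/top and the redone
    // push/pop are selected, otherwise the push/pop derived from the current
    // fetch packet is applied.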
    update(io.recover_valid)(
      Mux(io.recover_valid, io.recover_push, io.push_valid),
      Mux(io.recover_valid, io.recover_pop, io.pop_valid),
      Mux(io.recover_valid, recover_alloc_new, alloc_new),
      Mux(io.recover_valid, io.recover_sp, sp),
      Mux(io.recover_valid, io.recover_sp - 1.U, topPtr),
      Mux(io.recover_valid, io.recover_new_addr, io.spec_new_addr),
      Mux(io.recover_valid, io.recover_top, top))

    io.sp := sp
    io.top := top

    debugIO.push_entry := RASEntry(io.spec_new_addr, Mux(alloc_new, 1.U, top.ctr + 1.U))
    debugIO.alloc_new := alloc_new
    debugIO.sp := sp
    debugIO.topRegister := top
    for (i <- 0 until RasSize) {
      debugIO.out_mem(i) := stack.read(i.U)
    }
  }

  val spec = Module(new RASStack(RasSize))
  val spec_ras = spec.io

  val spec_push = WireInit(false.B)
  val spec_pop = WireInit(false.B)
  val jump_is_first = io.callIdx.bits === 0.U
  // an RVI call whose first half lies in the previous fetch packet
  val call_is_last_half = io.isLastHalfRVI && jump_is_first
  // return address of the call: packet base + instruction offset + the size
  // of the call instruction (2 bytes for RVC or a split RVI call, else 4)
  val spec_new_addr = packetAligned(io.pc.bits) + (io.callIdx.bits << instOffsetBits.U) +
    Mux((io.isRVC | call_is_last_half) && HasCExtension.B, 2.U, 4.U)
  spec_ras.push_valid := spec_push
  spec_ras.pop_valid := spec_pop
  spec_ras.spec_new_addr := spec_new_addr
  val spec_top_addr = spec_ras.top.retAddr

  spec_push := io.callIdx.valid && io.pc.valid
  spec_pop := io.is_ret && io.pc.valid

  val redirect = RegNext(io.redirect)
  val copy_valid = redirect.valid
  val recover_cfi = redirect.bits.cfiUpdate

  val retMissPred = copy_valid && redirect.bits.level === 0.U && recover_cfi.pd.isRet
  val callMissPred = copy_valid && redirect.bits.level === 0.U && recover_cfi.pd.isCall
  // when we mispredict a call, we must redo the push operation;
  // similarly, when we mispredict a return, we should redo the pop
  spec_ras.recover_valid := copy_valid
  spec_ras.recover_push := callMissPred
  spec_ras.recover_pop := retMissPred

  spec_ras.recover_sp := recover_cfi.rasSp
  spec_ras.recover_top := recover_cfi.rasEntry
  // return address of the mispredicted call: the address right after it
  spec_ras.recover_new_addr := recover_cfi.pc + Mux(recover_cfi.pd.isRVC, 2.U, 4.U)

  io.meta.rasSp := spec_ras.sp
  io.meta.rasTop := spec_ras.top

  io.out.target := spec_top_addr
  // TODO: back-up stack for ras
  // use checkpoint to recover RAS

  if (BPUDebug && debug) {
    val spec_debug = spec.debugIO
    XSDebug("----------------RAS----------------\n")
    XSDebug("  TopRegister: 0x%x  %d \n", spec_debug.topRegister.retAddr, spec_debug.topRegister.ctr)
    XSDebug("  index       addr        ctr \n")
    for (i <- 0 until RasSize) {
      XSDebug("  (%d)   0x%x   %d", i.U, spec_debug.out_mem(i).retAddr, spec_debug.out_mem(i).ctr)
      when (i.U === spec_debug.sp) { XSDebug(false, true.B, "   <----sp") }
      XSDebug(false, true.B, "\n")
    }
    XSDebug(spec_push, "(spec_ras)push inAddr: 0x%x  inCtr: %d | allocNewEntry:%d | sp:%d \n",
      spec_new_addr, spec_debug.push_entry.ctr, spec_debug.alloc_new, spec_debug.sp.asUInt)
    XSDebug(spec_pop, "(spec_ras)pop outAddr: 0x%x \n", io.out.target)
    val redirectUpdate = redirect.bits.cfiUpdate
    XSDebug("copyValid:%d recover(SP:%d retAddr:%x ctr:%d) \n",
      copy_valid, redirectUpdate.rasSp, redirectUpdate.rasEntry.retAddr, redirectUpdate.rasEntry.ctr)
  }

}
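
// A minimal software sketch of the counter-compressed stack implemented by
// RASStack above. This is only illustrative (e.g. as a reference model for a
// scala-level testbench); the names SoftRASModel and SoftRASEntry are
// hypothetical and not part of the XiangShan design. Pushing a return address
// that matches the top entry bumps its counter instead of allocating a new
// entry, and popping only removes the entry once the counter reaches one.
object SoftRASModel {
  case class SoftRASEntry(retAddr: BigInt, ctr: Int)

  // push: merge into the top entry when the address matches and the 8-bit
  // counter has headroom (mirrors alloc_new = addr mismatch || ctr.andR)
  def push(stack: List[SoftRASEntry], retAddr: BigInt, ctrMax: Int = 255): List[SoftRASEntry] =
    stack match {
      case top :: rest if top.retAddr == retAddr && top.ctr < ctrMax =>
        top.copy(ctr = top.ctr + 1) :: rest
      case _ =>
        SoftRASEntry(retAddr, 1) :: stack
    }

  // pop: return the top address, decrementing its counter and dropping the
  // entry only when the counter was one (mirrors the do_top.ctr === 1.U case)
  def pop(stack: List[SoftRASEntry]): (Option[BigInt], List[SoftRASEntry]) =
    stack match {
      case top :: rest if top.ctr > 1 => (Some(top.retAddr), top.copy(ctr = top.ctr - 1) :: rest)
      case top :: rest                => (Some(top.retAddr), rest)
      case Nil                        => (None, Nil)
    }
}
// Example: pushing 0x80000010 twice yields a single entry with ctr = 2; one
// pop then returns 0x80000010 and leaves the entry in place with ctr = 1.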