newRAS.scala: c6a44c3566510aac5821fd792766c985fb11f8b1 → cf7d6b7a1a781c73aeb87de112de2e7fe5ea3b7c

/***************************************************************************************
* Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
* Copyright (c) 2020-2024 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/
package xiangshan.frontend

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.frontend._

class RASEntry()(implicit p: Parameters) extends XSBundle {
  val retAddr = UInt(VAddrBits.W)
  val ctr     = UInt(RasCtrSize.W) // nesting depth of recursive calls to the same site
  def =/=(that: RASEntry) = this.retAddr =/= that.retAddr || this.ctr =/= that.ctr
}

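// Illustrative note (addresses assumed): ctr compresses direct recursion, so
// repeated calls from the same site share one entry instead of flooding the
// stack. For a return address 0x80001234:
//   push 0x80001234 -> {retAddr = 0x80001234, ctr = 0}
//   push 0x80001234 -> {retAddr = 0x80001234, ctr = 1}   // same addr: bump ctr
//   pop             -> predict 0x80001234, ctr back to 0
//   pop             -> predict 0x80001234, entry consumed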
class RASPtr(implicit p: Parameters) extends CircularQueuePtr[RASPtr](p => p(XSCoreParamsKey).RasSpecSize) {}

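// Note: a RASPtr is a {flag, value} pointer into the RasSpecSize-entry
// speculative queue: value is the index and flag flips on every wrap-around,
// which keeps age comparisons such as isBefore and distanceBetween well
// defined. inverse() flips only the flag, i.e. the same slot one full lap away.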
object RASPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): RASPtr = {
    val ptr = Wire(new RASPtr)
    ptr.flag  := f
    ptr.value := v
    ptr
  }
  def inverse(ptr: RASPtr)(implicit p: Parameters): RASPtr =
    apply(!ptr.flag, ptr.value)
}

class RASInternalMeta(implicit p: Parameters) extends XSBundle {
  val ssp  = UInt(log2Up(RasSize).W)
  val sctr = UInt(RasCtrSize.W)
  val TOSW = new RASPtr
  val TOSR = new RASPtr
  val NOS  = new RASPtr
}

object RASInternalMeta {
  def apply(ssp: UInt, sctr: UInt, TOSW: RASPtr, TOSR: RASPtr, NOS: RASPtr)(implicit p: Parameters): RASInternalMeta = {
    val e = Wire(new RASInternalMeta)
    e.ssp  := ssp
    e.sctr := sctr
    e.TOSW := TOSW
    e.TOSR := TOSR
    e.NOS  := NOS
    e
  }
}

class RASMeta(implicit p: Parameters) extends XSBundle {
  val ssp  = UInt(log2Up(RasSize).W)
  val TOSW = new RASPtr
}

object RASMeta {
  def apply(ssp: UInt, sctr: UInt, TOSW: RASPtr, TOSR: RASPtr, NOS: RASPtr)(implicit p: Parameters): RASMeta = {
    val e = Wire(new RASMeta)
    e.ssp  := ssp
    e.TOSW := TOSW
    e
  }
}

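// Note: RASMeta, which travels with the prediction all the way to commit, keeps
// only ssp and TOSW: commit needs TOSW to find the pushed address in the
// speculative queue, and ssp just as a cross-check against nsp (marked "for
// debug purpose only" below). The fuller RASInternalMeta exists for in-pipeline
// recovery, where sctr/TOSR/NOS are also required.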
class RASDebug(implicit p: Parameters) extends XSBundle {
  val spec_queue   = Output(Vec(RasSpecSize, new RASEntry))
  val spec_nos     = Output(Vec(RasSpecSize, new RASPtr))
  val commit_stack = Output(Vec(RasSize, new RASEntry))
}

class RAS(implicit p: Parameters) extends BasePredictor {
  override val meta_size = WireInit(0.U.asTypeOf(new RASMeta)).getWidth

  object RASEntry {
    def apply(retAddr: UInt, ctr: UInt): RASEntry = {
      val e = Wire(new RASEntry)
      e.retAddr := retAddr
      e.ctr     := ctr
      e
    }
  }

  class RASStack(rasSize: Int, rasSpecSize: Int) extends XSModule with HasCircularQueuePtrHelper {
    val io = IO(new Bundle {
      val spec_push_valid = Input(Bool())
      val spec_pop_valid  = Input(Bool())
      val spec_push_addr  = Input(UInt(VAddrBits.W))
      // for write bypass between s2 and s3

      val s2_fire        = Input(Bool())
      val s3_fire        = Input(Bool())
      val s3_cancel      = Input(Bool())
      val s3_meta        = Input(new RASInternalMeta)
      val s3_missed_pop  = Input(Bool())
      val s3_missed_push = Input(Bool())
      val s3_pushAddr    = Input(UInt(VAddrBits.W))
      val spec_pop_addr  = Output(UInt(VAddrBits.W))

      val commit_valid      = Input(Bool())
      val commit_push_valid = Input(Bool())
      val commit_pop_valid  = Input(Bool())
      val commit_push_addr  = Input(UInt(VAddrBits.W))
      val commit_meta_TOSW  = Input(new RASPtr)
      // for debug purpose only
      val commit_meta_ssp = Input(UInt(log2Up(RasSize).W))

      val redirect_valid     = Input(Bool())
      val redirect_isCall    = Input(Bool())
      val redirect_isRet     = Input(Bool())
      val redirect_meta_ssp  = Input(UInt(log2Up(RasSize).W))
      val redirect_meta_sctr = Input(UInt(RasCtrSize.W))
      val redirect_meta_TOSW = Input(new RASPtr)
      val redirect_meta_TOSR = Input(new RASPtr)
      val redirect_meta_NOS  = Input(new RASPtr)
      val redirect_callAddr  = Input(UInt(VAddrBits.W))

      val ssp  = Output(UInt(log2Up(RasSize).W))
      val sctr = Output(UInt(RasCtrSize.W))
      val nsp  = Output(UInt(log2Up(RasSize).W))
      val TOSR = Output(new RASPtr)
      val TOSW = Output(new RASPtr)
      val NOS  = Output(new RASPtr)
      val BOS  = Output(new RASPtr)

      val spec_near_overflow = Output(Bool())

      val debug = new RASDebug
    })

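    // Note on storage: commit_stack is a classic RAS indexed by ssp (the
    // speculative stack pointer) and nsp (its committed counterpart), while the
    // speculative part is a circular queue of entries plus per-entry NOS
    // ("next on stack") links, bounded by BOS (bottom of stack) and TOSW (next
    // write slot), with TOSR naming the entry currently on top.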
    val commit_stack = RegInit(VecInit(Seq.fill(RasSize)(RASEntry(0.U, 0.U))))
    val spec_queue   = RegInit(VecInit(Seq.fill(rasSpecSize)(RASEntry(0.U, 0.U))))
    val spec_nos     = RegInit(VecInit(Seq.fill(rasSpecSize)(RASPtr(false.B, 0.U))))

    val nsp = RegInit(0.U(log2Up(rasSize).W))
    val ssp = RegInit(0.U(log2Up(rasSize).W))

    val sctr = RegInit(0.U(RasCtrSize.W))
    val TOSR = RegInit(RASPtr(true.B, (RasSpecSize - 1).U))
    val TOSW = RegInit(RASPtr(false.B, 0.U))
    val BOS  = RegInit(RASPtr(false.B, 0.U))

    val spec_near_overflowed = RegInit(false.B)

    val writeBypassEntry = Reg(new RASEntry)
    val writeBypassNos   = Reg(new RASPtr)

    val writeBypassValid = RegInit(0.B)
    val writeBypassValidWire = Wire(Bool())

    def TOSRinRange(currentTOSR: RASPtr, currentTOSW: RASPtr) = {
      val inflightValid = WireInit(false.B)
      // if in range, TOSR should be no younger than BOS and strictly younger than TOSW
      when(!isBefore(currentTOSR, BOS) && isBefore(currentTOSR, currentTOSW)) {
        inflightValid := true.B
      }
      inflightValid
    }

    def getCommitTop(currentSsp: UInt) =
      commit_stack(currentSsp)

    def getTopNos(currentTOSR: RASPtr, allowBypass: Boolean): RASPtr = {
      val ret = Wire(new RASPtr)
      if (allowBypass) {
        when(writeBypassValid) {
          ret := writeBypassNos
        }.otherwise {
          ret := spec_nos(TOSR.value)
        }
      } else {
        ret := spec_nos(TOSR.value) // invalid when TOSR is not in range
      }
      ret
    }

    def getTop(
        currentSsp:  UInt,
        currentSctr: UInt,
        currentTOSR: RASPtr,
        currentTOSW: RASPtr,
        allowBypass: Boolean
    ): RASEntry = {
      val ret = Wire(new RASEntry)
      if (allowBypass) {
        when(writeBypassValid) {
          ret := writeBypassEntry
        }.elsewhen(TOSRinRange(currentTOSR, currentTOSW)) {
          ret := spec_queue(currentTOSR.value)
        }.otherwise {
          ret := getCommitTop(currentSsp)
        }
      } else {
        when(TOSRinRange(currentTOSR, currentTOSW)) {
          ret := spec_queue(currentTOSR.value)
        }.otherwise {
          ret := getCommitTop(currentSsp)
        }
      }

      ret
    }

    // it would be unsafe for specPtr manipulation if specSize is not a power of 2
    assert(log2Up(RasSpecSize) == log2Floor(RasSpecSize))
    def ctrMax = ((1L << RasCtrSize) - 1).U
    def ptrInc(ptr: UInt) = ptr + 1.U
    def ptrDec(ptr: UInt) = ptr - 1.U

    def specPtrInc(ptr: RASPtr) = ptr + 1.U
    def specPtrDec(ptr: RASPtr) = ptr - 1.U

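    // Illustrative note (sizes assumed): the UInt helpers rely on natural
    // wrap-around of the index width, which is only correct for power-of-two
    // sizes, hence the assert above. With rasSize = 16, ssp is 4 bits wide, so
    // ptrInc(15.U) wraps to 0 and ptrDec(0.U) wraps to 15 for free; the RASPtr
    // variants get the same wrap (plus the flag flip) from CircularQueuePtr.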
    when(io.redirect_valid && io.redirect_isCall) {
      writeBypassValidWire := true.B
      writeBypassValid     := true.B
    }.elsewhen(io.redirect_valid) {
      // clear current top writeBypass if doing redirect
      writeBypassValidWire := false.B
      writeBypassValid     := false.B
    }.elsewhen(io.s2_fire) {
      writeBypassValidWire := io.spec_push_valid
      writeBypassValid     := io.spec_push_valid
    }.elsewhen(io.s3_fire) {
      writeBypassValidWire := false.B
      writeBypassValid     := false.B
    }.otherwise {
      writeBypassValidWire := writeBypassValid
    }

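    // Note: bypass validity is prioritized: call redirect > other redirect >
    // s2 push > s3 fire. An entry pushed in s2 stays visible through the bypass
    // registers until s3 actually writes it into spec_queue, so reads in the
    // shadow of a fresh push still observe the newest top.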
    val topEntry = getTop(ssp, sctr, TOSR, TOSW, true)
    val topNos   = getTopNos(TOSR, true)
    val redirectTopEntry =
      getTop(io.redirect_meta_ssp, io.redirect_meta_sctr, io.redirect_meta_TOSR, io.redirect_meta_TOSW, false)
    val redirectTopNos = io.redirect_meta_NOS
    val s3TopEntry     = getTop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, false)
    val s3TopNos       = io.s3_meta.NOS

    val writeEntry = Wire(new RASEntry)
    val writeNos   = Wire(new RASPtr)
    writeEntry.retAddr := Mux(io.redirect_valid && io.redirect_isCall, io.redirect_callAddr, io.spec_push_addr)
    writeEntry.ctr := Mux(
      io.redirect_valid && io.redirect_isCall,
      Mux(
        redirectTopEntry.retAddr === io.redirect_callAddr && redirectTopEntry.ctr < ctrMax,
        io.redirect_meta_sctr + 1.U,
        0.U
      ),
      Mux(topEntry.retAddr === io.spec_push_addr && topEntry.ctr < ctrMax, sctr + 1.U, 0.U)
    )

    writeNos := Mux(io.redirect_valid && io.redirect_isCall, io.redirect_meta_TOSR, TOSR)

    when(io.spec_push_valid || (io.redirect_valid && io.redirect_isCall)) {
      writeBypassEntry := writeEntry
      writeBypassNos   := writeNos
    }

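    // Note: writeEntry encodes the recursion-compression rule: if the pushed
    // address matches the current top and its counter is below ctrMax, the new
    // entry carries ctr + 1; otherwise it starts fresh at 0. writeNos records
    // the old TOSR so entries chain into a linked list via spec_nos.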
    val realPush       = Wire(Bool())
    val realWriteEntry = Wire(new RASEntry)
    val timingTop      = RegInit(0.U.asTypeOf(new RASEntry))
    val timingNos      = RegInit(0.U.asTypeOf(new RASPtr))

    when(writeBypassValidWire) {
      when((io.redirect_valid && io.redirect_isCall) || io.spec_push_valid) {
        timingTop := writeEntry
        timingNos := writeNos
      }.otherwise {
        timingTop := writeBypassEntry
        timingNos := writeBypassNos
      }

    }.elsewhen(io.redirect_valid && io.redirect_isRet) {
      // getTop using redirect Nos as TOSR
      val popRedSsp  = Wire(UInt(log2Up(rasSize).W))
      val popRedSctr = Wire(UInt(RasCtrSize.W))
      val popRedTOSR = io.redirect_meta_NOS
      val popRedTOSW = io.redirect_meta_TOSW

      when(io.redirect_meta_sctr > 0.U) {
        popRedSctr := io.redirect_meta_sctr - 1.U
        popRedSsp  := io.redirect_meta_ssp
      }.elsewhen(TOSRinRange(popRedTOSR, TOSW)) {
        popRedSsp  := ptrDec(io.redirect_meta_ssp)
        popRedSctr := spec_queue(popRedTOSR.value).ctr
      }.otherwise {
        popRedSsp  := ptrDec(io.redirect_meta_ssp)
        popRedSctr := getCommitTop(ptrDec(io.redirect_meta_ssp)).ctr
      }
      // We are deciding top for the next cycle, no need to use bypass here
      timingTop := getTop(popRedSsp, popRedSctr, popRedTOSR, popRedTOSW, false)
    }.elsewhen(io.redirect_valid) {
      // Neither call nor ret
      val popSsp  = io.redirect_meta_ssp
      val popSctr = io.redirect_meta_sctr
      val popTOSR = io.redirect_meta_TOSR
      val popTOSW = io.redirect_meta_TOSW

      timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false)

    }.elsewhen(io.spec_pop_valid) {
      // getTop using current Nos as TOSR
      val popSsp  = Wire(UInt(log2Up(rasSize).W))
      val popSctr = Wire(UInt(RasCtrSize.W))
      val popTOSR = topNos
      val popTOSW = TOSW

      when(sctr > 0.U) {
        popSctr := sctr - 1.U
        popSsp  := ssp
      }.elsewhen(TOSRinRange(popTOSR, TOSW)) {
        popSsp  := ptrDec(ssp)
        popSctr := spec_queue(popTOSR.value).ctr
      }.otherwise {
        popSsp  := ptrDec(ssp)
        popSctr := getCommitTop(ptrDec(ssp)).ctr
      }
      // We are deciding top for the next cycle, no need to use bypass here
      timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false)
    }.elsewhen(realPush) {
      // just updating the spec queue; cannot read from there yet
      timingTop := realWriteEntry
    }.elsewhen(io.s3_cancel) {
      // s3 differs from s2
      timingTop := getTop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, false)
      when(io.s3_missed_push) {
        val writeEntry_s3 = Wire(new RASEntry)
        timingTop            := writeEntry_s3
        writeEntry_s3.retAddr := io.s3_pushAddr
        writeEntry_s3.ctr := Mux(
          timingTop.retAddr === io.s3_pushAddr && io.s3_meta.sctr < ctrMax,
          io.s3_meta.sctr + 1.U,
          0.U
        )
      }.elsewhen(io.s3_missed_pop) {
        val popRedSsp_s3  = Wire(UInt(log2Up(rasSize).W))
        val popRedSctr_s3 = Wire(UInt(RasCtrSize.W))
        val popRedTOSR_s3 = io.s3_meta.NOS
        val popRedTOSW_s3 = io.s3_meta.TOSW

        when(io.s3_meta.sctr > 0.U) {
          popRedSctr_s3 := io.s3_meta.sctr - 1.U
          popRedSsp_s3  := io.s3_meta.ssp
        }.elsewhen(TOSRinRange(popRedTOSR_s3, popRedTOSW_s3)) {
          popRedSsp_s3  := ptrDec(io.s3_meta.ssp)
          popRedSctr_s3 := spec_queue(popRedTOSR_s3.value).ctr
        }.otherwise {
          popRedSsp_s3  := ptrDec(io.s3_meta.ssp)
          popRedSctr_s3 := getCommitTop(ptrDec(io.s3_meta.ssp)).ctr
        }
        // We are deciding top for the next cycle, no need to use bypass here
        timingTop := getTop(popRedSsp_s3, popRedSctr_s3, popRedTOSR_s3, popRedTOSW_s3, false)
      }
    }.otherwise {
      // easy case
      val popSsp  = ssp
      val popSctr = sctr
      val popTOSR = TOSR
      val popTOSW = TOSW
      timingTop := getTop(popSsp, popSctr, popTOSR, popTOSW, false)
    }

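    // Note: timingTop is a registered precomputation of next cycle's top of
    // stack, selected by whichever event (bypass write, redirect, pop, real
    // push, s3 cancel) decides it; spec_pop_addr is then served from this
    // register instead of a long combinational read. diffTop below cross-checks
    // it against the combinational top via a perf counter.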
    val diffTop = Mux(writeBypassValid, writeBypassEntry.retAddr, topEntry.retAddr)

    XSPerfAccumulate("ras_top_mismatch", diffTop =/= timingTop.retAddr)
    // may differ when there are more pops than pushes and the commit stack is updated with in-flight info

    val realWriteEntry_next = RegEnable(writeEntry, io.s2_fire || io.redirect_isCall)
    val s3_missPushEntry    = Wire(new RASEntry)
    val s3_missPushAddr     = Wire(new RASPtr)
    val s3_missPushNos      = Wire(new RASPtr)

    s3_missPushEntry.retAddr := io.s3_pushAddr
    s3_missPushEntry.ctr := Mux(
      s3TopEntry.retAddr === io.s3_pushAddr && s3TopEntry.ctr < ctrMax,
      io.s3_meta.sctr + 1.U,
      0.U
    )
    s3_missPushAddr := io.s3_meta.TOSW
    s3_missPushNos  := io.s3_meta.TOSR

    realWriteEntry := Mux(
      io.redirect_isCall,
      realWriteEntry_next,
      Mux(io.s3_missed_push, s3_missPushEntry, realWriteEntry_next)
    )

    val realWriteAddr_next = RegEnable(
      Mux(io.redirect_valid && io.redirect_isCall, io.redirect_meta_TOSW, TOSW),
      io.s2_fire || (io.redirect_valid && io.redirect_isCall)
    )
    val realWriteAddr =
      Mux(io.redirect_isCall, realWriteAddr_next, Mux(io.s3_missed_push, s3_missPushAddr, realWriteAddr_next))
    val realNos_next = RegEnable(
      Mux(io.redirect_valid && io.redirect_isCall, io.redirect_meta_TOSR, TOSR),
      io.s2_fire || (io.redirect_valid && io.redirect_isCall)
    )
    val realNos = Mux(io.redirect_isCall, realNos_next, Mux(io.s3_missed_push, s3_missPushNos, realNos_next))

    realPush := (io.s3_fire && (!io.s3_cancel && RegEnable(
      io.spec_push_valid,
      io.s2_fire
    ) || io.s3_missed_push)) || RegNext(io.redirect_valid && io.redirect_isCall)

    when(realPush) {
      spec_queue(realWriteAddr.value) := realWriteEntry
      spec_nos(realWriteAddr.value)   := realNos
    }

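    // Note: the physical write into spec_queue/spec_nos happens here in s3 (or
    // one cycle after a call redirect), not in s2; that deferral is exactly why
    // the s2-side write bypass above exists.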
    def specPush(
        retAddr:     UInt,
        currentSsp:  UInt,
        currentSctr: UInt,
        currentTOSR: RASPtr,
        currentTOSW: RASPtr,
        topEntry:    RASEntry
    ) = {
      TOSR := currentTOSW
      TOSW := specPtrInc(currentTOSW)
      // spec sp and ctr should always be maintained
      when(topEntry.retAddr === retAddr && currentSctr < ctrMax) {
        sctr := currentSctr + 1.U
      }.otherwise {
        ssp  := ptrInc(currentSsp)
        sctr := 0.U
      }
    }

    when(io.spec_push_valid) {
      specPush(io.spec_push_addr, ssp, sctr, TOSR, TOSW, topEntry)
    }
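    // Note: a push never overwrites an existing slot; it always allocates at
    // TOSW and points TOSR there, while the displaced TOSR is remembered in
    // spec_nos as the new entry's NOS. Older in-flight predictions thus keep a
    // consistent linked-list view of the stack even as younger pushes arrive.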
    def specPop(
        currentSsp:    UInt,
        currentSctr:   UInt,
        currentTOSR:   RASPtr,
        currentTOSW:   RASPtr,
        currentTopNos: RASPtr
    ) = {
      // TOSR is only maintained when the spec queue is not empty
      when(TOSRinRange(currentTOSR, currentTOSW)) {
        TOSR := currentTopNos
      }
      // spec sp and ctr should always be maintained
      when(currentSctr > 0.U) {
        sctr := currentSctr - 1.U
      }.elsewhen(TOSRinRange(currentTopNos, currentTOSW)) {
        // in range, use inflight data
        ssp  := ptrDec(currentSsp)
        sctr := spec_queue(currentTopNos.value).ctr
      }.otherwise {
        // NOS not in range, use commit data
        ssp  := ptrDec(currentSsp)
        sctr := getCommitTop(ptrDec(currentSsp)).ctr
        // in overflow state, we cannot determine the next sctr, so sctr here is not accurate
      }
    }
    when(io.spec_pop_valid) {
      specPop(ssp, sctr, TOSR, TOSW, topNos)
    }

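    // Note: a pop mirrors the push-side counter rule: drain sctr first (nested
    // calls to the same site), then follow the NOS link to the in-flight
    // predecessor, and only fall back to the committed stack once the link has
    // left the BOS..TOSW window; in that last case sctr may be inexact.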
    // io.spec_pop_addr := Mux(writeBypassValid, writeBypassEntry.retAddr, topEntry.retAddr)

    io.spec_pop_addr := timingTop.retAddr
    io.BOS           := BOS
    io.TOSW          := TOSW
    io.TOSR          := TOSR
    io.NOS           := topNos
    io.ssp           := ssp
    io.sctr          := sctr
    io.nsp           := nsp

    when(io.s3_cancel) {
      // recovery of all related pointers
      TOSR := io.s3_meta.TOSR
      TOSW := io.s3_meta.TOSW
      ssp  := io.s3_meta.ssp
      sctr := io.s3_meta.sctr

      // for a missed pop, we also need to do a pop here
      when(io.s3_missed_pop) {
        specPop(io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, io.s3_meta.NOS)
      }
      when(io.s3_missed_push) {
        // do not use any bypass from f2
        specPush(io.s3_pushAddr, io.s3_meta.ssp, io.s3_meta.sctr, io.s3_meta.TOSR, io.s3_meta.TOSW, s3TopEntry)
      }
    }

    val commitTop = commit_stack(nsp)

    when(io.commit_pop_valid) {

      val nsp_update = Wire(UInt(log2Up(rasSize).W))
      when(io.commit_meta_ssp =/= nsp) {
        // force set nsp to commit ssp to avoid permanent errors
        nsp_update := io.commit_meta_ssp
      }.otherwise {
        nsp_update := nsp
      }

      // if ctr > 0, --ctr in stack, otherwise --nsp
      when(commitTop.ctr > 0.U) {
        commit_stack(nsp_update).ctr := commitTop.ctr - 1.U
        nsp                          := nsp_update
      }.otherwise {
        nsp := ptrDec(nsp_update)
      }
      // XSError(io.commit_meta_ssp =/= nsp, "nsp mismatch with expected ssp")
    }

    val commit_push_addr = spec_queue(io.commit_meta_TOSW.value).retAddr

    when(io.commit_push_valid) {
      val nsp_update = Wire(UInt(log2Up(rasSize).W))
      when(io.commit_meta_ssp =/= nsp) {
        // force set nsp to commit ssp to avoid permanent errors
        nsp_update := io.commit_meta_ssp
      }.otherwise {
        nsp_update := nsp
      }
      // if ctr < max && topAddr == push addr, ++ctr, otherwise ++nsp
      when(commitTop.ctr < ctrMax && commitTop.retAddr === commit_push_addr) {
        commit_stack(nsp_update).ctr := commitTop.ctr + 1.U
        nsp                          := nsp_update
      }.otherwise {
        nsp                                      := ptrInc(nsp_update)
        commit_stack(ptrInc(nsp_update)).retAddr := commit_push_addr
        commit_stack(ptrInc(nsp_update)).ctr     := 0.U
      }

      // XSError(io.commit_meta_ssp =/= nsp, "nsp mismatch with expected ssp")
      // XSError(io.commit_push_addr =/= commit_push_addr, "addr from commit mismatch with addr from spec")
    }

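    // Note: the commit-side push does not carry the full return address through
    // the pipeline; it is read back from spec_queue at the TOSW recorded in the
    // meta. nsp is also force-resynced to the committed ssp whenever the two
    // disagree, so a transient mismatch cannot become permanent.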
    when(io.commit_push_valid) {
      BOS := io.commit_meta_TOSW
    }.elsewhen(io.commit_valid && (distanceBetween(io.commit_meta_TOSW, BOS) > 2.U)) {
      BOS := specPtrDec(io.commit_meta_TOSW)
    }

    when(io.redirect_valid) {
      TOSR := io.redirect_meta_TOSR
      TOSW := io.redirect_meta_TOSW
      ssp  := io.redirect_meta_ssp
      sctr := io.redirect_meta_sctr

      when(io.redirect_isCall) {
        specPush(
          io.redirect_callAddr,
          io.redirect_meta_ssp,
          io.redirect_meta_sctr,
          io.redirect_meta_TOSR,
          io.redirect_meta_TOSW,
          redirectTopEntry
        )
      }
      when(io.redirect_isRet) {
        specPop(
          io.redirect_meta_ssp,
          io.redirect_meta_sctr,
          io.redirect_meta_TOSR,
          io.redirect_meta_TOSW,
          redirectTopNos
        )
      }
    }

    when(distanceBetween(TOSW, BOS) > (rasSpecSize - 4).U) {
      spec_near_overflowed := true.B
    }.otherwise {
      spec_near_overflowed := false.B
    }

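    // Note: "near overflow" asserts a few entries early (at rasSpecSize - 4)
    // rather than exactly at the wrap point; the top level turns it into
    // !io.s1_ready, stalling new predictions before TOSW can lap BOS and
    // overwrite speculative entries that are still in flight.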
    io.spec_near_overflow := spec_near_overflowed
    XSPerfAccumulate("spec_near_overflow", spec_near_overflowed)
    io.debug.commit_stack.zipWithIndex.foreach { case (a, i) => a := commit_stack(i) }
    io.debug.spec_nos.zipWithIndex.foreach { case (a, i) => a := spec_nos(i) }
    io.debug.spec_queue.zipWithIndex.foreach { case (a, i) => a := spec_queue(i) }
  }

  val stack = Module(new RASStack(RasSize, RasSpecSize)).io

  val s2_spec_push = WireInit(false.B)
  val s2_spec_pop  = WireInit(false.B)
  val s2_full_pred = io.in.bits.resp_in(0).s2.full_pred(2)
  // when the last inst is an RVI call, the fall-through address points to the middle of it, so an addition is needed
  val s2_spec_new_addr = s2_full_pred.fallThroughAddr + Mux(s2_full_pred.last_may_be_rvi_call, 2.U, 0.U)
  stack.spec_push_valid := s2_spec_push
  stack.spec_pop_valid  := s2_spec_pop
  stack.spec_push_addr  := s2_spec_new_addr

  // confirm that the call/ret is the taken cfi
  s2_spec_push := io.s2_fire(2) && s2_full_pred.hit_taken_on_call && !io.s3_redirect(2)
  s2_spec_pop  := io.s2_fire(2) && s2_full_pred.hit_taken_on_ret && !io.s3_redirect(2)

  // val s2_jalr_target = io.out.s2.full_pred.jalr_target
  // val s2_last_target_in = s2_full_pred.targets.last
  // val s2_last_target_out = io.out.s2.full_pred(2).targets.last
  val s2_is_jalr = s2_full_pred.is_jalr
  val s2_is_ret  = s2_full_pred.is_ret
  val s2_top     = stack.spec_pop_addr
  // assert(is_jalr && is_ret || !is_ret)
  when(s2_is_ret && io.ctrl.ras_enable) {
    io.out.s2.full_pred.map(_.jalr_target).foreach(_ := s2_top)
    // FIXME: should use s1 globally
  }
  // s2_last_target_out := Mux(s2_is_jalr, s2_jalr_target, s2_last_target_in)
  io.out.s2.full_pred.zipWithIndex.foreach { case (a, i) =>
    a.targets.last := Mux(
      s2_is_jalr,
      io.out.s2.full_pred(i).jalr_target,
      io.in.bits.resp_in(0).s2.full_pred(i).targets.last
    )
  }

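  // Note: the RAS only refines the final target slot: for a predicted return
  // the jalr_target is overridden with the stack top (when ras_enable is set),
  // and targets.last then selects that jalr_target for any jalr; everything
  // else in resp_in(0) passes through unchanged.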
  val s2_meta = Wire(new RASInternalMeta)
  s2_meta.ssp  := stack.ssp
  s2_meta.sctr := stack.sctr
  s2_meta.TOSR := stack.TOSR
  s2_meta.TOSW := stack.TOSW
  s2_meta.NOS  := stack.NOS

  val s3_top           = RegEnable(stack.spec_pop_addr, io.s2_fire(2))
  val s3_spec_new_addr = RegEnable(s2_spec_new_addr, io.s2_fire(2))

  // val s3_jalr_target = io.out.s3.full_pred.jalr_target
  // val s3_last_target_in = io.in.bits.resp_in(0).s3.full_pred(2).targets.last
  // val s3_last_target_out = io.out.s3.full_pred(2).targets.last
  val s3_is_jalr =
    io.in.bits.resp_in(0).s3.full_pred(2).is_jalr && !io.in.bits.resp_in(0).s3.full_pred(2).fallThroughErr
  val s3_is_ret = io.in.bits.resp_in(0).s3.full_pred(2).is_ret && !io.in.bits.resp_in(0).s3.full_pred(2).fallThroughErr
  // assert(is_jalr && is_ret || !is_ret)
  when(s3_is_ret && io.ctrl.ras_enable) {
    io.out.s3.full_pred.map(_.jalr_target).foreach(_ := s3_top)
    // FIXME: should use s1 globally
  }
  // s3_last_target_out := Mux(s3_is_jalr, s3_jalr_target, s3_last_target_in)
  io.out.s3.full_pred.zipWithIndex.foreach { case (a, i) =>
    a.targets.last := Mux(
      s3_is_jalr,
      io.out.s3.full_pred(i).jalr_target,
      io.in.bits.resp_in(0).s3.full_pred(i).targets.last
    )
  }

  val s3_pushed_in_s2 = RegEnable(s2_spec_push, io.s2_fire(2))
  val s3_popped_in_s2 = RegEnable(s2_spec_pop, io.s2_fire(2))
  val s3_push =
    io.in.bits.resp_in(0).s3.full_pred(2).hit_taken_on_call && !io.in.bits.resp_in(0).s3.full_pred(2).fallThroughErr
  val s3_pop =
    io.in.bits.resp_in(0).s3.full_pred(2).hit_taken_on_ret && !io.in.bits.resp_in(0).s3.full_pred(2).fallThroughErr

  val s3_cancel = io.s3_fire(2) && (s3_pushed_in_s2 =/= s3_push || s3_popped_in_s2 =/= s3_pop)
  stack.s2_fire := io.s2_fire(2)
  stack.s3_fire := io.s3_fire(2)

  stack.s3_cancel := s3_cancel

  val s3_meta = RegEnable(s2_meta, io.s2_fire(2))

  stack.s3_meta        := s3_meta
  stack.s3_missed_pop  := s3_pop && !s3_popped_in_s2
  stack.s3_missed_push := s3_push && !s3_pushed_in_s2
  stack.s3_pushAddr    := s3_spec_new_addr

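  // Note: s3 replays the s2 decision against the more accurate s3 prediction.
  // s3_cancel restores the pointers snapshotted into s2_meta, and
  // s3_missed_pop / s3_missed_push reapply the operation s2 skipped or got
  // wrong, so the stack state converges on the s3 view.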
  // no longer need the top entry, only TOSR, TOSW, ssp, sctr
  // TODO: remove related signals

  val last_stage_meta = Wire(new RASMeta)
  last_stage_meta.ssp  := s3_meta.ssp
  last_stage_meta.TOSW := s3_meta.TOSW

  io.s1_ready := !stack.spec_near_overflow

  io.out.last_stage_spec_info.sctr    := s3_meta.sctr
  io.out.last_stage_spec_info.ssp     := s3_meta.ssp
  io.out.last_stage_spec_info.TOSW    := s3_meta.TOSW
  io.out.last_stage_spec_info.TOSR    := s3_meta.TOSR
  io.out.last_stage_spec_info.NOS     := s3_meta.NOS
  io.out.last_stage_spec_info.topAddr := s3_top
  io.out.last_stage_meta              := last_stage_meta.asUInt

  val redirect    = RegNextWithEnable(io.redirect)
  val do_recover  = redirect.valid
  val recover_cfi = redirect.bits.cfiUpdate

  val retMissPred  = do_recover && redirect.bits.level === 0.U && recover_cfi.pd.isRet
  val callMissPred = do_recover && redirect.bits.level === 0.U && recover_cfi.pd.isCall
  // when we mispredict a call, we must redo a push operation
  // similarly, when we mispredict a return, we should redo a pop
  stack.redirect_valid     := do_recover
  stack.redirect_isCall    := callMissPred
  stack.redirect_isRet     := retMissPred
  stack.redirect_meta_ssp  := recover_cfi.ssp
  stack.redirect_meta_sctr := recover_cfi.sctr
  stack.redirect_meta_TOSW := recover_cfi.TOSW
  stack.redirect_meta_TOSR := recover_cfi.TOSR
  stack.redirect_meta_NOS  := recover_cfi.NOS
  stack.redirect_callAddr  := recover_cfi.pc + Mux(recover_cfi.pd.isRVC, 2.U, 4.U)

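  // Note: on a call misprediction the return address is recomputed from the
  // committed pc: +2 for a compressed (RVC) call, +4 otherwise. This parallels
  // the s2-side fixup above, where an RVI call at the end of a fetch block
  // needs fallThroughAddr + 2 to point past the full 4-byte instruction.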
  val update      = io.update.bits
  val updateMeta  = io.update.bits.meta.asTypeOf(new RASMeta)
  val updateValid = io.update.valid

  stack.commit_valid      := updateValid
  stack.commit_push_valid := updateValid && update.is_call_taken
  stack.commit_pop_valid  := updateValid && update.is_ret_taken
  stack.commit_push_addr := update.ftb_entry.getFallThrough(update.pc) + Mux(
    update.ftb_entry.last_may_be_rvi_call,
    2.U,
    0.U
  )
  stack.commit_meta_TOSW := updateMeta.TOSW
  stack.commit_meta_ssp  := updateMeta.ssp

  XSPerfAccumulate("ras_s3_cancel", s3_cancel)
  XSPerfAccumulate("ras_redirect_recover", redirect.valid)
  XSPerfAccumulate("ras_s3_and_redirect_recover_at_the_same_time", s3_cancel && redirect.valid)

  val spec_debug = stack.debug
  XSDebug(io.s2_fire(2), "----------------RAS----------------\n")
  XSDebug(io.s2_fire(2), " TopRegister: 0x%x\n", stack.spec_pop_addr)
  XSDebug(io.s2_fire(2), "  index       addr           ctr           nos (spec part)\n")
  for (i <- 0 until RasSpecSize) {
    XSDebug(
      io.s2_fire(2),
      "  (%d)   0x%x      %d       %d",
      i.U,
      spec_debug.spec_queue(i).retAddr,
      spec_debug.spec_queue(i).ctr,
      spec_debug.spec_nos(i).value
    )
    when(i.U === stack.TOSW.value)(XSDebug(io.s2_fire(2), "   <----TOSW"))
    when(i.U === stack.TOSR.value)(XSDebug(io.s2_fire(2), "   <----TOSR"))
    when(i.U === stack.BOS.value)(XSDebug(io.s2_fire(2), "   <----BOS"))
    XSDebug(io.s2_fire(2), "\n")
  }
  XSDebug(io.s2_fire(2), "  index       addr           ctr   (committed part)\n")
  for (i <- 0 until RasSize) {
    XSDebug(
      io.s2_fire(2),
      "  (%d)   0x%x      %d",
      i.U,
      spec_debug.commit_stack(i).retAddr,
      spec_debug.commit_stack(i).ctr
    )
    when(i.U === stack.ssp)(XSDebug(io.s2_fire(2), "   <----ssp"))
    when(i.U === stack.nsp)(XSDebug(io.s2_fire(2), "   <----nsp"))
    XSDebug(io.s2_fire(2), "\n")
  }
  /*
  XSDebug(s2_spec_push, "s2_spec_push  inAddr: 0x%x  inCtr: %d |  allocNewEntry:%d |  sp:%d\n",
    s2_spec_new_addr, spec_debug.spec_push_entry.ctr, spec_debug.spec_alloc_new, spec_debug.sp.asUInt)
  XSDebug(s2_spec_pop, "s2_spec_pop  outAddr: 0x%x\n", io.out.s2.getTarget)
  val s3_recover_entry = spec_debug.recover_push_entry
  XSDebug(s3_recover && s3_push, "s3_recover_push  inAddr: 0x%x  inCtr: %d |  allocNewEntry:%d |  sp:%d\n",
    s3_recover_entry.retAddr, s3_recover_entry.ctr, spec_debug.recover_alloc_new, s3_sp.asUInt)
  XSDebug(s3_recover && s3_pop, "s3_recover_pop  outAddr: 0x%x\n", io.out.s3.getTarget)
  val redirectUpdate = redirect.bits.cfiUpdate
  XSDebug(do_recover && callMissPred, "redirect_recover_push\n")
  XSDebug(do_recover && retMissPred, "redirect_recover_pop\n")
  XSDebug(do_recover, "redirect_recover(SP:%d retAddr:%x ctr:%d)\n",
    redirectUpdate.rasSp, redirectUpdate.rasEntry.retAddr, redirectUpdate.rasEntry.ctr)
   */

  generatePerfEvent()
}