/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.backend.fu.fpu.FPU
import xiangshan.backend.rob.RobLsqIO
import xiangshan.cache._
import xiangshan.frontend.FtqPtr


class LqPtr(implicit p: Parameters) extends CircularQueuePtr[LqPtr](
  p => p(XSCoreParamsKey).LoadQueueSize
){
  override def cloneType = (new LqPtr).asInstanceOf[this.type]
}

object LqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb  -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh  -> SignExt(rdata(15, 0), XLEN),
      /*
        riscv-spec-20191213: 12.2 NaN Boxing of Narrower Values
        Any operation that writes a narrower result to an f register must write
        all 1s to the uppermost FLEN−n bits to yield a legal NaN-boxed value.
      */
      LSUOpType.lw  -> Mux(fpWen, FPU.box(rdata, FPU.S), SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld  -> Mux(fpWen, FPU.box(rdata, FPU.D), SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu -> ZeroExt(rdata(31, 0), XLEN),
    ))
  }
}
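
// Illustrative sketch (not part of the design): for a floating-point load
// (fuOpType = lw, fpWen = true) with rdata = 0x00000000_3f800000, FPU.box is
// expected to fill the upper FLEN-32 bits with 1s, yielding the NaN-boxed
// value 0xffffffff_3f800000; an integer lw of the same data is instead
// sign-extended to 0x00000000_3f800000. Conceptually (assumed expansion):
//   val boxed = Cat(~0.U((XLEN - 32).W), rdata(31, 0))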

class LqEnqIO(implicit p: Parameters) extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(exuParameters.LsExuCnt, Input(Bool()))
  val req = Vec(exuParameters.LsExuCnt, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(exuParameters.LsExuCnt, Output(new LqPtr))
}

// Load Queue
class LoadQueue(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasPerfEvents
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val loadDataForwarded = Vec(LoadPipelineWidth, Input(Bool()))
    val needReplayFromRS = Vec(LoadPipelineWidth, Input(Bool()))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO)) // TODO: to be renamed
    val loadViolationQuery = Vec(LoadPipelineWidth, Flipped(new LoadViolationQueryIO))
    val rob = Flipped(new RobLsqIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill)) // TODO: to be renamed
    val release = Flipped(ValidIO(new Release))
    val uncache = new DCacheWordIO
    val exceptionAddr = new ExceptionAddrIO
    val lqFull = Output(Bool())
  })

  println("LoadQueue: size:" + LoadQueueSize)

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRobEntry))
  val dataModule = Module(new LoadQueueDataWrapper(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new SyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 3, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been writebacked to CDB
  val released = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been released by dcache
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of rob
  val refilling = WireInit(VecInit(List.fill(LoadQueueSize)(false.B))) // entry is being refilled by dcache in this cycle

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst
  val debug_paddr = Reg(Vec(LoadQueueSize, UInt(PAddrBits.W))) // paddr of the load, for debug

  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  val commitCount = RegNext(io.rob.lcommit)
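
  // Illustrative sketch of the pointer encoding (assumed from CircularQueuePtr
  // semantics): each pointer carries a {flag, value} pair, and the flag toggles
  // each time value wraps past LoadQueueSize, so full and empty can be told
  // apart when enqPtr.value === deqPtr.value. UIntToMask(ptr, n) is assumed to
  // set the bits below ptr; e.g. with LoadQueueSize = 8 and deqPtr = 3:
  //   UIntToMask(3.U, 8)  // = b00000111, entries 0..2 lie below the deq pointer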

  /**
   * Enqueue at dispatch
   *
   * Currently, LoadQueue only allows enqueue when #emptyEntries > EnqWidth
   */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until io.enq.req.length) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      released(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
   * Writeback load from load units
   *
   * Most load instructions writeback to regfile at the same time.
   * However,
   *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
   *   (2) For an mmio instruction without exceptions, it does not write back.
   *       The mmio instruction will be sent to lower level when it reaches ROB's head.
   *       After uncache response, it will write back through arbiter with loadUnit.
   *   (3) For cache misses, it is marked as miss and sent to dcache later.
   *       After cache refill, it will write back through arbiter with loadUnit.
   */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }
      datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.loadDataForwarded(i)) &&
        !io.loadIn(i).bits.mmio && // mmio data is not valid until we finished uncache access
        !io.needReplayFromRS(i) // do not writeback if that inst will be resent from rs
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
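
      // Illustrative case (sketch, derived from the conditions above): a load
      // that misses in dcache but is fully forwarded from the store queue has
      // miss = true and loadDataForwarded = true, so datavalid is set here and
      // no miss request is kept pending; a plain dcache miss (no forward, no
      // replay) instead leaves datavalid low and sets miss() below, waiting
      // for the refill to flip datavalid.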

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
      debug_paddr(loadWbIndex) := io.loadIn(i).bits.paddr

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadDataForwarded(i) && !io.needReplayFromRS(i)
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      uop(loadWbIndex).debugInfo := io.loadIn(i).bits.uop.debugInfo
      // update replayInst (replay from fetch) bit,
      // as replayInst may be set to true in the load pipeline
      uop(loadWbIndex).ctrl.replayInst := io.loadIn(i).bits.uop.ctrl.replayInst
    }
    // vaddrModule write is delayed, as vaddrModule will not be read right after write
    vaddrModule.io.waddr(i) := RegNext(loadWbIndex)
    vaddrModule.io.wdata(i) := RegNext(io.loadIn(i).bits.vaddr)
    vaddrModule.io.wen(i) := RegNext(io.loadIn(i).fire())
  }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
      refilling(i) := true.B
    }
  })

  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads (data refilled) and write them back to cdb.
  // The 2 refilled loads are selected from even/odd entries, separately.

  // Stage 0
  // Generate writeback indexes

  def getEvenBits(input: UInt): UInt = {
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i)})).asUInt
  }
  def getOddBits(input: UInt): UInt = {
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i+1)})).asUInt
  }

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && (datavalid(i) || refilling(i))
  })).asUInt() // use UInt instead of Vec to reduce verilog lines
  val evenDeqMask = getEvenBits(deqMask)
  val oddDeqMask = getOddBits(deqMask)
  // generate lastCycleSelect mask
  val evenFireMask = getEvenBits(UIntToOH(loadWbSel(0)))
  val oddFireMask = getOddBits(UIntToOH(loadWbSel(1)))
  // generate real select vec
  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }
  val loadEvenSelVecFire = getEvenBits(loadWbSelVec) & ~evenFireMask
  val loadOddSelVecFire = getOddBits(loadWbSelVec) & ~oddFireMask
  val loadEvenSelVecNotFire = getEvenBits(loadWbSelVec)
  val loadOddSelVecNotFire = getOddBits(loadWbSelVec)
  val loadEvenSel = Mux(
    io.ldout(0).fire(),
    getFirstOne(toVec(loadEvenSelVecFire), evenDeqMask),
    getFirstOne(toVec(loadEvenSelVecNotFire), evenDeqMask)
  )
  val loadOddSel = Mux(
    io.ldout(1).fire(),
    getFirstOne(toVec(loadOddSelVecFire), oddDeqMask),
    getFirstOne(toVec(loadOddSelVecNotFire), oddDeqMask)
  )

  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(loadEvenSel, 0.U(1.W))
  loadWbSelVGen(0) := Mux(io.ldout(0).fire(), loadEvenSelVecFire.asUInt.orR, loadEvenSelVecNotFire.asUInt.orR)
  loadWbSelGen(1) := Cat(loadOddSel, 1.U(1.W))
  loadWbSelVGen(1) := Mux(io.ldout(1).fire(), loadOddSelVecFire.asUInt.orR, loadOddSelVecNotFire.asUInt.orR)
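
  // Worked example (illustrative, assuming LoadQueueSize = 8): getEvenBits
  // collects entries 0, 2, 4, 6 and getOddBits entries 1, 3, 5, 7, so each
  // ldout port only arbitrates over half of the queue. The full entry index is
  // rebuilt by appending the parity bit, e.g. loadEvenSel = 3 gives
  //   loadWbSelGen(0) = Cat(3.U, 0.U(1.W))  // = 6, an even entry
  // and loadOddSel = 3 gives Cat(3.U, 1.U(1.W)) = 7, an odd entry.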

  (0 until LoadPipelineWidth).map(i => {
    loadWbSel(i) := RegNext(loadWbSelGen(i))
    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
    when(io.ldout(i).fire()){
      // Mark them as writebacked, so they will not be selected in the next cycle
      writebacked(loadWbSel(i)) := true.B
    }
  })

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to cdb
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.debug.paddr := debug_paddr(loadWbSel(i))
    io.ldout(i).bits.debug.vaddr := vaddrModule.io.rdata(i+1)
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb robidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.robIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }

  })
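
  // Illustrative case (sketch): the entry's data is read back as a 64-bit word
  // and the low paddr bits select the byte offset within it. For an lhu at
  // paddr(2, 0) = 6, rdata(63, 48) is chosen above and rdataHelper then
  // zero-extends bits (15, 0) of that slice to XLEN.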

  /**
   * Load commits
   *
   * When a load is committed, mark it as !allocated and move deqPtrExt forward.
   */
  (0 until CommitWidth).map(i => {
    when(commitCount > i.U){
      allocated((deqPtrExt+i.U).value) := false.B
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).robIdx, uop(1).robIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).robIdx, uop(j).robIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  /**
   * Store-Load Memory violation detection
   *
   * When a store writes back, it searches LoadQueue for younger load instructions
   * with the same load physical address. They loaded wrong data and need re-execution.
   *
   * Cycle 0: Store Writeback
   *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
   *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
   * Cycle 1: Redirect Generation
   *   There are three possible types of violations, up to 6 possible redirect requests.
   *   Choose the oldest load (part 1). (4 + 2) -> (1 + 2)
   * Cycle 2: Redirect Fire
   *   Choose the oldest load (part 2). (3 -> 1)
   *   Prepare redirect request according to the detected violation.
   *   Fire redirect request (if valid)
   */

  // stage 0:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |  (paddr match)
  // stage 1:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |
  //                 |  |------------|  |
  //                 |        |         |
  // stage 2:        lq      l1wb      lq
  //                 |        |         |
  //                 --------------------
  //                          |
  //                     rollback req
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

    // check if a load already in lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")
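
    // Worked example (illustrative, assuming LoadQueueSize = 8 and that
    // UIntToMask sets the bits below its argument): if the store's lqIdx.value
    // is 5 while enqPtr is 2 with the opposite flag, then
    //   lqIdxMask = b00011111, enqMask = b00000011, xorMask = b00011100
    // and, because the flags differ, toEnqPtrMask = ~xorMask = b11100011,
    // i.e. entries 5, 6, 7, 0 and 1 are the loads younger than this store.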

    // when l/s writeback to rob together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for load in l1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, l1ViolationUop.robIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, lqViolationUop.robIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, wbViolationUop.robIdx.asUInt
    )

    ((lqViolation, lqViolationUop), (wbViolation, wbViolationUop), (l1Violation, l1ViolationUop))
  }

  def rollbackSel(a: Valid[MicroOpRbExt], b: Valid[MicroOpRbExt]): ValidIO[MicroOpRbExt] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.uop.robIdx, b.bits.uop.robIdx), b, a), // a, b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }
  val lastCycleRedirect = RegNext(io.brqRedirect)
  val lastlastCycleRedirect = RegNext(lastCycleRedirect)

  // S2: select rollback (part1) and generate rollback request
  // rollback check
  // Wb/L1 rollback seq check is done in s2
  val rollbackWb = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1 = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1Wb = Wire(Vec(StorePipelineWidth*2, Valid(new MicroOpRbExt)))
  // Lq rollback seq check is done in s3 (next stage), as getting rollbackLq MicroOp is slow
  val rollbackLq = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  // store ftq index for store set update
  val stFtqIdxS2 = Wire(Vec(StorePipelineWidth, new FtqPtr))
  val stFtqOffsetS2 = Wire(Vec(StorePipelineWidth, UInt(log2Up(PredictWidth).W)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollbackLq(i).valid := detectedRollback._1._1 && RegNext(io.storeIn(i).valid)
    rollbackLq(i).bits.uop := detectedRollback._1._2
    rollbackLq(i).bits.flag := i.U
    rollbackWb(i).valid := detectedRollback._2._1 && RegNext(io.storeIn(i).valid)
    rollbackWb(i).bits.uop := detectedRollback._2._2
    rollbackWb(i).bits.flag := i.U
    rollbackL1(i).valid := detectedRollback._3._1 && RegNext(io.storeIn(i).valid)
    rollbackL1(i).bits.uop := detectedRollback._3._2
    rollbackL1(i).bits.flag := i.U
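    // The L1 and Wb candidates from store pipe i are packed at positions 2*i
    // and 2*i+1 of rollbackL1Wb (sketch: with StorePipelineWidth = 2, the four
    // candidates are L1_0, Wb_0, L1_1, Wb_1), so ParallelOperation below can
    // reduce them to the single oldest request using rollbackSel.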
    rollbackL1Wb(2*i) := rollbackL1(i)
    rollbackL1Wb(2*i+1) := rollbackWb(i)
    stFtqIdxS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqPtr)
    stFtqOffsetS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqOffset)
  }

  val rollbackL1WbSelected = ParallelOperation(rollbackL1Wb, rollbackSel)
  val rollbackL1WbVReg = RegNext(rollbackL1WbSelected.valid)
  val rollbackL1WbReg = RegEnable(rollbackL1WbSelected.bits, rollbackL1WbSelected.valid)
  val rollbackLq0VReg = RegNext(rollbackLq(0).valid)
  val rollbackLq0Reg = RegEnable(rollbackLq(0).bits, rollbackLq(0).valid)
  val rollbackLq1VReg = RegNext(rollbackLq(1).valid)
  val rollbackLq1Reg = RegEnable(rollbackLq(1).bits, rollbackLq(1).valid)

  // S3: select rollback (part2), generate rollback request, then fire rollback request
  // Note that we use robIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's robIdx equals this cycle's robIdx, it still triggers the redirect.

  // FIXME: this is ugly
  val rollbackValidVec = Seq(rollbackL1WbVReg, rollbackLq0VReg, rollbackLq1VReg)
  val rollbackUopExtVec = Seq(rollbackL1WbReg, rollbackLq0Reg, rollbackLq1Reg)

  // select uop in parallel
  val mask = getAfterMask(rollbackValidVec, rollbackUopExtVec.map(i => i.uop))
  val oneAfterZero = mask(1)(0)
  val rollbackUopExt = Mux(oneAfterZero && mask(2)(0),
    rollbackUopExtVec(0),
    Mux(!oneAfterZero && mask(2)(1), rollbackUopExtVec(1), rollbackUopExtVec(2)))
  val stFtqIdxS3 = RegNext(stFtqIdxS2)
  val stFtqOffsetS3 = RegNext(stFtqOffsetS2)
  val rollbackUop = rollbackUopExt.uop
  val rollbackStFtqIdx = stFtqIdxS3(rollbackUopExt.flag)
  val rollbackStFtqOffset = stFtqOffsetS3(rollbackUopExt.flag)

  // check if rollback request is still valid in parallel
  val rollbackValidVecChecked = Wire(Vec(3, Bool()))
  for(((v, uop), idx) <- rollbackValidVec.zip(rollbackUopExtVec.map(i => i.uop)).zipWithIndex) {
    rollbackValidVecChecked(idx) := v &&
      (!lastCycleRedirect.valid || isBefore(uop.robIdx, lastCycleRedirect.bits.robIdx)) &&
      (!lastlastCycleRedirect.valid || isBefore(uop.robIdx, lastlastCycleRedirect.bits.robIdx))
  }

  io.rollback.bits.robIdx := rollbackUop.robIdx
  io.rollback.bits.ftqIdx := rollbackUop.cf.ftqPtr
  io.rollback.bits.stFtqIdx := rollbackStFtqIdx
  io.rollback.bits.ftqOffset := rollbackUop.cf.ftqOffset
  io.rollback.bits.stFtqOffset := rollbackStFtqOffset
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.cfiUpdate := DontCare
  io.rollback.bits.cfiUpdate.target := rollbackUop.cf.pc
  io.rollback.bits.debug_runahead_checkpoint_id := rollbackUop.debugInfo.runahead_checkpoint_id
  // io.rollback.bits.pc := DontCare

  io.rollback.valid := rollbackValidVecChecked.asUInt.orR

  when(io.rollback.valid) {
    // XSDebug("Mem rollback: pc %x robidx %d\n", io.rollback.bits.cfi, io.rollback.bits.robIdx.asUInt)
  }
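
  // Illustrative behaviour (sketch): getAfterMask gives mask(i)(j) = "candidate
  // i is younger than candidate j" (true as well when i is invalid), so the Mux
  // chain above picks the registered L1/Wb winner only when both Lq candidates
  // are younger than it (or invalid), falls back to Lq0 when Lq0 is the oldest,
  // and otherwise picks Lq1. rollbackValidVecChecked then drops a request that
  // is not older than a redirect already sent in the last two cycles; e.g. if
  // the Lq0 candidate has robIdx 12 while a redirect with robIdx 10 fired last
  // cycle, isBefore(12, 10) is false and the rollback is suppressed, since that
  // load will be flushed by the earlier redirect anyway.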

  /**
   * Load-Load Memory violation detection
   *
   * When a load arrives at load_s1, it searches LoadQueue for younger load instructions
   * with the same load physical address. If the younger load has been released (or observed),
   * the younger load needs to be re-executed.
   *
   * For now, if re-execution is found to be needed in load_s1, we mark the older load as replayInst,
   * and the two loads will be replayed when the older load becomes the head of rob.
   *
   * When dcache releases a line, mark all writebacked entries in the load queue with
   * the same line paddr as released.
   */

  // Load-Load Memory violation query
  val deqRightMask = UIntToMask.rightmask(deqPtr, LoadQueueSize)
  (0 until LoadPipelineWidth).map(i => {
    dataModule.io.release_violation(i).paddr := io.loadViolationQuery(i).req.bits.paddr
    io.loadViolationQuery(i).req.ready := true.B
    io.loadViolationQuery(i).resp.valid := RegNext(io.loadViolationQuery(i).req.fire())
    // Generate real violation mask
    // Note that we use UIntToMask.rightmask here
    val startIndex = io.loadViolationQuery(i).req.bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask.rightmask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ deqRightMask
    val sameFlag = io.loadViolationQuery(i).req.bits.uop.lqIdx.flag === deqPtrExt.flag
    val toDeqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
    val ldld_violation_mask = WireInit(VecInit((0 until LoadQueueSize).map(j => {
      dataModule.io.release_violation(i).match_mask(j) && // addr match
      toDeqPtrMask(j) && // the load is younger than current load
      allocated(j) && // entry is valid
      released(j) && // cacheline is released
      (datavalid(j) || miss(j)) // paddr is valid
    })))
    dontTouch(ldld_violation_mask)
    ldld_violation_mask.suggestName("ldldViolationMask_" + i)
    io.loadViolationQuery(i).resp.bits.have_violation := RegNext(ldld_violation_mask.asUInt.orR)
  })

  // "released" flag update
  //
  // When io.release.valid, it uses the last ld-ld paddr cam port to
  // update the released flag in 1 cycle
  when(io.release.valid){
    // Take over the ld-ld paddr cam port
    dataModule.io.release_violation.takeRight(1)(0).paddr := io.release.bits.paddr
    io.loadViolationQuery.takeRight(1)(0).req.ready := false.B
    // If a load needs that cam port, replay it from rs
  }

  (0 until LoadQueueSize).map(i => {
    when(RegNext(dataModule.io.release_violation.takeRight(1)(0).match_mask(i) &&
      allocated(i) &&
      writebacked(i) &&
      io.release.valid
    )){
      // Note: if a load has missed in dcache and is waiting for refill in the load queue,
      // its released flag still needs to be set as true if the addr matches.
      released(i) := true.B
    }
  })
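
  // Sketch of the port-sharing trade-off (illustrative): a dcache release and a
  // ld-ld violation query can arrive in the same cycle, so when io.release.valid
  // the last cam port is taken over for the release address and that query's
  // req.ready is pulled low; the affected load is simply replayed from the
  // reservation station instead of spending a dedicated cam port on releases.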

  /**
   * Memory mapped IO / other uncached operations
   *
   * States:
   * (1) writeback from store units: mark as pending
   * (2) when they reach ROB's head, they can be sent to uncache channel
   * (3) response from uncache channel: mark as datavalid
   * (4) writeback to ROB (and other units): mark as writebacked
   * (5) ROB commits the instruction: same as normal instructions
   */
  // (2) when they reach ROB's head, they can be sent to uncache channel
  val lqTailMmioPending = WireInit(pending(deqPtr))
  val lqTailAllocated = WireInit(allocated(deqPtr))
  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(RegNext(io.rob.pendingld && lqTailMmioPending && lqTailAllocated)) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when(io.uncache.req.fire()) {
        uncacheState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire()) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      when(RegNext(io.rob.commit)) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
  io.uncache.req.valid := uncacheState === s_req

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.id := DontCare
  io.uncache.req.bits.instrtype := DontCare

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  // (3) response from uncache channel: mark as datavalid
  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.dcache.bits.data)
  }

  // Read vaddr for mem exception
  // no inst will be committed 1 cycle before tval update
  vaddrModule.io.raddr(0) := (deqPtrExt + commitCount).value
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // Read vaddr for debug trigger
  (0 until LoadPipelineWidth).map(i => {
    vaddrModule.io.raddr(i+1) := loadWbSel(i)
  })

  // misprediction recovery / exception redirect
  // invalidate lq entries using robIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }
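
  // Illustrative recovery case (sketch): if a branch redirect flushes three
  // loads that were allocated after it, their needCancel bits are set and the
  // entries are freed in this cycle; the registered PopCount below
  // (lastCycleCancelCount) then walks enqPtrExt back by three in the following
  // cycle, which is why the pointer update keys off lastCycleRedirect rather
  // than io.brqRedirect itself.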

  /**
   * update pointers
   */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)

  allowEnqueue := validCount + enqNumber <= (LoadQueueSize - io.enq.req.length).U

  /**
   * misc
   */
  // perf counter
  QueuePerf(LoadQueueSize, validCount, !allowEnqueue)
  io.lqFull := !allowEnqueue
  XSPerfAccumulate("rollback", io.rollback.valid) // rollback redirect generated
  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req
  XSPerfAccumulate("mmioCnt", io.uncache.req.fire())
  XSPerfAccumulate("refill", io.dcache.valid)
  XSPerfAccumulate("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire()))))
  XSPerfAccumulate("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready))))
  XSPerfAccumulate("utilization_miss", PopCount((0 until LoadQueueSize).map(i => allocated(i) && miss(i))))

  val perfEvents = Seq(
    ("rollback         ", io.rollback.valid                                                               ),
    ("mmioCycle        ", uncacheState =/= s_idle                                                         ),
    ("mmio_Cnt         ", io.uncache.req.fire()                                                           ),
    ("refill           ", io.dcache.valid                                                                 ),
    ("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire())))                                  ),
    ("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready)))                       ),
    ("ltq_1_4_valid    ", (validCount < (LoadQueueSize.U/4.U))                                            ),
    ("ltq_2_4_valid    ", (validCount > (LoadQueueSize.U/4.U)) & (validCount <= (LoadQueueSize.U/2.U))    ),
    ("ltq_3_4_valid    ", (validCount > (LoadQueueSize.U/2.U)) & (validCount <= (LoadQueueSize.U*3.U/4.U))),
    ("ltq_4_4_valid    ", (validCount > (LoadQueueSize.U*3.U/4.U))                                        )
  )
  generatePerfEvent()

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    XSDebug(i + " pc %x pa %x ", uop(i).cf.pc, debug_paddr(i))
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, "\n")
  }

}