1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16 17package xiangshan.mem 18 19import chipsalliance.rocketchip.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import utils._ 23import xiangshan.ExceptionNO._ 24import xiangshan._ 25import xiangshan.backend.fu.PMPRespBundle 26import xiangshan.cache._ 27import xiangshan.cache.mmu.{TlbCmd, TlbReq, TlbRequestIO, TlbResp} 28 29class LoadToLsqIO(implicit p: Parameters) extends XSBundle { 30 val loadIn = ValidIO(new LsPipelineBundle) 31 val ldout = Flipped(DecoupledIO(new ExuOutput)) 32 val loadDataForwarded = Output(Bool()) 33 val delayedLoadError = Output(Bool()) 34 val dcacheRequireReplay = Output(Bool()) 35 val forward = new PipeLoadForwardQueryIO 36 val loadViolationQuery = new LoadViolationQueryIO 37 val trigger = Flipped(new LqTriggerIO) 38} 39 40class LoadToLoadIO(implicit p: Parameters) extends XSBundle { 41 // load to load fast path is limited to ld (64 bit) used as vaddr src1 only 42 val data = UInt(XLEN.W) 43 val valid = Bool() 44} 45 46class LoadUnitTriggerIO(implicit p: Parameters) extends XSBundle { 47 val tdata2 = Input(UInt(64.W)) 48 val matchType = Input(UInt(2.W)) 49 val tEnable = Input(Bool()) // timing is calculated before this 50 val addrHit = Output(Bool()) 51 val lastDataHit = Output(Bool()) 52} 53 54// Load Pipeline Stage 0 55// Generate addr, use addr to query DCache and DTLB 56class LoadUnit_S0(implicit p: Parameters) extends XSModule with HasDCacheParameters{ 57 val io = IO(new Bundle() { 58 val in = Flipped(Decoupled(new ExuInput)) 59 val out = Decoupled(new LsPipelineBundle) 60 val fastpath = Input(Vec(LoadPipelineWidth, new LoadToLoadIO)) 61 val dtlbReq = DecoupledIO(new TlbReq) 62 val dcacheReq = DecoupledIO(new DCacheWordReq) 63 val rsIdx = Input(UInt(log2Up(IssQueSize).W)) 64 val isFirstIssue = Input(Bool()) 65 val loadFastMatch = Input(UInt(exuParameters.LduCnt.W)) 66 }) 67 require(LoadPipelineWidth == exuParameters.LduCnt) 68 69 val s0_uop = io.in.bits.uop 70 val imm12 = WireInit(s0_uop.ctrl.imm(11,0)) 71 72 val s0_vaddr = WireInit(io.in.bits.src(0) + SignExt(s0_uop.ctrl.imm(11,0), VAddrBits)) 73 val s0_mask = WireInit(genWmask(s0_vaddr, s0_uop.ctrl.fuOpType(1,0))) 74 75 if (EnableLoadToLoadForward) { 76 // slow vaddr from non-load insts 77 val slowpath_vaddr = io.in.bits.src(0) + SignExt(s0_uop.ctrl.imm(11,0), VAddrBits) 78 val slowpath_mask = genWmask(slowpath_vaddr, s0_uop.ctrl.fuOpType(1,0)) 79 80 // fast vaddr from load insts 81 val fastpath_vaddrs = WireInit(VecInit(List.tabulate(LoadPipelineWidth)(i => { 82 io.fastpath(i).data + SignExt(s0_uop.ctrl.imm(11,0), VAddrBits) 83 }))) 84 val fastpath_masks = WireInit(VecInit(List.tabulate(LoadPipelineWidth)(i => { 85 genWmask(fastpath_vaddrs(i), s0_uop.ctrl.fuOpType(1,0)) 86 }))) 87 val fastpath_vaddr = Mux1H(io.loadFastMatch, fastpath_vaddrs) 88 val fastpath_mask = Mux1H(io.loadFastMatch, fastpath_masks) 89 90 // select vaddr from 2 alus 91 s0_vaddr := Mux(io.loadFastMatch.orR, fastpath_vaddr, slowpath_vaddr) 92 s0_mask := Mux(io.loadFastMatch.orR, fastpath_mask, slowpath_mask) 93 XSPerfAccumulate("load_to_load_forward", io.loadFastMatch.orR && io.in.fire()) 94 } 95 96 val isSoftPrefetch = LSUOpType.isPrefetch(s0_uop.ctrl.fuOpType) 97 val isSoftPrefetchRead = s0_uop.ctrl.fuOpType === LSUOpType.prefetch_r 98 val isSoftPrefetchWrite = s0_uop.ctrl.fuOpType === LSUOpType.prefetch_w 99 100 // query DTLB 101 io.dtlbReq.valid := io.in.valid 102 io.dtlbReq.bits.vaddr := s0_vaddr 103 io.dtlbReq.bits.cmd := TlbCmd.read 104 io.dtlbReq.bits.size := LSUOpType.size(io.in.bits.uop.ctrl.fuOpType) 105 io.dtlbReq.bits.robIdx := s0_uop.robIdx 106 io.dtlbReq.bits.debug.pc := s0_uop.cf.pc 107 io.dtlbReq.bits.debug.isFirstIssue := io.isFirstIssue 108 109 // query DCache 110 io.dcacheReq.valid := io.in.valid 111 when (isSoftPrefetchRead) { 112 io.dcacheReq.bits.cmd := MemoryOpConstants.M_PFR 113 }.elsewhen (isSoftPrefetchWrite) { 114 io.dcacheReq.bits.cmd := MemoryOpConstants.M_PFW 115 }.otherwise { 116 io.dcacheReq.bits.cmd := MemoryOpConstants.M_XRD 117 } 118 io.dcacheReq.bits.addr := s0_vaddr 119 io.dcacheReq.bits.mask := s0_mask 120 io.dcacheReq.bits.data := DontCare 121 when(isSoftPrefetch) { 122 io.dcacheReq.bits.instrtype := SOFT_PREFETCH.U 123 }.otherwise { 124 io.dcacheReq.bits.instrtype := LOAD_SOURCE.U 125 } 126 127 // TODO: update cache meta 128 io.dcacheReq.bits.id := DontCare 129 130 val addrAligned = LookupTree(s0_uop.ctrl.fuOpType(1, 0), List( 131 "b00".U -> true.B, //b 132 "b01".U -> (s0_vaddr(0) === 0.U), //h 133 "b10".U -> (s0_vaddr(1, 0) === 0.U), //w 134 "b11".U -> (s0_vaddr(2, 0) === 0.U) //d 135 )) 136 137 io.out.valid := io.in.valid && io.dcacheReq.ready 138 139 io.out.bits := DontCare 140 io.out.bits.vaddr := s0_vaddr 141 io.out.bits.mask := s0_mask 142 io.out.bits.uop := s0_uop 143 io.out.bits.uop.cf.exceptionVec(loadAddrMisaligned) := !addrAligned 144 io.out.bits.rsIdx := io.rsIdx 145 io.out.bits.isFirstIssue := io.isFirstIssue 146 io.out.bits.isSoftPrefetch := isSoftPrefetch 147 148 io.in.ready := !io.in.valid || (io.out.ready && io.dcacheReq.ready) 149 150 XSDebug(io.dcacheReq.fire(), 151 p"[DCACHE LOAD REQ] pc ${Hexadecimal(s0_uop.cf.pc)}, vaddr ${Hexadecimal(s0_vaddr)}\n" 152 ) 153 XSPerfAccumulate("in_valid", io.in.valid) 154 XSPerfAccumulate("in_fire", io.in.fire) 155 XSPerfAccumulate("in_fire_first_issue", io.in.valid && io.isFirstIssue) 156 XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready && io.dcacheReq.ready) 157 XSPerfAccumulate("stall_dcache", io.out.valid && io.out.ready && !io.dcacheReq.ready) 158 XSPerfAccumulate("addr_spec_success", io.out.fire() && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12)) 159 XSPerfAccumulate("addr_spec_failed", io.out.fire() && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12)) 160 XSPerfAccumulate("addr_spec_success_once", io.out.fire() && s0_vaddr(VAddrBits-1, 12) === io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue) 161 XSPerfAccumulate("addr_spec_failed_once", io.out.fire() && s0_vaddr(VAddrBits-1, 12) =/= io.in.bits.src(0)(VAddrBits-1, 12) && io.isFirstIssue) 162} 163 164 165// Load Pipeline Stage 1 166// TLB resp (send paddr to dcache) 167class LoadUnit_S1(implicit p: Parameters) extends XSModule { 168 val io = IO(new Bundle() { 169 val in = Flipped(Decoupled(new LsPipelineBundle)) 170 val out = Decoupled(new LsPipelineBundle) 171 val dtlbResp = Flipped(DecoupledIO(new TlbResp)) 172 val dcachePAddr = Output(UInt(PAddrBits.W)) 173 val dcacheKill = Output(Bool()) 174 val fastUopKill = Output(Bool()) 175 val dcacheBankConflict = Input(Bool()) 176 val fullForwardFast = Output(Bool()) 177 val sbuffer = new LoadForwardQueryIO 178 val lsq = new PipeLoadForwardQueryIO 179 val loadViolationQueryReq = Decoupled(new LoadViolationQueryReq) 180 val rsFeedback = ValidIO(new RSFeedback) 181 val csrCtrl = Flipped(new CustomCSRCtrlIO) 182 val needLdVioCheckRedo = Output(Bool()) 183 }) 184 185 val s1_uop = io.in.bits.uop 186 val s1_paddr = io.dtlbResp.bits.paddr 187 // af & pf exception were modified below. 188 val s1_exception = ExceptionNO.selectByFu(io.out.bits.uop.cf.exceptionVec, lduCfg).asUInt.orR 189 val s1_tlb_miss = io.dtlbResp.bits.miss 190 val s1_mask = io.in.bits.mask 191 val s1_bank_conflict = io.dcacheBankConflict 192 193 io.out.bits := io.in.bits // forwardXX field will be updated in s1 194 195 io.dtlbResp.ready := true.B 196 197 // TOOD: PMA check 198 io.dcachePAddr := s1_paddr 199 //io.dcacheKill := s1_tlb_miss || s1_exception || s1_mmio 200 io.dcacheKill := s1_tlb_miss || s1_exception 201 io.fastUopKill := io.dtlbResp.bits.fast_miss || s1_exception 202 203 // load forward query datapath 204 io.sbuffer.valid := io.in.valid && !(s1_exception || s1_tlb_miss) 205 io.sbuffer.vaddr := io.in.bits.vaddr 206 io.sbuffer.paddr := s1_paddr 207 io.sbuffer.uop := s1_uop 208 io.sbuffer.sqIdx := s1_uop.sqIdx 209 io.sbuffer.mask := s1_mask 210 io.sbuffer.pc := s1_uop.cf.pc // FIXME: remove it 211 212 io.lsq.valid := io.in.valid && !(s1_exception || s1_tlb_miss) 213 io.lsq.vaddr := io.in.bits.vaddr 214 io.lsq.paddr := s1_paddr 215 io.lsq.uop := s1_uop 216 io.lsq.sqIdx := s1_uop.sqIdx 217 io.lsq.sqIdxMask := DontCare // will be overwritten by sqIdxMask pre-generated in s0 218 io.lsq.mask := s1_mask 219 io.lsq.pc := s1_uop.cf.pc // FIXME: remove it 220 221 // ld-ld violation query 222 io.loadViolationQueryReq.valid := io.in.valid && !(s1_exception || s1_tlb_miss) 223 io.loadViolationQueryReq.bits.paddr := s1_paddr 224 io.loadViolationQueryReq.bits.uop := s1_uop 225 226 // Generate forwardMaskFast to wake up insts earlier 227 val forwardMaskFast = io.lsq.forwardMaskFast.asUInt | io.sbuffer.forwardMaskFast.asUInt 228 io.fullForwardFast := (~forwardMaskFast & s1_mask) === 0.U 229 230 // Generate feedback signal caused by: 231 // * dcache bank conflict 232 // * need redo ld-ld violation check 233 val needLdVioCheckRedo = io.loadViolationQueryReq.valid && 234 !io.loadViolationQueryReq.ready && 235 RegNext(io.csrCtrl.ldld_vio_check_enable) 236 io.needLdVioCheckRedo := needLdVioCheckRedo 237 io.rsFeedback.valid := io.in.valid && (s1_bank_conflict || needLdVioCheckRedo) 238 io.rsFeedback.bits.hit := false.B // we have found s1_bank_conflict / re do ld-ld violation check 239 io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx 240 io.rsFeedback.bits.flushState := io.in.bits.ptwBack 241 io.rsFeedback.bits.sourceType := Mux(s1_bank_conflict, RSFeedbackType.bankConflict, RSFeedbackType.ldVioCheckRedo) 242 io.rsFeedback.bits.dataInvalidSqIdx := DontCare 243 244 // if replay is detected in load_s1, 245 // load inst will be canceled immediately 246 io.out.valid := io.in.valid && !io.rsFeedback.valid 247 io.out.bits.paddr := s1_paddr 248 io.out.bits.tlbMiss := s1_tlb_miss 249 250 // current ori test will cause the case of ldest == 0, below will be modifeid in the future. 251 // af & pf exception were modified 252 io.out.bits.uop.cf.exceptionVec(loadPageFault) := io.dtlbResp.bits.excp.pf.ld 253 io.out.bits.uop.cf.exceptionVec(loadAccessFault) := io.dtlbResp.bits.excp.af.ld 254 255 io.out.bits.ptwBack := io.dtlbResp.bits.ptwBack 256 io.out.bits.rsIdx := io.in.bits.rsIdx 257 258 io.out.bits.isSoftPrefetch := io.in.bits.isSoftPrefetch 259 260 io.in.ready := !io.in.valid || io.out.ready 261 262 XSPerfAccumulate("in_valid", io.in.valid) 263 XSPerfAccumulate("in_fire", io.in.fire) 264 XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue) 265 XSPerfAccumulate("tlb_miss", io.in.fire && s1_tlb_miss) 266 XSPerfAccumulate("tlb_miss_first_issue", io.in.fire && s1_tlb_miss && io.in.bits.isFirstIssue) 267 XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready) 268} 269 270// Load Pipeline Stage 2 271// DCache resp 272class LoadUnit_S2(implicit p: Parameters) extends XSModule with HasLoadHelper { 273 val io = IO(new Bundle() { 274 val in = Flipped(Decoupled(new LsPipelineBundle)) 275 val out = Decoupled(new LsPipelineBundle) 276 val rsFeedback = ValidIO(new RSFeedback) 277 val dcacheResp = Flipped(DecoupledIO(new DCacheWordResp)) 278 val pmpResp = Flipped(new PMPRespBundle()) 279 val lsq = new LoadForwardQueryIO 280 val dataInvalidSqIdx = Input(UInt()) 281 val sbuffer = new LoadForwardQueryIO 282 val dataForwarded = Output(Bool()) 283 val dcacheRequireReplay = Output(Bool()) 284 val fullForward = Output(Bool()) 285 val fastpath = Output(new LoadToLoadIO) 286 val dcache_kill = Output(Bool()) 287 val delayedLoadError = Output(Bool()) 288 val loadViolationQueryResp = Flipped(Valid(new LoadViolationQueryResp)) 289 val csrCtrl = Flipped(new CustomCSRCtrlIO) 290 val sentFastUop = Input(Bool()) 291 val static_pm = Input(Valid(Bool())) // valid for static, bits for mmio 292 }) 293 294 val pmp = WireInit(io.pmpResp) 295 when (io.static_pm.valid) { 296 pmp.ld := false.B 297 pmp.st := false.B 298 pmp.instr := false.B 299 pmp.mmio := io.static_pm.bits 300 } 301 302 val s2_is_prefetch = io.in.bits.isSoftPrefetch 303 304 // exception that may cause load addr to be invalid / illegal 305 // 306 // if such exception happen, that inst and its exception info 307 // will be force writebacked to rob 308 val s2_exception_vec = WireInit(io.in.bits.uop.cf.exceptionVec) 309 s2_exception_vec(loadAccessFault) := io.in.bits.uop.cf.exceptionVec(loadAccessFault) || pmp.ld 310 // soft prefetch will not trigger any exception (but ecc error interrupt may be triggered) 311 when (s2_is_prefetch) { 312 s2_exception_vec := 0.U.asTypeOf(s2_exception_vec.cloneType) 313 } 314 val s2_exception = ExceptionNO.selectByFu(s2_exception_vec, lduCfg).asUInt.orR 315 316 // writeback access fault caused by ecc error / bus error 317 // 318 // * ecc data error is slow to generate, so we will not use it until load stage 3 319 // * in load stage 3, an extra signal io.load_error will be used to 320 321 // now cache ecc error will raise an access fault 322 // at the same time, error info (including error paddr) will be write to 323 // an customized CSR "CACHE_ERROR" 324 if (EnableAccurateLoadError) { 325 io.delayedLoadError := io.dcacheResp.bits.error_delayed && 326 io.csrCtrl.cache_error_enable && 327 RegNext(io.out.valid) 328 } else { 329 io.delayedLoadError := false.B 330 } 331 332 val actually_mmio = pmp.mmio 333 val s2_uop = io.in.bits.uop 334 val s2_mask = io.in.bits.mask 335 val s2_paddr = io.in.bits.paddr 336 val s2_tlb_miss = io.in.bits.tlbMiss 337 val s2_mmio = !s2_is_prefetch && actually_mmio && !s2_exception 338 val s2_cache_miss = io.dcacheResp.bits.miss 339 val s2_cache_replay = io.dcacheResp.bits.replay 340 val s2_cache_tag_error = io.dcacheResp.bits.tag_error 341 val s2_forward_fail = io.lsq.matchInvalid || io.sbuffer.matchInvalid 342 val s2_ldld_violation = io.loadViolationQueryResp.valid && 343 io.loadViolationQueryResp.bits.have_violation && 344 RegNext(io.csrCtrl.ldld_vio_check_enable) 345 val s2_data_invalid = io.lsq.dataInvalid && !s2_forward_fail && !s2_ldld_violation && !s2_exception 346 347 io.dcache_kill := pmp.ld || pmp.mmio // move pmp resp kill to outside 348 io.dcacheResp.ready := true.B 349 val dcacheShouldResp = !(s2_tlb_miss || s2_exception || s2_mmio || s2_is_prefetch) 350 assert(!(io.in.valid && (dcacheShouldResp && !io.dcacheResp.valid)), "DCache response got lost") 351 352 // merge forward result 353 // lsq has higher priority than sbuffer 354 val forwardMask = Wire(Vec(8, Bool())) 355 val forwardData = Wire(Vec(8, UInt(8.W))) 356 357 val fullForward = (~forwardMask.asUInt & s2_mask) === 0.U && !io.lsq.dataInvalid 358 io.lsq := DontCare 359 io.sbuffer := DontCare 360 io.fullForward := fullForward 361 362 // generate XLEN/8 Muxs 363 for (i <- 0 until XLEN / 8) { 364 forwardMask(i) := io.lsq.forwardMask(i) || io.sbuffer.forwardMask(i) 365 forwardData(i) := Mux(io.lsq.forwardMask(i), io.lsq.forwardData(i), io.sbuffer.forwardData(i)) 366 } 367 368 XSDebug(io.out.fire(), "[FWD LOAD RESP] pc %x fwd %x(%b) + %x(%b)\n", 369 s2_uop.cf.pc, 370 io.lsq.forwardData.asUInt, io.lsq.forwardMask.asUInt, 371 io.in.bits.forwardData.asUInt, io.in.bits.forwardMask.asUInt 372 ) 373 374 // data merge 375 val rdataVec = VecInit((0 until XLEN / 8).map(j => 376 Mux(forwardMask(j), forwardData(j), io.dcacheResp.bits.data(8*(j+1)-1, 8*j)))) 377 val rdata = rdataVec.asUInt 378 val rdataSel = LookupTree(s2_paddr(2, 0), List( 379 "b000".U -> rdata(63, 0), 380 "b001".U -> rdata(63, 8), 381 "b010".U -> rdata(63, 16), 382 "b011".U -> rdata(63, 24), 383 "b100".U -> rdata(63, 32), 384 "b101".U -> rdata(63, 40), 385 "b110".U -> rdata(63, 48), 386 "b111".U -> rdata(63, 56) 387 )) 388 val rdataPartialLoad = rdataHelper(s2_uop, rdataSel) 389 390 io.out.valid := io.in.valid && !s2_tlb_miss && !s2_data_invalid 391 // Inst will be canceled in store queue / lsq, 392 // so we do not need to care about flush in load / store unit's out.valid 393 io.out.bits := io.in.bits 394 io.out.bits.data := rdataPartialLoad 395 // when exception occurs, set it to not miss and let it write back to rob (via int port) 396 if (EnableFastForward) { 397 io.out.bits.miss := s2_cache_miss && 398 !s2_exception && 399 !s2_forward_fail && 400 !s2_ldld_violation && 401 !fullForward && 402 !s2_is_prefetch 403 } else { 404 io.out.bits.miss := s2_cache_miss && 405 !s2_exception && 406 !s2_forward_fail && 407 !s2_ldld_violation && 408 !s2_is_prefetch 409 } 410 io.out.bits.uop.ctrl.fpWen := io.in.bits.uop.ctrl.fpWen && !s2_exception 411 // if forward fail, replay this inst from fetch 412 val forwardFailReplay = s2_forward_fail && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss 413 // if ld-ld violation is detected, replay from this inst from fetch 414 val ldldVioReplay = s2_ldld_violation && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss 415 val s2_need_replay_from_fetch = (s2_forward_fail || s2_ldld_violation) && !s2_mmio && !s2_is_prefetch && !s2_tlb_miss 416 io.out.bits.uop.ctrl.replayInst := s2_need_replay_from_fetch 417 io.out.bits.mmio := s2_mmio 418 io.out.bits.uop.ctrl.flushPipe := s2_mmio && io.sentFastUop 419 io.out.bits.uop.cf.exceptionVec := s2_exception_vec // cache error not included 420 421 // For timing reasons, sometimes we can not let 422 // io.out.bits.miss := s2_cache_miss && !s2_exception && !fullForward 423 // We use io.dataForwarded instead. It means: 424 // 1. Forward logic have prepared all data needed, 425 // and dcache query is no longer needed. 426 // 2. ... or data cache tag error is detected, this kind of inst 427 // will not update miss queue. That is to say, if miss, that inst 428 // may not be refilled 429 // Such inst will be writebacked from load queue. 430 io.dataForwarded := s2_cache_miss && !s2_exception && !s2_forward_fail && 431 (fullForward || io.csrCtrl.cache_error_enable && s2_cache_tag_error) 432 // io.out.bits.forwardX will be send to lq 433 io.out.bits.forwardMask := forwardMask 434 // data retbrived from dcache is also included in io.out.bits.forwardData 435 io.out.bits.forwardData := rdataVec 436 437 io.in.ready := io.out.ready || !io.in.valid 438 439 // feedback tlb result to RS 440 io.rsFeedback.valid := io.in.valid 441 val s2_need_replay_from_rs = Wire(Bool()) 442 if (EnableFastForward) { 443 s2_need_replay_from_rs := 444 s2_tlb_miss || // replay if dtlb miss 445 s2_cache_replay && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation && !s2_mmio && !s2_exception && !fullForward || // replay if dcache miss queue full / busy 446 s2_data_invalid && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation // replay if store to load forward data is not ready 447 } else { 448 // Note that if all parts of data are available in sq / sbuffer, replay required by dcache will not be scheduled 449 s2_need_replay_from_rs := 450 s2_tlb_miss || // replay if dtlb miss 451 s2_cache_replay && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation && !s2_mmio && !s2_exception && !io.dataForwarded || // replay if dcache miss queue full / busy 452 s2_data_invalid && !s2_is_prefetch && !s2_forward_fail && !s2_ldld_violation // replay if store to load forward data is not ready 453 } 454 assert(!RegNext(io.in.valid && s2_need_replay_from_rs && s2_need_replay_from_fetch)) 455 io.rsFeedback.bits.hit := !s2_need_replay_from_rs 456 io.rsFeedback.bits.rsIdx := io.in.bits.rsIdx 457 io.rsFeedback.bits.flushState := io.in.bits.ptwBack 458 // feedback source priority: tlbMiss > dataInvalid > mshrFull 459 // general case priority: tlbMiss > exception (include forward_fail / ldld_violation) > mmio > dataInvalid > mshrFull > normal miss / hit 460 io.rsFeedback.bits.sourceType := Mux(s2_tlb_miss, RSFeedbackType.tlbMiss, 461 Mux(s2_data_invalid, 462 RSFeedbackType.dataInvalid, 463 RSFeedbackType.mshrFull 464 ) 465 ) 466 io.rsFeedback.bits.dataInvalidSqIdx.value := io.dataInvalidSqIdx 467 io.rsFeedback.bits.dataInvalidSqIdx.flag := DontCare 468 469 // s2_cache_replay is quite slow to generate, send it separately to LQ 470 if (EnableFastForward) { 471 io.dcacheRequireReplay := s2_cache_replay && !fullForward 472 } else { 473 io.dcacheRequireReplay := s2_cache_replay && 474 !io.rsFeedback.bits.hit && 475 !io.dataForwarded && 476 !s2_is_prefetch && 477 io.out.bits.miss 478 } 479 480 // fast load to load forward 481 io.fastpath.valid := RegNext(io.out.valid) // for debug only 482 io.fastpath.data := RegNext(io.out.bits.data) 483 484 485 XSDebug(io.out.fire(), "[DCACHE LOAD RESP] pc %x rdata %x <- D$ %x + fwd %x(%b)\n", 486 s2_uop.cf.pc, rdataPartialLoad, io.dcacheResp.bits.data, 487 forwardData.asUInt, forwardMask.asUInt 488 ) 489 490 XSPerfAccumulate("in_valid", io.in.valid) 491 XSPerfAccumulate("in_fire", io.in.fire) 492 XSPerfAccumulate("in_fire_first_issue", io.in.fire && io.in.bits.isFirstIssue) 493 XSPerfAccumulate("dcache_miss", io.in.fire && s2_cache_miss) 494 XSPerfAccumulate("dcache_miss_first_issue", io.in.fire && s2_cache_miss && io.in.bits.isFirstIssue) 495 XSPerfAccumulate("full_forward", io.in.valid && fullForward) 496 XSPerfAccumulate("dcache_miss_full_forward", io.in.valid && s2_cache_miss && fullForward) 497 XSPerfAccumulate("replay", io.rsFeedback.valid && !io.rsFeedback.bits.hit) 498 XSPerfAccumulate("replay_tlb_miss", io.rsFeedback.valid && !io.rsFeedback.bits.hit && s2_tlb_miss) 499 XSPerfAccumulate("replay_cache", io.rsFeedback.valid && !io.rsFeedback.bits.hit && !s2_tlb_miss && s2_cache_replay) 500 XSPerfAccumulate("stall_out", io.out.valid && !io.out.ready) 501 XSPerfAccumulate("replay_from_fetch_forward", io.out.valid && forwardFailReplay) 502 XSPerfAccumulate("replay_from_fetch_load_vio", io.out.valid && ldldVioReplay) 503} 504 505class LoadUnit(implicit p: Parameters) extends XSModule 506 with HasLoadHelper 507 with HasPerfEvents 508 with HasDCacheParameters 509{ 510 val io = IO(new Bundle() { 511 val ldin = Flipped(Decoupled(new ExuInput)) 512 val ldout = Decoupled(new ExuOutput) 513 val redirect = Flipped(ValidIO(new Redirect)) 514 val feedbackSlow = ValidIO(new RSFeedback) 515 val feedbackFast = ValidIO(new RSFeedback) 516 val rsIdx = Input(UInt(log2Up(IssQueSize).W)) 517 val isFirstIssue = Input(Bool()) 518 val dcache = new DCacheLoadIO 519 val sbuffer = new LoadForwardQueryIO 520 val lsq = new LoadToLsqIO 521 val refill = Flipped(ValidIO(new Refill)) 522 val fastUop = ValidIO(new MicroOp) // early wakeup signal generated in load_s1, send to RS in load_s2 523 val trigger = Vec(3, new LoadUnitTriggerIO) 524 525 val tlb = new TlbRequestIO 526 val pmp = Flipped(new PMPRespBundle()) // arrive same to tlb now 527 528 val fastpathOut = Output(new LoadToLoadIO) 529 val fastpathIn = Input(Vec(LoadPipelineWidth, new LoadToLoadIO)) 530 val loadFastMatch = Input(UInt(exuParameters.LduCnt.W)) 531 532 val delayedLoadError = Output(Bool()) // load ecc error 533 // Note that io.delayedLoadError and io.lsq.delayedLoadError is different 534 535 val csrCtrl = Flipped(new CustomCSRCtrlIO) 536 }) 537 538 val load_s0 = Module(new LoadUnit_S0) 539 val load_s1 = Module(new LoadUnit_S1) 540 val load_s2 = Module(new LoadUnit_S2) 541 542 load_s0.io.in <> io.ldin 543 load_s0.io.dtlbReq <> io.tlb.req 544 load_s0.io.dcacheReq <> io.dcache.req 545 load_s0.io.rsIdx := io.rsIdx 546 load_s0.io.isFirstIssue := io.isFirstIssue 547 load_s0.io.fastpath := io.fastpathIn 548 load_s0.io.loadFastMatch := io.loadFastMatch 549 550 PipelineConnect(load_s0.io.out, load_s1.io.in, true.B, load_s0.io.out.bits.uop.robIdx.needFlush(io.redirect)) 551 552 load_s1.io.dtlbResp <> io.tlb.resp 553 io.dcache.s1_paddr <> load_s1.io.dcachePAddr 554 io.dcache.s1_kill <> load_s1.io.dcacheKill 555 load_s1.io.sbuffer <> io.sbuffer 556 load_s1.io.lsq <> io.lsq.forward 557 load_s1.io.loadViolationQueryReq <> io.lsq.loadViolationQuery.req 558 load_s1.io.dcacheBankConflict <> io.dcache.s1_bank_conflict 559 load_s1.io.csrCtrl <> io.csrCtrl 560 561 PipelineConnect(load_s1.io.out, load_s2.io.in, true.B, load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect)) 562 563 io.dcache.s2_kill := load_s2.io.dcache_kill // to kill mmio resp which are redirected 564 load_s2.io.dcacheResp <> io.dcache.resp 565 load_s2.io.pmpResp <> io.pmp 566 load_s2.io.static_pm := RegNext(io.tlb.resp.bits.static_pm) 567 load_s2.io.lsq.forwardData <> io.lsq.forward.forwardData 568 load_s2.io.lsq.forwardMask <> io.lsq.forward.forwardMask 569 load_s2.io.lsq.forwardMaskFast <> io.lsq.forward.forwardMaskFast // should not be used in load_s2 570 load_s2.io.lsq.dataInvalid <> io.lsq.forward.dataInvalid 571 load_s2.io.lsq.matchInvalid <> io.lsq.forward.matchInvalid 572 load_s2.io.sbuffer.forwardData <> io.sbuffer.forwardData 573 load_s2.io.sbuffer.forwardMask <> io.sbuffer.forwardMask 574 load_s2.io.sbuffer.forwardMaskFast <> io.sbuffer.forwardMaskFast // should not be used in load_s2 575 load_s2.io.sbuffer.dataInvalid <> io.sbuffer.dataInvalid // always false 576 load_s2.io.sbuffer.matchInvalid <> io.sbuffer.matchInvalid 577 load_s2.io.dataForwarded <> io.lsq.loadDataForwarded 578 load_s2.io.fastpath <> io.fastpathOut 579 load_s2.io.dataInvalidSqIdx := io.lsq.forward.dataInvalidSqIdx // provide dataInvalidSqIdx to make wakeup faster 580 load_s2.io.loadViolationQueryResp <> io.lsq.loadViolationQuery.resp 581 load_s2.io.csrCtrl <> io.csrCtrl 582 load_s2.io.sentFastUop := io.fastUop.valid 583 584 // actually load s3 585 io.lsq.dcacheRequireReplay := load_s2.io.dcacheRequireReplay 586 io.lsq.delayedLoadError := load_s2.io.delayedLoadError 587 588 // feedback tlb miss / dcache miss queue full 589 io.feedbackSlow.valid := RegNext(load_s2.io.rsFeedback.valid && !load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect)) 590 io.feedbackSlow.bits := RegNext(load_s2.io.rsFeedback.bits) 591 val s3_replay_for_mshrfull = RegNext(!load_s2.io.rsFeedback.bits.hit && load_s2.io.rsFeedback.bits.sourceType === RSFeedbackType.mshrFull) 592 val s3_refill_hit_load_paddr = refill_addr_hit(RegNext(load_s2.io.out.bits.paddr), io.refill.bits.addr) 593 // update replay request 594 io.feedbackSlow.bits.hit := RegNext(load_s2.io.rsFeedback.bits).hit || 595 s3_refill_hit_load_paddr && s3_replay_for_mshrfull 596 597 // feedback bank conflict to rs 598 io.feedbackFast.bits := load_s1.io.rsFeedback.bits 599 io.feedbackFast.valid := load_s1.io.rsFeedback.valid 600 // If replay is reported at load_s1, inst will be canceled (will not enter load_s2), 601 // in that case: 602 // * replay should not be reported twice 603 assert(!(RegNext(RegNext(io.feedbackFast.valid)) && io.feedbackSlow.valid)) 604 // * io.fastUop.valid should not be reported 605 assert(!RegNext(RegNext(io.feedbackFast.valid) && io.fastUop.valid)) 606 607 // pre-calcuate sqIdx mask in s0, then send it to lsq in s1 for forwarding 608 val sqIdxMaskReg = RegNext(UIntToMask(load_s0.io.in.bits.uop.sqIdx.value, StoreQueueSize)) 609 io.lsq.forward.sqIdxMask := sqIdxMaskReg 610 611 // // use s2_hit_way to select data received in s1 612 // load_s2.io.dcacheResp.bits.data := Mux1H(RegNext(io.dcache.s1_hit_way), RegNext(io.dcache.s1_data)) 613 // assert(load_s2.io.dcacheResp.bits.data === io.dcache.resp.bits.data) 614 615 // now io.fastUop.valid is sent to RS in load_s2 616 io.fastUop.valid := RegNext( 617 io.dcache.s1_hit_way.orR && // dcache hit 618 !io.dcache.s1_disable_fast_wakeup && // load fast wakeup should be disabled when dcache data read is not ready 619 load_s1.io.in.valid && // valid laod request 620 !load_s1.io.fastUopKill && // not mmio or tlb miss 621 !io.lsq.forward.dataInvalidFast && // forward failed 622 !load_s1.io.needLdVioCheckRedo // load-load violation check: load paddr cam struct hazard 623 ) && !RegNext(load_s1.io.out.bits.uop.robIdx.needFlush(io.redirect)) 624 io.fastUop.bits := RegNext(load_s1.io.out.bits.uop) 625 626 XSDebug(load_s0.io.out.valid, 627 p"S0: pc ${Hexadecimal(load_s0.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s0.io.out.bits.uop.lqIdx.asUInt)}, " + 628 p"vaddr ${Hexadecimal(load_s0.io.out.bits.vaddr)}, mask ${Hexadecimal(load_s0.io.out.bits.mask)}\n") 629 XSDebug(load_s1.io.out.valid, 630 p"S1: pc ${Hexadecimal(load_s1.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s1.io.out.bits.uop.lqIdx.asUInt)}, tlb_miss ${io.tlb.resp.bits.miss}, " + 631 p"paddr ${Hexadecimal(load_s1.io.out.bits.paddr)}, mmio ${load_s1.io.out.bits.mmio}\n") 632 633 // writeback to LSQ 634 // Current dcache use MSHR 635 // Load queue will be updated at s2 for both hit/miss int/fp load 636 io.lsq.loadIn.valid := load_s2.io.out.valid 637 io.lsq.loadIn.bits := load_s2.io.out.bits 638 639 // write to rob and writeback bus 640 val s2_wb_valid = load_s2.io.out.valid && !load_s2.io.out.bits.miss && !load_s2.io.out.bits.mmio 641 642 // Int load, if hit, will be writebacked at s2 643 val hitLoadOut = Wire(Valid(new ExuOutput)) 644 hitLoadOut.valid := s2_wb_valid 645 hitLoadOut.bits.uop := load_s2.io.out.bits.uop 646 hitLoadOut.bits.data := load_s2.io.out.bits.data 647 hitLoadOut.bits.redirectValid := false.B 648 hitLoadOut.bits.redirect := DontCare 649 hitLoadOut.bits.debug.isMMIO := load_s2.io.out.bits.mmio 650 hitLoadOut.bits.debug.isPerfCnt := false.B 651 hitLoadOut.bits.debug.paddr := load_s2.io.out.bits.paddr 652 hitLoadOut.bits.debug.vaddr := load_s2.io.out.bits.vaddr 653 hitLoadOut.bits.fflags := DontCare 654 655 load_s2.io.out.ready := true.B 656 657 val load_wb_reg = RegNext(Mux(hitLoadOut.valid, hitLoadOut.bits, io.lsq.ldout.bits)) 658 io.ldout.bits := load_wb_reg 659 io.ldout.valid := RegNext(hitLoadOut.valid) && !RegNext(load_s2.io.out.bits.uop.robIdx.needFlush(io.redirect)) || 660 RegNext(io.lsq.ldout.valid) && !RegNext(io.lsq.ldout.bits.uop.robIdx.needFlush(io.redirect)) && !RegNext(hitLoadOut.valid) 661 662 // io.ldout.bits.uop.cf.exceptionVec(loadAccessFault) := load_wb_reg.uop.cf.exceptionVec(loadAccessFault) || 663 // hitLoadOut.valid && load_s2.io.delayedLoadError 664 665 // io.delayedLoadError := false.B 666 667 io.delayedLoadError := hitLoadOut.valid && load_s2.io.delayedLoadError 668 669 io.lsq.ldout.ready := !hitLoadOut.valid 670 671 when(io.feedbackSlow.valid && !io.feedbackSlow.bits.hit){ 672 // when need replay from rs, inst should not be writebacked to rob 673 assert(RegNext(!hitLoadOut.valid)) 674 // when need replay from rs 675 // * inst should not be writebacked to lq, or 676 // * lq state will be updated in load_s3 (next cycle) 677 assert(RegNext(!io.lsq.loadIn.valid) || RegNext(load_s2.io.dcacheRequireReplay)) 678 } 679 680 val lastValidData = RegEnable(io.ldout.bits.data, io.ldout.fire()) 681 val hitLoadAddrTriggerHitVec = Wire(Vec(3, Bool())) 682 val lqLoadAddrTriggerHitVec = io.lsq.trigger.lqLoadAddrTriggerHitVec 683 (0 until 3).map{i => { 684 val tdata2 = io.trigger(i).tdata2 685 val matchType = io.trigger(i).matchType 686 val tEnable = io.trigger(i).tEnable 687 688 hitLoadAddrTriggerHitVec(i) := TriggerCmp(load_s2.io.out.bits.vaddr, tdata2, matchType, tEnable) 689 io.trigger(i).addrHit := Mux(hitLoadOut.valid, hitLoadAddrTriggerHitVec(i), lqLoadAddrTriggerHitVec(i)) 690 io.trigger(i).lastDataHit := TriggerCmp(lastValidData, tdata2, matchType, tEnable) 691 }} 692 io.lsq.trigger.hitLoadAddrTriggerHitVec := hitLoadAddrTriggerHitVec 693 694 val perfEvents = Seq( 695 ("load_s0_in_fire ", load_s0.io.in.fire() ), 696 ("load_to_load_forward ", load_s0.io.loadFastMatch.orR && load_s0.io.in.fire() ), 697 ("stall_dcache ", load_s0.io.out.valid && load_s0.io.out.ready && !load_s0.io.dcacheReq.ready ), 698 ("addr_spec_success ", load_s0.io.out.fire() && load_s0.io.dtlbReq.bits.vaddr(VAddrBits-1, 12) === load_s0.io.in.bits.src(0)(VAddrBits-1, 12) ), 699 ("addr_spec_failed ", load_s0.io.out.fire() && load_s0.io.dtlbReq.bits.vaddr(VAddrBits-1, 12) =/= load_s0.io.in.bits.src(0)(VAddrBits-1, 12) ), 700 ("load_s1_in_fire ", load_s1.io.in.fire ), 701 ("load_s1_tlb_miss ", load_s1.io.in.fire && load_s1.io.dtlbResp.bits.miss ), 702 ("load_s2_in_fire ", load_s2.io.in.fire ), 703 ("load_s2_dcache_miss ", load_s2.io.in.fire && load_s2.io.dcacheResp.bits.miss ), 704 ("load_s2_replay ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit ), 705 ("load_s2_replay_tlb_miss ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit && load_s2.io.in.bits.tlbMiss ), 706 ("load_s2_replay_cache ", load_s2.io.rsFeedback.valid && !load_s2.io.rsFeedback.bits.hit && !load_s2.io.in.bits.tlbMiss && load_s2.io.dcacheResp.bits.miss), 707 ) 708 generatePerfEvent() 709 710 when(io.ldout.fire()){ 711 XSDebug("ldout %x\n", io.ldout.bits.uop.cf.pc) 712 } 713} 714