1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16 17package xiangshan.mem 18 19import org.chipsalliance.cde.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import utils._ 23import utility._ 24import xiangshan._ 25import xiangshan.cache.{AtomicWordIO, HasDCacheParameters, MemoryOpConstants} 26import xiangshan.cache.mmu.{TlbCmd, TlbRequestIO} 27import difftest._ 28import xiangshan.ExceptionNO._ 29import xiangshan.backend.fu.PMPRespBundle 30import xiangshan.backend.Bundles.{MemExuInput, MemExuOutput} 31import xiangshan.backend.fu.NewCSR.TriggerUtil 32import xiangshan.backend.fu.util.SdtrigExt 33 34class AtomicsUnit(implicit p: Parameters) extends XSModule 35 with MemoryOpConstants 36 with HasDCacheParameters 37 with SdtrigExt{ 38 val io = IO(new Bundle() { 39 val hartId = Input(UInt(hartIdLen.W)) 40 val in = Flipped(Decoupled(new MemExuInput)) 41 val storeDataIn = Flipped(Valid(new MemExuOutput)) // src2 from rs 42 val out = Decoupled(new MemExuOutput) 43 val dcache = new AtomicWordIO 44 val dtlb = new TlbRequestIO(2) 45 val pmpResp = Flipped(new PMPRespBundle()) 46 val flush_sbuffer = new SbufferFlushBundle 47 val feedbackSlow = ValidIO(new RSFeedback) 48 val redirect = Flipped(ValidIO(new Redirect)) 49 val exceptionInfo = ValidIO(new Bundle { 50 val vaddr = UInt(XLEN.W) 51 val gpaddr = UInt(XLEN.W) 52 val isForVSnonLeafPTE = Bool() 53 }) 54 val csrCtrl = Flipped(new CustomCSRCtrlIO) 55 }) 56 57 //------------------------------------------------------- 58 // Atomics Memory Accsess FSM 59 //------------------------------------------------------- 60 val s_invalid :: s_tlb_and_flush_sbuffer_req :: s_pm :: s_wait_flush_sbuffer_resp :: s_cache_req :: s_cache_resp :: s_cache_resp_latch :: s_finish :: Nil = Enum(8) 61 val state = RegInit(s_invalid) 62 val out_valid = RegInit(false.B) 63 val data_valid = RegInit(false.B) 64 val in = Reg(new MemExuInput()) 65 val exceptionVec = RegInit(0.U.asTypeOf(ExceptionVec())) 66 val trigger = RegInit(TriggerAction.None) 67 val atom_override_xtval = RegInit(false.B) 68 val have_sent_first_tlb_req = RegInit(false.B) 69 // paddr after translation 70 val paddr = Reg(UInt()) 71 val gpaddr = Reg(UInt()) 72 val vaddr = in.src(0) 73 val is_mmio = Reg(Bool()) 74 val isForVSnonLeafPTE = Reg(Bool()) 75 76 // dcache response data 77 val resp_data = Reg(UInt()) 78 val resp_data_wire = WireInit(0.U) 79 val is_lrsc_valid = Reg(Bool()) 80 // sbuffer is empty or not 81 val sbuffer_empty = io.flush_sbuffer.empty 82 83 84 // Difftest signals 85 val paddr_reg = Reg(UInt(64.W)) 86 val data_reg = Reg(UInt(64.W)) 87 val mask_reg = Reg(UInt(8.W)) 88 val fuop_reg = Reg(UInt(8.W)) 89 90 io.exceptionInfo.valid := atom_override_xtval 91 io.exceptionInfo.bits.vaddr := in.src(0) 92 io.exceptionInfo.bits.gpaddr := gpaddr 93 io.exceptionInfo.bits.isForVSnonLeafPTE := isForVSnonLeafPTE 94 95 // assign default value to output signals 96 io.in.ready := false.B 97 98 io.dcache.req.valid := false.B 99 io.dcache.req.bits := DontCare 100 101 io.dtlb.req.valid := false.B 102 io.dtlb.req.bits := DontCare 103 io.dtlb.req_kill := false.B 104 io.dtlb.resp.ready := true.B 105 106 io.flush_sbuffer.valid := false.B 107 108 when (state === s_invalid) { 109 io.in.ready := true.B 110 when (io.in.fire) { 111 in := io.in.bits 112 in.src(1) := in.src(1) // leave src2 unchanged 113 state := s_tlb_and_flush_sbuffer_req 114 have_sent_first_tlb_req := false.B 115 } 116 } 117 118 when (io.storeDataIn.fire) { 119 in.src(1) := io.storeDataIn.bits.data 120 data_valid := true.B 121 } 122 123 // TODO: remove this for AMOCAS 124 assert(!(io.storeDataIn.fire && data_valid), "atomic unit re-receive data") 125 126 // Send TLB feedback to store issue queue 127 // we send feedback right after we receives request 128 // also, we always treat amo as tlb hit 129 // since we will continue polling tlb all by ourself 130 io.feedbackSlow.valid := GatedValidRegNext(GatedValidRegNext(io.in.valid)) 131 io.feedbackSlow.bits.hit := true.B 132 io.feedbackSlow.bits.robIdx := RegEnable(io.in.bits.uop.robIdx, io.in.valid) 133 io.feedbackSlow.bits.sqIdx := RegEnable(io.in.bits.uop.sqIdx, io.in.valid) 134 io.feedbackSlow.bits.lqIdx := RegEnable(io.in.bits.uop.lqIdx, io.in.valid) 135 io.feedbackSlow.bits.flushState := DontCare 136 io.feedbackSlow.bits.sourceType := DontCare 137 io.feedbackSlow.bits.dataInvalidSqIdx := DontCare 138 139 // atomic trigger 140 val csrCtrl = io.csrCtrl 141 val tdata = Reg(Vec(TriggerNum, new MatchTriggerIO)) 142 val tEnableVec = RegInit(VecInit(Seq.fill(TriggerNum)(false.B))) 143 tEnableVec := csrCtrl.mem_trigger.tEnableVec 144 when (csrCtrl.mem_trigger.tUpdate.valid) { 145 tdata(csrCtrl.mem_trigger.tUpdate.bits.addr) := csrCtrl.mem_trigger.tUpdate.bits.tdata 146 } 147 148 val debugMode = csrCtrl.mem_trigger.debugMode 149 val triggerCanRaiseBpExp = csrCtrl.mem_trigger.triggerCanRaiseBpExp 150 val backendTriggerTimingVec = VecInit(tdata.map(_.timing)) 151 val backendTriggerChainVec = VecInit(tdata.map(_.chain)) 152 val backendTriggerHitVec = WireInit(VecInit(Seq.fill(TriggerNum)(false.B))) 153 val backendTriggerCanFireVec = RegInit(VecInit(Seq.fill(TriggerNum)(false.B))) 154 155 assert(state === s_invalid || in.uop.fuOpType(1,0) === "b10".U || in.uop.fuOpType(1,0) === "b11".U, 156 "Only word or doubleword is supported") 157 val isLr = in.uop.fuOpType === LSUOpType.lr_w || in.uop.fuOpType === LSUOpType.lr_d 158 val isSc = in.uop.fuOpType === LSUOpType.sc_w || in.uop.fuOpType === LSUOpType.sc_d 159 val isNotLr = !isLr 160 val isNotSc = !isSc 161 162 // store trigger 163 val store_hit = Wire(Vec(TriggerNum, Bool())) 164 for (j <- 0 until TriggerNum) { 165 store_hit(j) := !tdata(j).select && !debugMode && isNotLr && TriggerCmp( 166 vaddr, 167 tdata(j).tdata2, 168 tdata(j).matchType, 169 tEnableVec(j) && tdata(j).store 170 ) 171 } 172 // load trigger 173 val load_hit = Wire(Vec(TriggerNum, Bool())) 174 for (j <- 0 until TriggerNum) { 175 load_hit(j) := !tdata(j).select && !debugMode && isNotSc && TriggerCmp( 176 vaddr, 177 tdata(j).tdata2, 178 tdata(j).matchType, 179 tEnableVec(j) && tdata(j).load 180 ) 181 } 182 backendTriggerHitVec := store_hit.zip(load_hit).map { case (sh, lh) => sh || lh } 183 // triggerCanFireVec will update at T+1 184 TriggerCheckCanFire(TriggerNum, backendTriggerCanFireVec, backendTriggerHitVec, backendTriggerTimingVec, backendTriggerChainVec) 185 186 val actionVec = VecInit(tdata.map(_.action)) 187 val triggerAction = Wire(TriggerAction()) 188 TriggerUtil.triggerActionGen(triggerAction, backendTriggerCanFireVec, actionVec, triggerCanRaiseBpExp) 189 val triggerDebugMode = TriggerAction.isDmode(triggerAction) 190 val triggerBreakpoint = TriggerAction.isExp(triggerAction) 191 192 // tlb translation, manipulating signals && deal with exception 193 // at the same time, flush sbuffer 194 when (state === s_tlb_and_flush_sbuffer_req) { 195 // send req to dtlb 196 // keep firing until tlb hit 197 io.dtlb.req.valid := true.B 198 io.dtlb.req.bits.vaddr := in.src(0) 199 io.dtlb.req.bits.fullva := in.src(0) 200 io.dtlb.req.bits.checkfullva := true.B 201 io.dtlb.resp.ready := true.B 202 io.dtlb.req.bits.cmd := Mux(isLr, TlbCmd.atom_read, TlbCmd.atom_write) 203 io.dtlb.req.bits.debug.pc := in.uop.pc 204 io.dtlb.req.bits.debug.robIdx := in.uop.robIdx 205 io.dtlb.req.bits.debug.isFirstIssue := false.B 206 io.out.bits.uop.debugInfo.tlbFirstReqTime := GTimer() // FIXME lyq: it will be always assigned 207 208 // send req to sbuffer to flush it if it is not empty 209 io.flush_sbuffer.valid := !sbuffer_empty 210 211 // do not accept tlb resp in the first cycle 212 // this limition is for hw prefetcher 213 // when !have_sent_first_tlb_req, tlb resp may come from hw prefetch 214 have_sent_first_tlb_req := true.B 215 216 when (io.dtlb.resp.fire && have_sent_first_tlb_req){ 217 paddr := io.dtlb.resp.bits.paddr(0) 218 gpaddr := io.dtlb.resp.bits.gpaddr(0) 219 isForVSnonLeafPTE := io.dtlb.resp.bits.isForVSnonLeafPTE 220 // exception handling 221 val addrAligned = LookupTree(in.uop.fuOpType(1,0), List( 222 "b10".U -> (in.src(0)(1,0) === 0.U), //w 223 "b11".U -> (in.src(0)(2,0) === 0.U) //d 224 )) 225 exceptionVec(loadAddrMisaligned) := !addrAligned && isLr 226 exceptionVec(storeAddrMisaligned) := !addrAligned && !isLr 227 exceptionVec(storePageFault) := io.dtlb.resp.bits.excp(0).pf.st 228 exceptionVec(loadPageFault) := io.dtlb.resp.bits.excp(0).pf.ld 229 exceptionVec(storeAccessFault) := io.dtlb.resp.bits.excp(0).af.st 230 exceptionVec(loadAccessFault) := io.dtlb.resp.bits.excp(0).af.ld 231 exceptionVec(storeGuestPageFault) := io.dtlb.resp.bits.excp(0).gpf.st 232 exceptionVec(loadGuestPageFault) := io.dtlb.resp.bits.excp(0).gpf.ld 233 234 exceptionVec(breakPoint) := triggerBreakpoint 235 trigger := triggerAction 236 237 when (!io.dtlb.resp.bits.miss) { 238 io.out.bits.uop.debugInfo.tlbRespTime := GTimer() 239 when (!addrAligned || triggerDebugMode || triggerBreakpoint) { 240 // NOTE: when addrAligned or trigger fire, do not need to wait tlb actually 241 // check for miss aligned exceptions, tlb exception are checked next cycle for timing 242 // if there are exceptions, no need to execute it 243 state := s_finish 244 out_valid := true.B 245 atom_override_xtval := true.B 246 } .otherwise { 247 state := s_pm 248 } 249 } 250 } 251 } 252 253 when (state === s_pm) { 254 val pmp = WireInit(io.pmpResp) 255 is_mmio := pmp.mmio 256 257 // NOTE: only handle load/store exception here, if other exception happens, don't send here 258 val exception_va = exceptionVec(storePageFault) || exceptionVec(loadPageFault) || 259 exceptionVec(storeGuestPageFault) || exceptionVec(loadGuestPageFault) || 260 exceptionVec(storeAccessFault) || exceptionVec(loadAccessFault) 261 val exception_pa = pmp.st || pmp.ld || pmp.mmio 262 when (exception_va || exception_pa) { 263 state := s_finish 264 out_valid := true.B 265 atom_override_xtval := true.B 266 }.otherwise { 267 // if sbuffer has been flushed, go to query dcache, otherwise wait for sbuffer. 268 state := Mux(sbuffer_empty, s_cache_req, s_wait_flush_sbuffer_resp); 269 } 270 // update storeAccessFault bit 271 exceptionVec(loadAccessFault) := exceptionVec(loadAccessFault) || (pmp.ld || pmp.mmio) && isLr 272 exceptionVec(storeAccessFault) := exceptionVec(storeAccessFault) || pmp.st || (pmp.ld || pmp.mmio) && !isLr 273 } 274 275 when (state === s_wait_flush_sbuffer_resp) { 276 when (sbuffer_empty) { 277 state := s_cache_req 278 } 279 } 280 281 when (state === s_cache_req) { 282 val pipe_req = io.dcache.req.bits 283 pipe_req := DontCare 284 285 pipe_req.cmd := LookupTree(in.uop.fuOpType, List( 286 LSUOpType.lr_w -> M_XLR, 287 LSUOpType.sc_w -> M_XSC, 288 LSUOpType.amoswap_w -> M_XA_SWAP, 289 LSUOpType.amoadd_w -> M_XA_ADD, 290 LSUOpType.amoxor_w -> M_XA_XOR, 291 LSUOpType.amoand_w -> M_XA_AND, 292 LSUOpType.amoor_w -> M_XA_OR, 293 LSUOpType.amomin_w -> M_XA_MIN, 294 LSUOpType.amomax_w -> M_XA_MAX, 295 LSUOpType.amominu_w -> M_XA_MINU, 296 LSUOpType.amomaxu_w -> M_XA_MAXU, 297 298 LSUOpType.lr_d -> M_XLR, 299 LSUOpType.sc_d -> M_XSC, 300 LSUOpType.amoswap_d -> M_XA_SWAP, 301 LSUOpType.amoadd_d -> M_XA_ADD, 302 LSUOpType.amoxor_d -> M_XA_XOR, 303 LSUOpType.amoand_d -> M_XA_AND, 304 LSUOpType.amoor_d -> M_XA_OR, 305 LSUOpType.amomin_d -> M_XA_MIN, 306 LSUOpType.amomax_d -> M_XA_MAX, 307 LSUOpType.amominu_d -> M_XA_MINU, 308 LSUOpType.amomaxu_d -> M_XA_MAXU 309 )) 310 pipe_req.miss := false.B 311 pipe_req.probe := false.B 312 pipe_req.probe_need_data := false.B 313 pipe_req.source := AMO_SOURCE.U 314 pipe_req.addr := get_block_addr(paddr) 315 pipe_req.vaddr := get_block_addr(in.src(0)) // vaddr 316 pipe_req.word_idx := get_word(paddr) 317 pipe_req.amo_data := genWdata(in.src(1), in.uop.fuOpType(1,0)) 318 pipe_req.amo_mask := genWmask(paddr, in.uop.fuOpType(1,0)) 319 320 io.dcache.req.valid := Mux( 321 io.dcache.req.bits.cmd === M_XLR, 322 !io.dcache.block_lr, // block lr to survive in lr storm 323 data_valid // wait until src(1) is ready 324 ) 325 326 when(io.dcache.req.fire){ 327 state := s_cache_resp 328 paddr_reg := paddr 329 data_reg := io.dcache.req.bits.amo_data 330 mask_reg := io.dcache.req.bits.amo_mask 331 fuop_reg := in.uop.fuOpType 332 } 333 } 334 335 val dcache_resp_data = Reg(UInt()) 336 val dcache_resp_id = Reg(UInt()) 337 val dcache_resp_error = Reg(Bool()) 338 339 when (state === s_cache_resp) { 340 // when not miss 341 // everything is OK, simply send response back to sbuffer 342 // when miss and not replay 343 // wait for missQueue to handling miss and replaying our request 344 // when miss and replay 345 // req missed and fail to enter missQueue, manually replay it later 346 // TODO: add assertions: 347 // 1. add a replay delay counter? 348 // 2. when req gets into MissQueue, it should not miss any more 349 when(io.dcache.resp.fire) { 350 when(io.dcache.resp.bits.miss) { 351 when(io.dcache.resp.bits.replay) { 352 state := s_cache_req 353 } 354 } .otherwise { 355 dcache_resp_data := io.dcache.resp.bits.data 356 dcache_resp_id := io.dcache.resp.bits.id 357 dcache_resp_error := io.dcache.resp.bits.error 358 state := s_cache_resp_latch 359 } 360 } 361 } 362 363 when (state === s_cache_resp_latch) { 364 is_lrsc_valid := dcache_resp_id 365 val rdataSel = LookupTree(paddr(2, 0), List( 366 "b000".U -> dcache_resp_data(63, 0), 367 "b100".U -> dcache_resp_data(63, 32) 368 )) 369 370 resp_data_wire := Mux( 371 isSc, 372 dcache_resp_data, 373 LookupTree(in.uop.fuOpType(1,0), List( 374 "b10".U -> SignExt(rdataSel(31, 0), XLEN), // w 375 "b11".U -> SignExt(rdataSel(63, 0), XLEN) // d 376 )) 377 ) 378 379 when (dcache_resp_error && io.csrCtrl.cache_error_enable) { 380 exceptionVec(loadAccessFault) := isLr 381 exceptionVec(storeAccessFault) := !isLr 382 assert(!exceptionVec(loadAccessFault)) 383 assert(!exceptionVec(storeAccessFault)) 384 } 385 386 resp_data := resp_data_wire 387 state := s_finish 388 out_valid := true.B 389 } 390 391 io.out.valid := out_valid 392 XSError((state === s_finish) =/= out_valid, "out_valid reg error\n") 393 io.out.bits := DontCare 394 io.out.bits.uop := in.uop 395 io.out.bits.uop.exceptionVec := exceptionVec 396 io.out.bits.uop.trigger := trigger 397 io.out.bits.data := resp_data 398 io.out.bits.debug.isMMIO := is_mmio 399 io.out.bits.debug.paddr := paddr 400 when (io.out.fire) { 401 XSDebug("atomics writeback: pc %x data %x\n", io.out.bits.uop.pc, io.dcache.resp.bits.data) 402 state := s_invalid 403 out_valid := false.B 404 } 405 406 when (state === s_finish) { 407 data_valid := false.B 408 } 409 410 when (io.redirect.valid) { 411 atom_override_xtval := false.B 412 } 413 414 if (env.EnableDifftest) { 415 val difftest = DifftestModule(new DiffAtomicEvent) 416 difftest.coreid := io.hartId 417 difftest.valid := state === s_cache_resp_latch 418 difftest.addr := paddr_reg 419 difftest.data := data_reg 420 difftest.mask := mask_reg 421 difftest.fuop := fuop_reg 422 difftest.out := resp_data_wire 423 } 424 425 if (env.EnableDifftest || env.AlwaysBasicDiff) { 426 val uop = io.out.bits.uop 427 val difftest = DifftestModule(new DiffLrScEvent) 428 difftest.coreid := io.hartId 429 difftest.valid := io.out.fire && 430 (uop.fuOpType === LSUOpType.sc_d || uop.fuOpType === LSUOpType.sc_w) 431 difftest.success := is_lrsc_valid 432 } 433} 434