/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink.{ClientMetadata, ClientStates, TLPermissions}
import utils._
import utility._
import xiangshan.{L1CacheErrorInfo, XSCoreParamsKey}
import xiangshan.mem.prefetch._
import xiangshan.mem.HasL1PrefetchSourceParameter

class MainPipeReq(implicit p: Parameters) extends DCacheBundle {
  val miss = Bool() // only amo miss will refill in main pipe
  val miss_id = UInt(log2Up(cfg.nMissEntries).W)
  val miss_param = UInt(TLPermissions.bdWidth.W)
  val miss_dirty = Bool()

  val probe = Bool()
  val probe_param = UInt(TLPermissions.bdWidth.W)
  val probe_need_data = Bool()

  // request info
  // reqs from Store, AMO use this
  // probe does not use this
  val source = UInt(sourceTypeWidth.W)
  val cmd = UInt(M_SZ.W)
  // if dcache size > 32KB, vaddr is also needed for store
  // vaddr is used to get extra index bits
  val vaddr  = UInt(VAddrBits.W)
  // must be aligned to block
  val addr   = UInt(PAddrBits.W)

  // store
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  // which word does amo work on?
  val word_idx = UInt(log2Up(cfg.blockBytes * 8 / DataBits).W)
  val amo_data   = UInt(QuadWordBits.W)
  val amo_mask   = UInt(QuadWordBytes.W)
  val amo_cmp    = UInt(QuadWordBits.W) // data to be compared in AMOCAS

  // error
  val error = Bool()

  // replace
  val replace = Bool()
  val replace_way_en = UInt(DCacheWays.W)

  // prefetch
  val pf_source = UInt(L1PfSourceBits.W)
  val access = Bool()

  val id = UInt(reqIdWidth.W)

  def isLoad: Bool = source === LOAD_SOURCE.U
  def isStore: Bool = source === STORE_SOURCE.U
  def isAMO: Bool = source === AMO_SOURCE.U

  def quad_word_idx = word_idx >> 1

  def convertStoreReq(store: DCacheLineReq): MainPipeReq = {
    val req = Wire(new MainPipeReq)
    req := DontCare
    req.miss := false.B
    req.miss_dirty := false.B
    req.probe := false.B
    req.probe_need_data := false.B
    req.source := STORE_SOURCE.U
    req.cmd := store.cmd
    req.addr := store.addr
    req.vaddr := store.vaddr
    req.store_data := store.data
    req.store_mask := store.mask
    req.replace := false.B
    req.error := false.B
    req.id := store.id
    req
  }
}

class MainPipeStatus(implicit p: Parameters) extends DCacheBundle {
  val set = UInt(idxBits.W)
  val way_en = UInt(nWays.W)
}

class MainPipeInfoToMQ(implicit p:Parameters) extends DCacheBundle {
  val s2_valid = Bool()
  val s2_miss_id = UInt(log2Up(cfg.nMissEntries).W) // For refill data selection
  val s2_replay_to_mq = Bool()
  val s3_valid = Bool()
  val s3_miss_id = UInt(log2Up(cfg.nMissEntries).W) // For mshr release
  val s3_refill_resp = Bool()
}

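// MainPipe is the DCache main pipeline. It arbitrates probe, refill (miss),
// store and atomic requests into a 4-stage pipeline (a summary of the s0-s3
// comments below):
//   s0: arbitrate requests, read meta and tag
//   s1: tag compare and way selection, issue the data read
//   s2: select data, return a resp to the miss queue if this is a store miss
//   s3: write data, meta and tag; handle LR/SC/AMO and writeback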
class MainPipe(implicit p: Parameters) extends DCacheModule with HasPerfEvents with HasL1PrefetchSourceParameter {
  val io = IO(new Bundle() {
    // probe queue
    val probe_req = Flipped(DecoupledIO(new MainPipeReq))
    // store misses go to the miss queue
    val miss_req = DecoupledIO(new MissReq)
    val miss_resp = Input(new MissResp) // miss resp is used to support plru update
    val refill_req = Flipped(DecoupledIO(new MainPipeReq))
    // send miss request to wbq
    val wbq_conflict_check = Valid(UInt())
    val wbq_block_miss_req = Input(Bool())
    // store buffer
    val store_req = Flipped(DecoupledIO(new DCacheLineReq))
    val store_replay_resp = ValidIO(new DCacheLineResp)
    val store_hit_resp = ValidIO(new DCacheLineResp)
    // atomics
    val atomic_req = Flipped(DecoupledIO(new MainPipeReq))
    val atomic_resp = ValidIO(new MainPipeResp)
    // find matched refill data in miss entry
    val mainpipe_info = Output(new MainPipeInfoToMQ)
    // missqueue refill data
    val refill_info = Flipped(ValidIO(new MissQueueRefillInfo))
    // write-back queue
    val wb = DecoupledIO(new WritebackReq)
    val wb_ready_dup = Vec(nDupWbReady, Input(Bool()))

    // data sram
    val data_read = Vec(LoadPipelineWidth, Input(Bool()))
    val data_read_intend = Output(Bool())
    val data_readline = DecoupledIO(new L1BankedDataReadLineReq)
    val data_readline_can_go = Output(Bool())
    val data_readline_stall = Output(Bool())
    val data_readline_can_resp = Output(Bool())
    val data_resp = Input(Vec(DCacheBanks, new L1BankedDataReadResult()))
    val readline_error_delayed = Input(Bool())
    val data_write = DecoupledIO(new L1BankedDataWriteReq)
    val data_write_dup = Vec(DCacheBanks, Valid(new L1BankedDataWriteReqCtrl))
    val data_write_ready_dup = Vec(nDupDataWriteReady, Input(Bool()))

    // meta array
    val meta_read = DecoupledIO(new MetaReadReq)
    val meta_resp = Input(Vec(nWays, new Meta))
    val meta_write = DecoupledIO(new CohMetaWriteReq)
    val extra_meta_resp = Input(Vec(nWays, new DCacheExtraMeta))
    val error_flag_write = DecoupledIO(new FlagMetaWriteReq)
    val prefetch_flag_write = DecoupledIO(new SourceMetaWriteReq)
    val access_flag_write = DecoupledIO(new FlagMetaWriteReq)

    // tag sram
    val tag_read = DecoupledIO(new TagReadReq)
    val tag_resp = Input(Vec(nWays, UInt(encTagBits.W)))
    val tag_write = DecoupledIO(new TagWriteReq)
    val tag_write_ready_dup = Vec(nDupTagWriteReady, Input(Bool()))
    val tag_write_intend = Output(new Bool())

    // update state vec in replacement algo
    val replace_access = ValidIO(new ReplacementAccessBundle)
    // find the way to be replaced
    val replace_way = new ReplacementWayReqIO

    // writeback addr to be replaced
    val replace_addr = ValidIO(UInt(PAddrBits.W))
    val replace_block = Input(Bool())

    // sms prefetch
    val sms_agt_evict_req = DecoupledIO(new AGTEvictReq)

    val status = new Bundle() {
      val s0_set = ValidIO(UInt(idxBits.W))
      val s1, s2, s3 = ValidIO(new MainPipeStatus)
    }
    val status_dup = Vec(nDupStatus, new Bundle() {
      val s1, s2, s3 = ValidIO(new MainPipeStatus)
    })

    // lrsc locked block should block probe
    val lrsc_locked_block = Output(Valid(UInt(PAddrBits.W)))
    val invalid_resv_set = Input(Bool())
    val update_resv_set = Output(Bool())
    val block_lr = Output(Bool())

    // ecc error
    val error = Output(ValidIO(new L1CacheErrorInfo))
    val pseudo_error = Flipped(DecoupledIO(Vec(DCacheBanks, new CtrlUnitSignalingBundle)))
    val pseudo_tag_error_inj_done = Output(Bool())
    val pseudo_data_error_inj_done = Output(Bool())
    // force write
    val force_write = Input(Bool())

    val bloom_filter_query = new Bundle {
      val set = ValidIO(new BloomQueryBundle(BLOOM_FILTER_ENTRY_NUM))
      val clr = ValidIO(new BloomQueryBundle(BLOOM_FILTER_ENTRY_NUM))
    }
  })

  // meta array is made of regs, so meta write or read should always be ready
  assert(RegNext(io.meta_read.ready))
  assert(RegNext(io.meta_write.ready))

  val s1_s0_set_conflict, s2_s0_set_conflict, s3_s0_set_conflict = Wire(Bool())
  val set_conflict = s1_s0_set_conflict || s2_s0_set_conflict || s3_s0_set_conflict
  // check sbuffer store req set_conflict in parallel with req arbiter
  // it will speed up the generation of store_req.ready, which is on the critical path
  val s1_s0_set_conflict_store, s2_s0_set_conflict_store, s3_s0_set_conflict_store = Wire(Bool())
  val store_set_conflict = s1_s0_set_conflict_store || s2_s0_set_conflict_store || s3_s0_set_conflict_store
  val s1_ready, s2_ready, s3_ready = Wire(Bool())

  // convert store req to main pipe req, and select a req from store and probe
  val storeWaitCycles = RegInit(0.U(4.W))
  val StoreWaitThreshold = Wire(UInt(4.W))
  StoreWaitThreshold := Constantin.createRecord(s"StoreWaitThreshold_${p(XSCoreParamsKey).HartId}", initValue = 0)
  val storeWaitTooLong = storeWaitCycles >= StoreWaitThreshold
  val loadsAreComing = io.data_read.asUInt.orR
  val storeCanAccept = storeWaitTooLong || !loadsAreComing || io.force_write

  val store_req = Wire(DecoupledIO(new MainPipeReq))
  store_req.bits := (new MainPipeReq).convertStoreReq(io.store_req.bits)
  store_req.valid := io.store_req.valid && storeCanAccept
  io.store_req.ready := store_req.ready && storeCanAccept


  when (store_req.fire) { // once the store is accepted, reset the wait counter
    storeWaitCycles := 0.U
  } .elsewhen (storeWaitCycles < StoreWaitThreshold && io.store_req.valid && !store_req.ready) { // while the store is blocked, count up
    storeWaitCycles := storeWaitCycles + 1.U
  }
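  // Note: stores from the store buffer are deprioritized while loads intend to
  // read data, until storeWaitCycles reaches the (Constantin-tunable)
  // StoreWaitThreshold or io.force_write is asserted; this trades store
  // latency for load bandwidth.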

  // s0: read meta and tag
  val req = Wire(DecoupledIO(new MainPipeReq))
  arbiter(
    in = Seq(
      io.probe_req,
      io.refill_req,
      store_req, // Note: store_req.ready is now manually assigned for better timing
      io.atomic_req,
    ),
    out = req,
    name = Some("main_pipe_req")
  )

  val store_idx = get_idx(io.store_req.bits.vaddr)
  // manually assign store_req.ready for better timing
  // now store_req set conflict check is done in parallel with req arbiter
  store_req.ready := io.meta_read.ready && io.tag_read.ready && s1_ready && !store_set_conflict &&
    !io.probe_req.valid && !io.refill_req.valid && !io.atomic_req.valid
  val s0_req = req.bits
  val s0_idx = get_idx(s0_req.vaddr)
  val s0_need_tag = io.tag_read.valid
  val s0_can_go = io.meta_read.ready && io.tag_read.ready && s1_ready && !set_conflict
  val s0_fire = req.valid && s0_can_go

  req.ready := s0_can_go

  val bank_write = VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, s0_req.store_mask).orR)).asUInt
  val bank_full_write = VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, s0_req.store_mask).andR)).asUInt
  val banks_full_overwrite = bank_full_write.andR

  val banked_store_rmask = bank_write & ~bank_full_write
  val banked_full_rmask = ~0.U(DCacheBanks.W)
  val banked_none_rmask = 0.U(DCacheBanks.W)

  val store_need_data = !s0_req.probe && s0_req.isStore && banked_store_rmask.orR
  val probe_need_data = s0_req.probe
  val amo_need_data = !s0_req.probe && s0_req.isAMO
  val miss_need_data = s0_req.miss
  val replace_need_data = s0_req.replace

  val banked_need_data = store_need_data || probe_need_data || amo_need_data || miss_need_data || replace_need_data

  val s0_banked_rmask = Mux(store_need_data, banked_store_rmask,
    Mux(probe_need_data || amo_need_data || miss_need_data || replace_need_data,
      banked_full_rmask,
      banked_none_rmask
    ))
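  // A bank is read only if the store partially overwrites it (read-modify-write);
  // fully overwritten banks need no read. Probe, AMO, miss refill and replace
  // requests always read the full line.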

  // generate wmask here and use it in stage 2
  val banked_store_wmask = bank_write
  val banked_full_wmask = ~0.U(DCacheBanks.W)
  val banked_none_wmask = 0.U(DCacheBanks.W)

  // s1: read data
  val s1_valid = RegInit(false.B)
  val s1_need_data = RegEnable(banked_need_data, s0_fire)
  val s1_req = RegEnable(s0_req, s0_fire)
  val s1_banked_rmask = RegEnable(s0_banked_rmask, s0_fire)
  val s1_banked_store_wmask = RegEnable(banked_store_wmask, s0_fire)
  val s1_need_tag = RegEnable(s0_need_tag, s0_fire)
  val s1_can_go = s2_ready && (io.data_readline.ready || !s1_need_data)
  val s1_fire = s1_valid && s1_can_go
  val s1_idx = get_idx(s1_req.vaddr)
  val s1_dmWay = RegEnable(get_direct_map_way(s0_req.vaddr), s0_fire)

  when (s0_fire) {
    s1_valid := true.B
  }.elsewhen (s1_fire) {
    s1_valid := false.B
  }
  s1_ready := !s1_valid || s1_can_go
  s1_s0_set_conflict := s1_valid && s0_idx === s1_idx
  s1_s0_set_conflict_store := s1_valid && store_idx === s1_idx

  def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))
  val meta_resp = Wire(Vec(nWays, (new Meta).asUInt))
  meta_resp := Mux(GatedValidRegNext(s0_fire), VecInit(io.meta_resp.map(_.asUInt)), RegEnable(meta_resp, s1_valid))
  // pseudo ecc enc tag
  val pseudo_tag_toggle_mask = Mux(
                                  io.pseudo_error.valid && io.pseudo_error.bits(0).valid,
                                  io.pseudo_error.bits(0).mask(tagBits - 1, 0),
                                  0.U(tagBits.W)
                              )
  val pseudo_encTag_resp = io.tag_resp.map {
    case real_enc =>
      if (cacheCtrlParamsOpt.nonEmpty && EnableTagEcc) {
        val ecc = real_enc(encTagBits - 1, tagBits)
        val toggleTag = real_enc(tagBits - 1, 0) ^ pseudo_tag_toggle_mask
        Cat(ecc, toggleTag)
      } else {
        real_enc
      }
  }
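  // Pseudo tag error injection (presumably for ECC testing): the toggle mask
  // flips the selected tag bits while the original ECC checksum is kept, so
  // the tag ECC decoder below observes a mismatch on the injected ways.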
  val encTag_resp = Wire(io.tag_resp.cloneType)
  encTag_resp := Mux(GatedValidRegNext(s0_fire), VecInit(pseudo_encTag_resp), RegEnable(encTag_resp, s1_valid))
  val tag_resp = encTag_resp.map(encTag => encTag(tagBits - 1, 0))
  val s1_meta_valids = wayMap((w: Int) => Meta(meta_resp(w)).coh.isValid()).asUInt
  val s1_tag_errors = wayMap((w: Int) => s1_meta_valids(w) && dcacheParameters.tagCode.decode(encTag_resp(w)).error).asUInt
  val s1_tag_eq_way = wayMap((w: Int) => tag_resp(w) === get_tag(s1_req.addr)).asUInt
  val s1_tag_ecc_eq_way = wayMap((w: Int) => s1_tag_eq_way(w) && !s1_tag_errors(w)).asUInt
  val s1_tag_ecc_match_way = wayMap((w: Int) => s1_tag_ecc_eq_way(w) && s1_meta_valids(w)).asUInt
  val s1_tag_match = ParallelORR(s1_tag_ecc_match_way)
  val s1_real_tag_match_way = Wire(UInt(nWays.W))
  s1_real_tag_match_way := Mux(
    GatedValidRegNext(s0_fire),
    wayMap((w: Int) => io.tag_resp(w)(tagBits - 1, 0) === get_tag(s1_req.addr) && s1_meta_valids(w)).asUInt,
    RegEnable(s1_real_tag_match_way, 0.U.asTypeOf(s1_real_tag_match_way.cloneType), s1_valid)
  )

  val s1_hit_tag = get_tag(s1_req.addr)
  val s1_hit_coh = ClientMetadata(ParallelMux(s1_tag_ecc_match_way.asBools, (0 until nWays).map(w => meta_resp(w))))
  val s1_flag_error = ParallelMux(s1_tag_ecc_match_way.asBools, (0 until nWays).map(w => io.extra_meta_resp(w).error))
  val s1_extra_meta = ParallelMux(s1_tag_ecc_match_way.asBools, (0 until nWays).map(w => io.extra_meta_resp(w)))
  io.pseudo_tag_error_inj_done := s1_fire && s1_meta_valids.orR

  XSPerfAccumulate("probe_unused_prefetch", s1_req.probe && isFromL1Prefetch(s1_extra_meta.prefetch) && !s1_extra_meta.access) // may not be accurate
  XSPerfAccumulate("replace_unused_prefetch", s1_req.replace && isFromL1Prefetch(s1_extra_meta.prefetch) && !s1_extra_meta.access) // may not be accurate

  // replacement policy
  val s1_invalid_vec = wayMap(w => !meta_resp(w).asTypeOf(new Meta).coh.isValid())
  val s1_have_invalid_way = s1_invalid_vec.asUInt.orR
  val s1_invalid_way_en = ParallelPriorityMux(s1_invalid_vec.zipWithIndex.map(x => x._1 -> UIntToOH(x._2.U(nWays.W))))
  val s1_repl_way_en = WireInit(0.U(nWays.W))
  s1_repl_way_en := Mux(
    GatedValidRegNext(s0_fire),
    UIntToOH(io.replace_way.way),
    RegEnable(s1_repl_way_en, s1_valid)
  )
  val s1_repl_tag = ParallelMux(s1_repl_way_en.asBools, (0 until nWays).map(w => tag_resp(w)))
  val s1_repl_coh = ParallelMux(s1_repl_way_en.asBools, (0 until nWays).map(w => meta_resp(w))).asTypeOf(new ClientMetadata)
  val s1_repl_pf  = ParallelMux(s1_repl_way_en.asBools, (0 until nWays).map(w => io.extra_meta_resp(w).prefetch))

  val s1_repl_way_raw = WireInit(0.U(log2Up(nWays).W))
  s1_repl_way_raw := Mux(GatedValidRegNext(s0_fire), io.replace_way.way, RegEnable(s1_repl_way_raw, s1_valid))

  val s1_need_replacement = s1_req.miss && !s1_tag_match
  val s1_need_eviction = s1_req.miss && !s1_tag_match && s1_repl_coh.state =/= ClientStates.Nothing

  val s1_no_error_way_en = Mux(s1_need_replacement, s1_repl_way_en, s1_real_tag_match_way)
  val s1_error_way_en = Mux(ParallelORR(s1_real_tag_match_way), s1_real_tag_match_way, s1_repl_way_en)
  val s1_way_en = Mux(io.pseudo_error.valid, s1_error_way_en, s1_no_error_way_en)
  assert(!RegNext(s1_fire && PopCount(s1_way_en) > 1.U))

  val s1_tag = s1_hit_tag
  val s1_coh = s1_hit_coh

  XSPerfAccumulate("store_has_invalid_way_but_select_valid_way", io.replace_way.set.valid && wayMap(w => !meta_resp(w).asTypeOf(new Meta).coh.isValid()).asUInt.orR && s1_need_replacement && s1_repl_coh.isValid())
  XSPerfAccumulate("store_using_replacement", io.replace_way.set.valid && s1_need_replacement)

  val s1_has_permission = s1_hit_coh.onAccess(s1_req.cmd)._1
  val s1_hit = s1_tag_match && s1_has_permission
  val s1_pregen_can_go_to_mq = !s1_req.replace && !s1_req.probe && !s1_req.miss && (s1_req.isStore || s1_req.isAMO && s1_req.cmd =/= M_XSC) && !s1_hit

  // s2: select data, return resp if this is a store miss
  val s2_valid = RegInit(false.B)
  val s2_req = RegEnable(s1_req, s1_fire)
  val s2_tag_errors = RegEnable(s1_tag_errors, s1_fire)
  val s2_tag_match = RegEnable(s1_tag_match, s1_fire)
  val s2_tag_match_way = RegEnable(s1_real_tag_match_way, s1_fire)
  val s2_hit_coh = RegEnable(s1_hit_coh, s1_fire)
  val (s2_has_permission, _, s2_new_hit_coh) = s2_hit_coh.onAccess(s2_req.cmd)

  val s2_repl_tag = RegEnable(s1_repl_tag, s1_fire)
  val s2_repl_coh = RegEnable(s1_repl_coh, s1_fire)
  val s2_repl_pf  = RegEnable(s1_repl_pf, s1_fire)
  val s2_need_replacement = RegEnable(s1_need_replacement, s1_fire)
  val s2_need_eviction = RegEnable(s1_need_eviction, s1_fire)
  val s2_need_data = RegEnable(s1_need_data, s1_fire)
  val s2_need_tag = RegEnable(s1_need_tag, s1_fire)
  val s2_idx = get_idx(s2_req.vaddr)

  val s2_way_en = RegEnable(s1_way_en, s1_fire)
  val s2_tag = Mux(s2_need_replacement, s2_repl_tag, RegEnable(s1_tag, s1_fire))
  val s2_coh = Mux(s2_need_replacement, s2_repl_coh, RegEnable(s1_coh, s1_fire))
  val s2_banked_store_wmask = RegEnable(s1_banked_store_wmask, s1_fire)
  val s2_flag_error = RegEnable(s1_flag_error, s1_fire)
  val s2_tag_error = WireInit(false.B)
  val s2_l2_error = Mux(io.refill_info.valid, io.refill_info.bits.error, s2_req.error)
  val s2_error = s2_flag_error || s2_tag_error || s2_l2_error // data_error not included

  val s2_may_report_data_error = s2_need_data && s2_coh.state =/= ClientStates.Nothing

  val s2_hit = s2_tag_match && s2_has_permission
  val s2_sc = s2_req.cmd === M_XSC
  val s2_lr = s2_req.cmd === M_XLR
  val s2_amo_hit = s2_hit && !s2_req.probe && !s2_req.miss && s2_req.isAMO
  val s2_store_hit = s2_hit && !s2_req.probe && !s2_req.miss && s2_req.isStore
  val s2_should_not_report_ecc_error = !s2_req.miss && (s2_req.isAMO && !s2_lr || s2_req.isStore)

  if (EnableTagEcc) {
    s2_tag_error := s2_tag_errors.orR && s2_need_tag
  }

  s2_s0_set_conflict := s2_valid && s0_idx === s2_idx
  s2_s0_set_conflict_store := s2_valid && store_idx === s2_idx

  // For a store req, it either hits and goes to s3, or misses and enters the miss queue immediately
  val s2_req_miss_without_data = Mux(s2_valid, s2_req.miss && !io.refill_info.valid, false.B)
  val s2_can_go_to_mq_replay = (s2_req_miss_without_data && RegEnable(s2_req_miss_without_data && !io.mainpipe_info.s2_replay_to_mq, false.B, s2_valid)) || io.replace_block // miss_req in s2 but refill data is invalid, can block 1 cycle
  val s2_can_go_to_mq = RegEnable(s1_pregen_can_go_to_mq, s1_fire)
  val s2_can_go_to_s3 = (s2_sc || s2_req.replace || s2_req.probe || (s2_req.miss && io.refill_info.valid && !io.replace_block) || (s2_req.isStore || s2_req.isAMO) && s2_hit) && s3_ready
  assert(RegNext(!(s2_valid && s2_can_go_to_s3 && s2_can_go_to_mq && s2_can_go_to_mq_replay)))
  val s2_can_go = s2_can_go_to_s3 || s2_can_go_to_mq || s2_can_go_to_mq_replay
  val s2_fire = s2_valid && s2_can_go
  val s2_fire_to_s3 = s2_valid && s2_can_go_to_s3
  when (s1_fire) {
    s2_valid := true.B
  }.elsewhen (s2_fire) {
    s2_valid := false.B
  }
  s2_ready := !s2_valid || s2_can_go
  val replay = !io.miss_req.ready || io.wbq_block_miss_req

  io.data_readline_can_go := GatedValidRegNext(s1_fire)
  io.data_readline_stall := s2_valid
  io.data_readline_can_resp := s2_fire_to_s3

  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    ((~full_wmask & old_data) | (full_wmask & new_data))
  }
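  // mergePutData byte-merges new_data into old_data under a per-byte wmask.
  // Worked example with 16-bit data: old_data = 0x1122, new_data = 0xAABB,
  // wmask = 0b01 expands to full_wmask = 0x00FF, giving
  // (0x1122 & 0xFF00) | (0xAABB & 0x00FF) = 0x11BB.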
  val s2_merge_mask = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBytes.W)))
  val s2_store_data_merged_without_cache = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
  for (i <- 0 until DCacheBanks) {
    val new_data = get_data_of_bank(i, Mux(s2_req.miss, io.refill_info.bits.store_data, s2_req.store_data))
    // for an amo hit, we should use the data read out of the SRAM
    // and must not merge it with the store data
    s2_merge_mask(i) := Mux(s2_amo_hit, 0.U(wordBytes.W), get_mask_of_bank(i, Mux(s2_req.miss, io.refill_info.bits.store_mask, s2_req.store_mask)))
    s2_store_data_merged_without_cache(i) := mergePutData(0.U(DCacheSRAMRowBits.W), new_data, s2_merge_mask(i))
  }

  io.pseudo_data_error_inj_done := s2_fire_to_s3 && (s2_tag_error || s2_hit) && s2_may_report_data_error
  io.pseudo_error.ready := false.B
  XSError(s2_valid && s2_can_go_to_s3 && s2_req.miss && !io.refill_info.valid, "MainPipe req can go to s3 but no refill data")

  // s3: write data, meta and tag
  val s3_valid = RegInit(false.B)
  val s3_req = RegEnable(s2_req, s2_fire_to_s3)
  val s3_miss_param = RegEnable(io.refill_info.bits.miss_param, s2_fire_to_s3)
  val s3_miss_dirty = RegEnable(io.refill_info.bits.miss_dirty, s2_fire_to_s3)
  val s3_tag = RegEnable(s2_tag, s2_fire_to_s3)
  val s3_tag_match = RegEnable(s2_tag_match, s2_fire_to_s3)
  val s3_coh = RegEnable(s2_coh, s2_fire_to_s3)
  val s3_hit = RegEnable(s2_hit, s2_fire_to_s3)
  val s3_amo_hit = RegEnable(s2_amo_hit, s2_fire_to_s3)
  val s3_store_hit = RegEnable(s2_store_hit, s2_fire_to_s3)
  val s3_hit_coh = RegEnable(s2_hit_coh, s2_fire_to_s3)
  val s3_new_hit_coh = RegEnable(s2_new_hit_coh, s2_fire_to_s3)
  val s3_way_en = RegEnable(s2_way_en, s2_fire_to_s3)
  val s3_banked_store_wmask = RegEnable(s2_banked_store_wmask, s2_fire_to_s3)
  val s3_idx = RegEnable(s2_idx, s2_fire_to_s3)
  val s3_store_data_merged_without_cache = RegEnable(s2_store_data_merged_without_cache, s2_fire_to_s3)
  val s3_merge_mask = RegEnable(VecInit(s2_merge_mask.map(~_)), s2_fire_to_s3)

  val s3_data_resp = io.data_resp
  val s3_data = WireInit(VecInit((0 until DCacheBanks).map(i => {
    s3_data_resp(i).raw_data
  })))
  val s3_store_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
  for (i <- 0 until DCacheBanks) {
    // for an amo hit, we should use the data read out of the SRAM
    // and must not merge it with the store data
    s3_store_data_merged(i) := mergePutData(s3_store_data_merged_without_cache(i), s3_data(i), s3_merge_mask(i))
  }

  val s3_data_word = s3_store_data_merged(s3_req.word_idx)
  val s3_data_quad_word = VecInit((0 until DCacheBanks).map(i => {
    if (i == (DCacheBanks - 1)) s3_store_data_merged(i)
    else Cat(s3_store_data_merged(i + 1), s3_store_data_merged(i))
  }))(s3_req.word_idx)
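  // A "quad word" is two adjacent DataBits-wide banks concatenated as
  // {bank i+1, bank i}; AMOCAS compares and writes at this granularity.
  // The last bank has no upper neighbor, so it degenerates to a single bank.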

  val s3_sc_fail  = Wire(Bool()) // miss or lr mismatch
  val s3_need_replacement = RegEnable(s2_need_replacement, s2_fire_to_s3)

  val (_, probe_shrink_param, probe_new_coh) = s3_coh.onProbe(s3_req.probe_param)
  val (_, miss_shrink_param, _) = s3_coh.onCacheControl(M_FLUSH)

  val miss_update_meta = s3_req.miss
  val probe_update_meta = s3_req.probe && s3_tag_match && s3_coh =/= probe_new_coh
  val store_update_meta = s3_req.isStore && !s3_req.probe && s3_hit_coh =/= s3_new_hit_coh
  val amo_update_meta = s3_req.isAMO && !s3_req.probe && s3_hit_coh =/= s3_new_hit_coh && !s3_sc_fail
  val amo_wait_amoalu = s3_req.isAMO && s3_req.cmd =/= M_XLR && s3_req.cmd =/= M_XSC && !isAMOCAS(s3_req.cmd)
  val update_meta = (miss_update_meta || probe_update_meta || store_update_meta || amo_update_meta) && !s3_req.replace

  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
    val c = categorize(cmd)
    MuxLookup(Cat(c, param, dirty), Nothing)(Seq(
      //(effect param) -> (next)
      Cat(rd, toB, false.B)  -> Branch,
      Cat(rd, toB, true.B)   -> Branch,
      Cat(rd, toT, false.B)  -> Trunk,
      Cat(rd, toT, true.B)   -> Dirty,
      Cat(wi, toT, false.B)  -> Trunk,
      Cat(wi, toT, true.B)   -> Dirty,
      Cat(wr, toT, false.B)  -> Dirty,
      Cat(wr, toT, true.B)   -> Dirty))
  }
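  // missCohGen maps the TileLink grant (cmd category, permission param, dirty)
  // to the next coherence state: toB grants Branch (shared, read-only); toT
  // grants Trunk (exclusive clean) unless the granted data is dirty or the
  // access is an actual write (wr), in which case the line becomes Dirty.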

  val miss_new_coh = ClientMetadata(missCohGen(s3_req.cmd, s3_miss_param, s3_miss_dirty))

  // report ecc error
  val s3_tag_error = RegEnable(s2_tag_error, false.B, s2_fire)
  // data_error will be reported by data array 1 cycle after data read resp
  val s3_data_error = Wire(Bool())
  s3_data_error := Mux(GatedValidRegNextN(s1_fire, 2), // ecc check result is generated 2 cycles after the read req
    io.readline_error_delayed && RegNext(s2_may_report_data_error),
    RegNext(s3_data_error) // do not update s3_data_error if !s1_fire
  )
  val s3_l2_error = RegEnable(s2_l2_error, false.B, s2_fire)
  val s3_flag_error = RegEnable(s2_flag_error, false.B, s2_fire)
  // error signal for amo inst
  // s3_error = s3_flag_error || s3_tag_error || s3_l2_error || s3_data_error
  val s3_error = RegEnable(s2_error, 0.U.asTypeOf(s2_error), s2_fire) || s3_data_error
  val s3_error_paddr = get_block_addr(RegEnable(Cat(s2_tag, get_untag(s2_req.vaddr)), s2_fire))

  // LR, SC and AMO
  val debug_sc_fail_addr = RegInit(0.U)
  val debug_sc_fail_cnt  = RegInit(0.U(8.W))
  val debug_sc_addr_match_fail_cnt  = RegInit(0.U(8.W))

  val lrsc_count = RegInit(0.U(log2Ceil(LRSCCycles).W))
  val lrsc_valid = lrsc_count > LRSCBackOff.U
  val lrsc_addr = Reg(UInt())

  val s3_s_amoalu = RegInit(false.B)
  val s3_lr = !s3_req.probe && s3_req.isAMO && s3_req.cmd === M_XLR
  val s3_sc = !s3_req.probe && s3_req.isAMO && s3_req.cmd === M_XSC
  val s3_cas = !s3_req.probe && s3_req.isAMO && isAMOCAS(s3_req.cmd)
  val s3_lrsc_addr_match = lrsc_valid && lrsc_addr === get_block_addr(s3_req.addr)
  val debug_s3_sc_fail_addr_match = s3_sc && lrsc_addr === get_block_addr(s3_req.addr) && !lrsc_valid

  s3_sc_fail  := s3_sc && (!s3_lrsc_addr_match || !s3_hit)
  val s3_cas_fail = s3_cas && (FillInterleaved(8, s3_req.amo_mask) & (s3_req.amo_cmp ^ s3_data_quad_word)) =/= 0.U

  val s3_can_do_amo = (s3_req.miss && !s3_req.probe && s3_req.isAMO) || s3_amo_hit
  val s3_can_do_amo_write = s3_can_do_amo && isWrite(s3_req.cmd) && !s3_sc_fail && !s3_cas_fail

  when (s3_valid && (s3_lr || s3_sc)) {
    when (s3_can_do_amo && s3_lr) {
      lrsc_count := (LRSCCycles - 1).U
      lrsc_addr := get_block_addr(s3_req.addr)
    } .otherwise {
      lrsc_count := 0.U
    }
  }.elsewhen (io.invalid_resv_set) {
    // when we release this block,
    // we invalidate this reservation set
    lrsc_count := 0.U
  }.elsewhen (lrsc_count > 0.U) {
    lrsc_count := lrsc_count - 1.U
  }
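  // The LR reservation is held for LRSCCycles and counts down every cycle.
  // lrsc_valid additionally requires lrsc_count > LRSCBackOff, so the
  // reservation stops blocking probes (via lrsc_locked_block) some cycles
  // before it expires, which helps guarantee forward progress across cores.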


  io.lrsc_locked_block.valid := lrsc_valid
  io.lrsc_locked_block.bits  := lrsc_addr
  io.block_lr := GatedValidRegNext(lrsc_count > 0.U)

  // When update_resv_set is asserted, block all probe reqs in the next cycle.
  // This gives the probe reservation-set address compare an independent cycle,
  // which leads to better timing.
  io.update_resv_set := s3_valid && s3_lr && s3_can_do_amo

  when (s3_valid) {
    when (s3_req.addr === debug_sc_fail_addr) {
      when (s3_sc_fail) {
        debug_sc_fail_cnt := debug_sc_fail_cnt + 1.U
      } .elsewhen (s3_sc) {
        debug_sc_fail_cnt := 0.U
      }
    } .otherwise {
      when (s3_sc_fail) {
        debug_sc_fail_addr := s3_req.addr
        debug_sc_fail_cnt  := 1.U
      }
    }
  }
  XSWarn(debug_sc_fail_cnt > 100.U, "L1DCache failed too many SCs in a row")

  when (s3_valid) {
    when (s3_req.addr === debug_sc_fail_addr) {
      when (debug_s3_sc_fail_addr_match) {
        debug_sc_addr_match_fail_cnt := debug_sc_addr_match_fail_cnt + 1.U
      } .elsewhen (s3_sc) {
        debug_sc_addr_match_fail_cnt := 0.U
      }
    } .otherwise {
      when (s3_sc_fail) {
        debug_sc_addr_match_fail_cnt  := 1.U
      }
    }
  }
  XSError(debug_sc_addr_match_fail_cnt > 100.U, "L1DCache failed too many SCs in a row, resv set addr always match")


  val banked_amo_wmask = UIntToOH(s3_req.word_idx)
  val update_data = s3_req.miss || s3_store_hit || s3_can_do_amo_write

  // generate write data
  // AMO hits
  val do_amoalu = amo_wait_amoalu && s3_valid && !s3_s_amoalu
  val amoalu   = Module(new AMOALU(wordBits))
  amoalu.io.mask := s3_req.amo_mask
  amoalu.io.cmd  := s3_req.cmd
  amoalu.io.lhs  := s3_data_word
  amoalu.io.rhs  := s3_req.amo_data

  // merge amo write data
  val s3_amo_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W))) // exclude AMOCAS
  val s3_sc_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
  val s3_cas_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
  for (i <- 0 until DCacheBanks) {
    val old_data = s3_store_data_merged(i)
    val new_data = amoalu.io.out
    val wmask = Mux(
      s3_req.word_idx === i.U,
      ~0.U(wordBytes.W),
      0.U(wordBytes.W)
    )
    s3_amo_data_merged(i) := mergePutData(old_data, new_data, wmask)
    s3_sc_data_merged(i) := mergePutData(old_data, s3_req.amo_data,
      Mux(s3_req.word_idx === i.U && !s3_sc_fail, s3_req.amo_mask, 0.U(wordBytes.W))
    )
    val l_select = !s3_cas_fail && s3_req.word_idx === i.U
    val h_select = !s3_cas_fail && s3_req.cmd === M_XA_CASQ &&
      (if (i % 2 == 1) s3_req.word_idx === (i - 1).U else false.B)
    s3_cas_data_merged(i) := mergePutData(
      old_data = old_data,
      new_data = Mux(h_select, s3_req.amo_data >> DataBits, s3_req.amo_data.take(DataBits)),
      wmask = Mux(
        h_select,
        s3_req.amo_mask >> wordBytes,
        Mux(
          l_select,
          s3_req.amo_mask.take(wordBytes),
          0.U(wordBytes.W)
        )
      )
    )
  }
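  // For AMOCAS: l_select writes the low word of amo_data into the addressed
  // bank, and h_select additionally writes the high word into the next (odd)
  // bank for quad-word CAS (M_XA_CASQ). Both are gated by !s3_cas_fail, so a
  // failed compare writes nothing.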
  val s3_amo_data_merged_reg = RegEnable(s3_amo_data_merged, do_amoalu)
  val miss_wb = s3_req.miss && s3_need_replacement && s3_coh.state =/= ClientStates.Nothing
  val probe_wb = s3_req.probe
  val replace_wb = s3_req.replace
  val need_wb = miss_wb || probe_wb || replace_wb

  val writeback_param = Mux(probe_wb, probe_shrink_param, miss_shrink_param)
  val writeback_data = if (dcacheParameters.alwaysReleaseData) {
    s3_tag_match && s3_req.probe && s3_req.probe_need_data ||
      s3_coh === ClientStates.Dirty || (miss_wb || replace_wb) && s3_coh.state =/= ClientStates.Nothing
  } else {
    s3_tag_match && s3_req.probe && s3_req.probe_need_data || s3_coh === ClientStates.Dirty
  }

  val s3_probe_can_go = s3_req.probe && io.wb.ready && (io.meta_write.ready || !probe_update_meta)
  val s3_store_can_go = s3_req.source === STORE_SOURCE.U && !s3_req.probe && (io.meta_write.ready || !store_update_meta) && (io.data_write.ready || !update_data) && !s3_req.miss
  val s3_amo_can_go = s3_amo_hit && (io.meta_write.ready || !amo_update_meta) && (io.data_write.ready || !update_data) && (s3_s_amoalu || !amo_wait_amoalu) || s3_sc_fail
  val s3_miss_can_go = s3_req.miss &&
    (io.meta_write.ready || !amo_update_meta) &&
    (io.data_write.ready || !update_data) &&
    (s3_s_amoalu || !amo_wait_amoalu) &&
    io.tag_write.ready &&
    io.wb.ready
  val s3_replace_nothing = s3_req.replace && s3_coh.state === ClientStates.Nothing
  val s3_replace_can_go = s3_req.replace && (s3_replace_nothing || io.wb.ready)
  val s3_can_go = s3_probe_can_go || s3_store_can_go || s3_amo_can_go || s3_miss_can_go || s3_replace_can_go
  val s3_update_data_cango = s3_store_can_go || s3_amo_can_go || s3_miss_can_go // used to speed up data_write gen
  val s3_fire = s3_valid && s3_can_go
  when (s2_fire_to_s3) {
    s3_valid := true.B
  }.elsewhen (s3_fire) {
    s3_valid := false.B
  }
  when (do_amoalu) { s3_s_amoalu := true.B }
  when (s3_fire) { s3_s_amoalu := false.B }

  val s3_probe_new_coh = probe_new_coh
  val new_coh = Mux(
    miss_update_meta,
    miss_new_coh,
    Mux(
      probe_update_meta,
      s3_probe_new_coh,
      Mux(
        store_update_meta || amo_update_meta,
        s3_new_hit_coh,
        ClientMetadata.onReset
      )
    )
  )
  val banked_wmask = Mux(
    s3_req.miss,
    banked_full_wmask,
    Mux(
      s3_store_hit,
      s3_banked_store_wmask,
      Mux(
        s3_can_do_amo_write,
        Mux(
          isAMOCASQ(s3_req.cmd),
          FillInterleaved(2, UIntToOH(s3_req.quad_word_idx)),
          UIntToOH(s3_req.word_idx)
        ),
        banked_none_wmask
      )
    )
  )
  assert(!(s3_valid && banked_wmask.orR && !update_data))
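  // Write mask selection: a miss refill overwrites the whole line; a store hit
  // writes its per-bank store mask; an AMO writes a single word, or an aligned
  // word pair for quad-word CAS (AMOCASQ); everything else writes nothing.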

  for (i <- 0 until DCacheBanks) {
    val old_data = s3_store_data_merged(i)
    s3_sc_data_merged(i) := mergePutData(old_data, s3_req.amo_data,
      Mux(
        s3_req.word_idx === i.U && !s3_sc_fail,
        s3_req.amo_mask,
        0.U(wordBytes.W)
      )
    )
  }
  for (i <- 0 until DCacheBanks) {
    io.data_write_dup(i).valid := s3_valid && s3_update_data_cango && update_data
    io.data_write_dup(i).bits.way_en := s3_way_en
    io.data_write_dup(i).bits.addr := s3_req.vaddr
  }

  s3_ready := !s3_valid || s3_can_go
  s3_s0_set_conflict := s3_valid && s3_idx === s0_idx
  s3_s0_set_conflict_store := s3_valid && s3_idx === store_idx
  //assert(RegNext(!s3_valid || !(s3_req.source === STORE_SOURCE.U && !s3_req.probe) || s3_hit)) // a missed store should never come to s3; fixed (assertion reserved)

  io.meta_read.valid := req.valid
  io.meta_read.bits.idx := get_idx(s0_req.vaddr)
  io.meta_read.bits.way_en := Mux(s0_req.replace, s0_req.replace_way_en, ~0.U(nWays.W))

  io.tag_read.valid := req.valid && !s0_req.replace
  io.tag_read.bits.idx := get_idx(s0_req.vaddr)
  io.tag_read.bits.way_en := ~0.U(nWays.W)

  io.data_read_intend := s1_valid && s1_need_data
  io.data_readline.valid := s1_valid && s1_need_data
  io.data_readline.bits.rmask := s1_banked_rmask
  io.data_readline.bits.way_en := s1_way_en
  io.data_readline.bits.addr := s1_req.vaddr

  io.miss_req.valid := s2_valid && s2_can_go_to_mq
  val miss_req = io.miss_req.bits
  miss_req := DontCare
  miss_req.source := s2_req.source
  miss_req.pf_source := L1_HW_PREFETCH_NULL
  miss_req.cmd := s2_req.cmd
  miss_req.addr := s2_req.addr
  miss_req.vaddr := s2_req.vaddr
  miss_req.store_data := s2_req.store_data
  miss_req.store_mask := s2_req.store_mask
  miss_req.word_idx := s2_req.word_idx
  miss_req.amo_data := s2_req.amo_data
  miss_req.amo_mask := s2_req.amo_mask
  miss_req.amo_cmp  := s2_req.amo_cmp
  miss_req.req_coh := s2_hit_coh
  miss_req.id := s2_req.id
  miss_req.cancel := false.B
  miss_req.pc := DontCare
  miss_req.full_overwrite := s2_req.isStore && s2_req.store_mask.andR

  io.wbq_conflict_check.valid := s2_valid && s2_can_go_to_mq
  io.wbq_conflict_check.bits := s2_req.addr

  io.store_replay_resp.valid := s2_valid && s2_can_go_to_mq && replay && s2_req.isStore
  io.store_replay_resp.bits.data := DontCare
  io.store_replay_resp.bits.miss := true.B
  io.store_replay_resp.bits.replay := true.B
  io.store_replay_resp.bits.id := s2_req.id

  io.store_hit_resp.valid := s3_valid && (s3_store_can_go || (s3_miss_can_go && s3_req.isStore))
  io.store_hit_resp.bits.data := DontCare
  io.store_hit_resp.bits.miss := false.B
  io.store_hit_resp.bits.replay := false.B
  io.store_hit_resp.bits.id := s3_req.id

  val atomic_hit_resp = Wire(new MainPipeResp)
  atomic_hit_resp.source := s3_req.source
  atomic_hit_resp.data := Mux(s3_sc, s3_sc_fail.asUInt, s3_data_quad_word)
  atomic_hit_resp.miss := false.B
  atomic_hit_resp.miss_id := s3_req.miss_id
  atomic_hit_resp.error := s3_error
  atomic_hit_resp.replay := false.B
  atomic_hit_resp.ack_miss_queue := s3_req.miss
  atomic_hit_resp.id := lrsc_valid
  val atomic_replay_resp = Wire(new MainPipeResp)
  atomic_replay_resp.source := s2_req.source
  atomic_replay_resp.data := DontCare
  atomic_replay_resp.miss := true.B
  atomic_replay_resp.miss_id := DontCare
  atomic_replay_resp.error := false.B
  atomic_replay_resp.replay := true.B
  atomic_replay_resp.ack_miss_queue := false.B
  atomic_replay_resp.id := DontCare

  val atomic_replay_resp_valid = s2_valid && s2_can_go_to_mq && replay && s2_req.isAMO
  val atomic_hit_resp_valid = s3_valid && (s3_amo_can_go || s3_miss_can_go && s3_req.isAMO)

  io.atomic_resp.valid := atomic_replay_resp_valid || atomic_hit_resp_valid
  io.atomic_resp.bits := Mux(atomic_replay_resp_valid, atomic_replay_resp, atomic_hit_resp)

  // io.replace_resp.valid := s3_fire && s3_req.replace
  // io.replace_resp.bits := s3_req.miss_id

  io.meta_write.valid := s3_fire && update_meta
  io.meta_write.bits.idx := s3_idx
  io.meta_write.bits.way_en := s3_way_en
  io.meta_write.bits.meta.coh := new_coh

  io.error_flag_write.valid := s3_fire && update_meta && (s3_l2_error || s3_req.miss)
  io.error_flag_write.bits.idx := s3_idx
  io.error_flag_write.bits.way_en := s3_way_en
  io.error_flag_write.bits.flag := s3_l2_error

  // if we used (prefetch_flag && meta =/= ClientStates.Nothing) for the prefetch check,
  // prefetch_flag_write could be omitted
  io.prefetch_flag_write.valid := s3_fire && s3_req.miss
  io.prefetch_flag_write.bits.idx := s3_idx
  io.prefetch_flag_write.bits.way_en := s3_way_en
  io.prefetch_flag_write.bits.source := s3_req.pf_source

  // regenerate repl_way & repl_coh
  io.bloom_filter_query.set.valid := s2_fire_to_s3 && s2_req.miss && !isFromL1Prefetch(s2_repl_pf) && s2_repl_coh.isValid() && isFromL1Prefetch(s2_req.pf_source)
  io.bloom_filter_query.set.bits.addr := io.bloom_filter_query.set.bits.get_addr(Cat(s2_repl_tag, get_untag(s2_req.vaddr))) // the evict block address

  io.bloom_filter_query.clr.valid := s3_fire && isFromL1Prefetch(s3_req.pf_source)
  io.bloom_filter_query.clr.bits.addr := io.bloom_filter_query.clr.bits.get_addr(s3_req.addr)

  XSPerfAccumulate("mainpipe_update_prefetchArray", io.prefetch_flag_write.valid)
  XSPerfAccumulate("mainpipe_s2_miss_req", s2_valid && s2_req.miss)
  XSPerfAccumulate("mainpipe_s2_block_penalty", s2_valid && s2_req.miss && !io.refill_info.valid)
  XSPerfAccumulate("mainpipe_s2_missqueue_replay", s2_valid && s2_can_go_to_mq_replay)
  XSPerfAccumulate("mainpipe_slot_conflict_1_2", (s1_idx === s2_idx && s1_way_en === s2_way_en && s1_req.miss && s2_req.miss && s1_valid && s2_valid))
  XSPerfAccumulate("mainpipe_slot_conflict_1_3", (s1_idx === s3_idx && s1_way_en === s3_way_en && s1_req.miss && s3_req.miss && s1_valid && s3_valid))
  XSPerfAccumulate("mainpipe_slot_conflict_2_3", (s2_idx === s3_idx && s2_way_en === s3_way_en && s2_req.miss && s3_req.miss && s2_valid && s3_valid))
  // probe / replace will not update access bit
  io.access_flag_write.valid := s3_fire && !s3_req.probe && !s3_req.replace
  io.access_flag_write.bits.idx := s3_idx
  io.access_flag_write.bits.way_en := s3_way_en
  // io.access_flag_write.bits.flag := true.B
  io.access_flag_write.bits.flag := Mux(s3_req.miss, s3_req.access, true.B)

  io.tag_write.valid := s3_fire && s3_req.miss
  io.tag_write.bits.idx := s3_idx
  io.tag_write.bits.way_en := s3_way_en
  io.tag_write.bits.tag := get_tag(s3_req.addr)
  io.tag_write.bits.ecc := DontCare // generate ecc code in tagArray
  io.tag_write.bits.vaddr := s3_req.vaddr

  io.tag_write_intend := s3_req.miss && s3_valid
  XSPerfAccumulate("fake_tag_write_intend", io.tag_write_intend && !io.tag_write.valid)
  XSPerfAccumulate("mainpipe_tag_write", io.tag_write.valid)

  io.replace_addr.valid := s2_valid && s2_need_eviction
  io.replace_addr.bits  := get_block_addr(Cat(s2_tag, get_untag(s2_req.vaddr)))

  assert(!RegNext(io.tag_write.valid && !io.tag_write_intend))

  io.data_write.valid := s3_valid && s3_update_data_cango && update_data
  io.data_write.bits.way_en := s3_way_en
  io.data_write.bits.addr := s3_req.vaddr
  io.data_write.bits.wmask := banked_wmask
  io.data_write.bits.data := Mux(
    amo_wait_amoalu,
    s3_amo_data_merged_reg,
    Mux(
      s3_sc,
      s3_sc_data_merged,
      Mux(
        s3_cas,
        s3_cas_data_merged,
        s3_store_data_merged
      )
    )
  )
  //assert(RegNext(!io.meta_write.valid || !s3_req.replace))
  assert(RegNext(!io.tag_write.valid || !s3_req.replace))
  assert(RegNext(!io.data_write.valid || !s3_req.replace))

  io.wb.valid := s3_valid && (
    // replace
    s3_req.replace && !s3_replace_nothing ||
    // probe can go to wbq
    s3_req.probe && (io.meta_write.ready || !probe_update_meta) ||
      // amo miss can go to wbq
      s3_req.miss &&
        (io.meta_write.ready || !amo_update_meta) &&
        (io.data_write.ready || !update_data) &&
        (s3_s_amoalu || !amo_wait_amoalu) &&
        io.tag_write.ready
    ) && need_wb
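  // io.wb.valid mirrors the s3_*_can_go conditions above (minus io.wb.ready
  // itself), so the writeback request is issued in the same cycle the s3
  // request completes its meta/data/tag updates.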

  io.wb.bits.addr := get_block_addr(Cat(s3_tag, get_untag(s3_req.vaddr)))
  io.wb.bits.param := writeback_param
  io.wb.bits.voluntary := s3_req.miss || s3_req.replace
  io.wb.bits.hasData := writeback_data && !s3_tag_error
  io.wb.bits.dirty := s3_coh === ClientStates.Dirty
  io.wb.bits.data := s3_data.asUInt
  io.wb.bits.corrupt := s3_tag_error || s3_data_error
  io.wb.bits.delay_release := s3_req.replace
  io.wb.bits.miss_id := s3_req.miss_id

  // update plru in main pipe s3
  io.replace_access.valid := GatedValidRegNext(s2_fire_to_s3) && !s3_req.probe && (s3_req.miss || ((s3_req.isAMO || s3_req.isStore) && s3_hit))
  io.replace_access.bits.set := s3_idx
  io.replace_access.bits.way := OHToUInt(s3_way_en)

  io.replace_way.set.valid := GatedValidRegNext(s0_fire)
  io.replace_way.set.bits := s1_idx
  io.replace_way.dmWay := s1_dmWay

  // send evict hint to sms
  val sms_agt_evict_valid = s2_valid && s2_req.miss && s2_fire_to_s3
  io.sms_agt_evict_req.valid := GatedValidRegNext(sms_agt_evict_valid)
  io.sms_agt_evict_req.bits.vaddr := RegEnable(Cat(s2_repl_tag(tagBits - 1, 2), s2_req.vaddr(13,12), 0.U((VAddrBits - tagBits).W)), sms_agt_evict_valid)

  // TODO: consider block policy of a finer granularity
  io.status.s0_set.valid := req.valid
  io.status.s0_set.bits := get_idx(s0_req.vaddr)
  io.status.s1.valid := s1_valid
  io.status.s1.bits.set := s1_idx
  io.status.s1.bits.way_en := s1_way_en
  io.status.s2.valid := s2_valid && !s2_req.replace
  io.status.s2.bits.set := s2_idx
  io.status.s2.bits.way_en := s2_way_en
  io.status.s3.valid := s3_valid && !s3_req.replace
  io.status.s3.bits.set := s3_idx
  io.status.s3.bits.way_en := s3_way_en

  for ((s, i) <- io.status_dup.zipWithIndex) {
    s.s1.valid := s1_valid
    s.s1.bits.set := RegEnable(get_idx(s0_req.vaddr), s0_fire)
    s.s1.bits.way_en := s1_way_en
    s.s2.valid := s2_valid && !RegEnable(s1_req.replace, s1_fire)
    s.s2.bits.set := RegEnable(get_idx(s1_req.vaddr), s1_fire)
    s.s2.bits.way_en := s2_way_en
    s.s3.valid := s3_valid && !RegEnable(s2_req.replace, s2_fire_to_s3)
    s.s3.bits.set := RegEnable(get_idx(s2_req.vaddr), s2_fire_to_s3)
    s.s3.bits.way_en := RegEnable(s2_way_en, s2_fire_to_s3)
  }
  dontTouch(io.status_dup)

  io.mainpipe_info.s2_valid := s2_valid && s2_req.miss
  io.mainpipe_info.s2_miss_id := s2_req.miss_id
  io.mainpipe_info.s2_replay_to_mq := s2_valid && s2_can_go_to_mq_replay
  io.mainpipe_info.s3_valid := s3_valid
  io.mainpipe_info.s3_miss_id := s3_req.miss_id
  io.mainpipe_info.s3_refill_resp := RegNext(s2_valid && s2_req.miss && s2_fire_to_s3)

  // report error to beu and csr, 1 cycle after read data resp
  io.error := 0.U.asTypeOf(ValidIO(new L1CacheErrorInfo))
  // report error, update error csr
  io.error.valid := s3_error && GatedValidRegNext(s2_fire && !s2_should_not_report_ecc_error)
  // only tag_error and data_error will be reported to beu
  // l2_error should not be reported (l2 will report that)
  io.error.bits.report_to_beu := (s3_tag_error || s3_data_error) && RegNext(s2_fire)
  io.error.bits.paddr := s3_error_paddr
  io.error.bits.source.tag := s3_tag_error
  io.error.bits.source.data := s3_data_error
  io.error.bits.source.l2 := s3_flag_error || s3_l2_error
  io.error.bits.opType.store := s3_req.isStore && !s3_req.probe
  io.error.bits.opType.probe := s3_req.probe
  io.error.bits.opType.release := s3_req.replace
  io.error.bits.opType.atom := s3_req.isAMO && !s3_req.probe

  val perfEvents = Seq(
    ("dcache_mp_req          ", s0_fire                                                      ),
    ("dcache_mp_total_penalty", PopCount(VecInit(Seq(s0_fire, s1_valid, s2_valid, s3_valid))))
  )
  generatePerfEvent()
}