/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
*
*
* Acknowledgement
*
* This implementation is inspired by several key papers:
* [1] David Kroft. "[Lockup-free instruction fetch/prefetch cache organization.]
* (https://dl.acm.org/doi/10.5555/800052.801868)" 8th Annual Symposium on Computer Architecture (ISCA). 1981.
***************************************************************************************/

package xiangshan.cache

import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._
import coupledL2.VaddrKey
import coupledL2.IsKeywordKey
import difftest._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink.TLMessages._
import freechips.rocketchip.tilelink._
import huancun.{AliasKey, DirtyKey, PrefetchKey}
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.mem.AddPipelineReg
import xiangshan.mem.prefetch._
import xiangshan.mem.trace._
import xiangshan.mem.LqPtr

class MissReqWoStoreData(implicit p: Parameters) extends DCacheBundle {
  val source = UInt(sourceTypeWidth.W)
  val pf_source = UInt(L1PfSourceBits.W)
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val pc = UInt(VAddrBits.W)

  val lqIdx = new LqPtr
  // store
  val full_overwrite = Bool()

  // amo
  val word_idx = UInt(log2Up(blockWords).W)
  val amo_data   = UInt(QuadWordBits.W)
  val amo_mask   = UInt(QuadWordBytes.W)
  val amo_cmp    = UInt(QuadWordBits.W) // data to be compared in AMOCAS

  val req_coh = new ClientMetadata
  val id = UInt(reqIdWidth.W)

  // For now, a miss queue entry req is actually valid when req.valid && !cancel
  // * req.valid is fast to generate
  // * cancel is slow to generate; it is not used until the last moment
  //
  // cancel may come from the following sources:
  // 1. miss req blocked by writeback queue:
  //      a writeback req of the same address is in progress
  // 2. pmp check failed
  val cancel = Bool() // cancel is slow to generate, it will cancel missreq.valid

  // Req source decode
  // Note that req source is NOT the cmd type
  // For instance, a req which isFromPrefetch may have an R or W cmd
  def isFromLoad = source === LOAD_SOURCE.U
  def isFromStore = source === STORE_SOURCE.U
  def isFromAMO = source === AMO_SOURCE.U
  def isFromPrefetch = source >= DCACHE_PREFETCH_SOURCE.U
  def isPrefetchWrite = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFW
  def isPrefetchRead = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFR
  def hit = req_coh.isValid()
}
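
// A minimal consumer-side sketch (illustration only, not part of the original file):
// because `cancel` arrives late, a consumer qualifies validity at the last moment
// instead of gating the whole datapath on it, e.g. (hypothetical wiring):
//   val miss_req_fire = io.req.valid && !io.req.bits.cancel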

class MissReqStoreData(implicit p: Parameters) extends DCacheBundle {
  // store data and store mask will be written to the miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)
}

class MissQueueRefillInfo(implicit p: Parameters) extends MissReqStoreData {
  // refill_info for mainpipe req wakeup
  val miss_param = UInt(TLPermissions.bdWidth.W)
  val miss_dirty = Bool()
  val error      = Bool()
}

class MissReq(implicit p: Parameters) extends MissReqWoStoreData {
  // store data and store mask will be written to the miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  def toMissReqStoreData(): MissReqStoreData = {
    val out = Wire(new MissReqStoreData)
    out.store_data := store_data
    out.store_mask := store_mask
    out
  }

  def toMissReqWoStoreData(): MissReqWoStoreData = {
    this.viewAsSupertype(new MissReqWoStoreData)
  }
}

class MissResp(implicit p: Parameters) extends DCacheBundle {
  val id = UInt(log2Up(cfg.nMissEntries).W)
  // cache miss request is handled by miss queue, either merged or newly allocated
  val handled = Bool()
  // cache req missed and was merged into one of the miss queue entries,
  // i.e. !merged means this access is the first miss for this cacheline
  val merged = Bool()
}
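
// Illustration (hypothetical consumer code, not part of this file): `handled` and
// `merged` let the requester distinguish the first miss to a cacheline from a
// merged follow-up miss, e.g.
//   val first_miss_for_line = resp.handled && !resp.merged
//   val merged_into_mshr    = resp.handled && resp.merged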


/**
  * miss queue enq logic: enq is now split into 2 cycles
  *  +---------------------------------------------------------------------+    pipeline reg  +-------------------------+
  *  +         s0: enq source arbiter, judge mshr alloc or merge           +     +-------+    + s1: real alloc or merge +
  *  +                      +-----+          primary_fire?       ->        +     | alloc |    +                         +
  *  + mainpipe  -> req0 -> |     |          secondary_fire?     ->        +     | merge |    +                         +
  *  + loadpipe0 -> req1 -> | arb | -> req                       ->        +  -> | req   | -> +                         +
  *  + loadpipe1 -> req2 -> |     |          mshr id             ->        +     | id    |    +                         +
  *  +                      +-----+                                        +     +-------+    +                         +
  *  +---------------------------------------------------------------------+                  +-------------------------+
  */
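
// The 2-cycle split buys timing slack: s0 only arbitrates between sources and decides
// alloc/merge plus the target mshr id; the (slower) MSHR state update happens in s1,
// driven from the pipeline reg. Sketch of the handshake (mirrors the wiring in
// MissQueue further below; names come from this file):
//   miss_req_pipe_reg.req   := io.req.bits                  // captured in s0
//   miss_req_pipe_reg.alloc := alloc && io.req.valid && ...
//   miss_req_pipe_reg.merge := merge && io.req.valid && ...
//   entries(mshr_id).io.miss_req_pipe_reg := miss_req_pipe_reg // applied in s1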

// a pipeline reg between MissReq and MissEntry
class MissReqPipeRegBundle(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheBundle
  with HasCircularQueuePtrHelper {
  val req           = new MissReq
  // this request is about to merge into an existing mshr
  val merge         = Bool()
  // this request is about to allocate a new mshr
  val alloc         = Bool()
  val cancel        = Bool()
  val mshr_id       = UInt(log2Up(cfg.nMissEntries).W)

  def reg_valid(): Bool = {
    (merge || alloc)
  }

  def matched(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    block_match && reg_valid() && !(req.isFromPrefetch)
  }

  def prefetch_late_en(new_req: MissReqWoStoreData, new_req_valid: Bool): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    new_req_valid && alloc && block_match && (req.isFromPrefetch) && !(new_req.isFromPrefetch)
  }

  def reject_req(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
    // merging a store into a store is disabled; sbuffer should avoid this situation, as stores to the same address must preserve their program order to match the memory model
    val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore

    val set_match = addr_to_dcache_set(req.vaddr) === addr_to_dcache_set(new_req.vaddr)

    Mux(
        alloc,
        block_match && (!alias_match || !(merge_load || merge_store)),
        false.B
      )
  }

  def merge_req(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
    // merging a store into a store is disabled; sbuffer should avoid this situation, as stores to the same address must preserve their program order to match the memory model
    val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore
    Mux(
        alloc,
        block_match && alias_match && (merge_load || merge_store),
        false.B
      )
  }

  def merge_isKeyword(new_req: MissReq): Bool = {
    val load_merge_load  = merge_req(new_req) && req.isFromLoad  && new_req.isFromLoad
    val store_merge_load = merge_req(new_req) && req.isFromStore && new_req.isFromLoad
    val load_merge_load_use_new_req_isKeyword = isAfter(req.lqIdx, new_req.lqIdx)
    val use_new_req_isKeyword = (load_merge_load && load_merge_load_use_new_req_isKeyword) || store_merge_load
    Mux(
      use_new_req_isKeyword,
      new_req.vaddr(5).asBool,
      req.vaddr(5).asBool
    )
  }

  def isKeyword(): Bool = {
    val alloc_isKeyword = Mux(
      alloc,
      Mux(
        req.isFromLoad,
        req.vaddr(5).asBool,
        false.B),
      false.B)
    Mux(
      merge_req(req),
      merge_isKeyword(req),
      alloc_isKeyword
    )
  }
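
  // Note on "keyword" (critical word first): assuming a 64B cacheline refilled in two
  // beats, vaddr(5) selects which half of the line the requester needs first, so L2
  // can return that beat first. Illustration (beat sizes are assumptions, not taken
  // from this file):
  //   vaddr(5) = 0 -> lower 32B half wanted first; vaddr(5) = 1 -> upper half first
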
  // send out the acquire as soon as possible
  // if a new store miss req is about to merge into this pipe reg, don't send the acquire now
  def can_send_acquire(valid: Bool, new_req: MissReq): Bool = {
    alloc && !(valid && merge_req(new_req) && new_req.isFromStore)
  }

  def get_acquire(l2_pf_store_only: Bool): TLBundleA = {
    val acquire = Wire(new TLBundleA(edge.bundle))
    val grow_param = req.req_coh.onAccess(req.cmd)._2
    val acquireBlock = edge.AcquireBlock(
      fromSource = mshr_id,
      toAddress = get_block_addr(req.addr),
      lgSize = (log2Up(cfg.blockBytes)).U,
      growPermissions = grow_param
    )._2
    val acquirePerm = edge.AcquirePerm(
      fromSource = mshr_id,
      toAddress = get_block_addr(req.addr),
      lgSize = (log2Up(cfg.blockBytes)).U,
      growPermissions = grow_param
    )._2
    acquire := Mux(req.full_overwrite, acquirePerm, acquireBlock)
    // resolve cache alias by L2
    acquire.user.lift(AliasKey).foreach(_ := req.vaddr(13, 12))
    // pass vaddr to l2
    acquire.user.lift(VaddrKey).foreach(_ := req.vaddr(VAddrBits - 1, blockOffBits))

    // the miss req pipe reg passes the keyword to L2; it takes priority
    acquire.echo.lift(IsKeywordKey).foreach(_ := isKeyword())

    // trigger prefetch
    acquire.user.lift(PrefetchKey).foreach(_ := Mux(l2_pf_store_only, req.isFromStore, true.B))
    // req source
    when(req.isFromLoad) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U)
    }.elsewhen(req.isFromStore) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U)
    }.elsewhen(req.isFromAMO) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U)
    }.otherwise {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
    }

    acquire
  }

  def block_match(release_addr: UInt): Bool = {
    reg_valid() && get_block(req.addr) === get_block(release_addr)
  }
}

class CMOUnit(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle() {
    val req = Flipped(DecoupledIO(new CMOReq))
    val req_chanA = DecoupledIO(new TLBundleA(edge.bundle))
    val resp_chanD = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val resp_to_lsq = DecoupledIO(new CMOResp)
  })

  val s_idle :: s_sreq :: s_wresp :: s_lsq_resp :: Nil = Enum(4)
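  // FSM: s_idle -(req fire)-> s_sreq -(A fired)-> s_wresp -(D resp)-> s_lsq_resp -(resp to LSQ)-> s_idle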
  val state = RegInit(s_idle)
  val state_next = WireInit(state)
  val req = RegEnable(io.req.bits, io.req.fire)
  val nderr = RegInit(false.B)

  state := state_next

  switch (state) {
    is(s_idle) {
      when (io.req.fire) {
        state_next := s_sreq
        nderr := false.B
      }
    }
    is(s_sreq) {
      when (io.req_chanA.fire) {
        state_next := s_wresp
      }
    }
    is(s_wresp) {
      when (io.resp_chanD.fire) {
        state_next := s_lsq_resp
        nderr := io.resp_chanD.bits.denied || io.resp_chanD.bits.corrupt
      }
    }
    is(s_lsq_resp) {
      when (io.resp_to_lsq.fire) {
        state_next := s_idle
      }
    }
  }

  io.req.ready := state === s_idle

  io.req_chanA.valid := state === s_sreq
  io.req_chanA.bits := edge.CacheBlockOperation(
    fromSource = (cfg.nMissEntries + 1).U,
    toAddress = req.address,
    lgSize = (log2Up(cfg.blockBytes)).U,
    opcode = req.opcode
  )._2

  io.resp_chanD.ready := state === s_wresp

  io.resp_to_lsq.valid := state === s_lsq_resp
  io.resp_to_lsq.bits.address := req.address
  io.resp_to_lsq.bits.nderr   := nderr

  assert(!(state =/= s_idle && io.req.valid))
  assert(!(state =/= s_wresp && io.resp_chanD.valid))
}

class MissEntry(edge: TLEdgeOut, reqNum: Int)(implicit p: Parameters) extends DCacheModule
  with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    // MSHR ID
    val id = Input(UInt(log2Up(cfg.nMissEntries).W))
    // client requests
    // MSHR update request; MSHR state and addr will be updated when req.fire
    val req = Flipped(ValidIO(new MissReqWoStoreData))
    val wbq_block_miss_req = Input(Bool())
    // pipeline reg
    val miss_req_pipe_reg = Input(new MissReqPipeRegBundle(edge))
    // allocate this entry for a new req
    val primary_valid = Input(Bool())
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_ready = Output(Bool())
    // this entry is busy and it cannot merge the new req
    val secondary_reject = Output(Bool())
    // bus
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val queryME = Vec(reqNum, Flipped(new DCacheMEQueryIOBundle))

    // send refill info to the load queue, unused now
    val refill_to_ldq = ValidIO(new Refill)

    // replace pipe
    val l2_hint = Input(Valid(new L2ToL1Hint())) // Hint from L2 Cache

    // main pipe: amo miss
    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Input(Bool())
    val main_pipe_refill_resp = Input(Bool())
    val main_pipe_replay = Input(Bool())

    // for main pipe s2
    val refill_info = ValidIO(new MissQueueRefillInfo)

    val block_addr = ValidIO(UInt(PAddrBits.W))

    val req_addr = ValidIO(UInt(PAddrBits.W))

    val req_handled_by_this_entry = Output(Bool())

    val forwardInfo = Output(new MissEntryForwardIO)
    val l2_pf_store_only = Input(Bool())

    // whether the pipeline reg has sent out an acquire
    val acquire_fired_by_pipe_reg = Input(Bool())
    val memSetPattenDetected = Input(Bool())

    val perf_pending_prefetch = Output(Bool())
    val perf_pending_normal   = Output(Bool())

    val rob_head_query = new DCacheBundle {
      val vaddr = Input(UInt(VAddrBits.W))
      val query_valid = Input(Bool())

      val resp = Output(Bool())

      def hit(e_vaddr: UInt): Bool = {
        require(e_vaddr.getWidth == VAddrBits)
        query_valid && vaddr(VAddrBits - 1, DCacheLineOffset) === e_vaddr(VAddrBits - 1, DCacheLineOffset)
      }
    }

    val latency_monitor = new DCacheBundle {
      val load_miss_refilling  = Output(Bool())
      val store_miss_refilling = Output(Bool())
      val amo_miss_refilling   = Output(Bool())
      val pf_miss_refilling    = Output(Bool())
    }

    val prefetch_info = new DCacheBundle {
      val late_prefetch = Output(Bool())
    }
    val nMaxPrefetchEntry = Input(UInt(64.W))
    val matched = Output(Bool())
    val l1Miss = Output(Bool())
  })

  assert(!RegNext(io.primary_valid && !io.primary_ready))

  val req = Reg(new MissReqWoStoreData)
  val req_primary_fire = Reg(new MissReqWoStoreData) // for perf use
  val req_store_mask = Reg(UInt(cfg.blockBytes.W))
  val req_valid = RegInit(false.B)
  val set = addr_to_dcache_set(req.vaddr)
  // initial keyword
  val isKeyword = RegInit(false.B)

  val miss_req_pipe_reg_bits = io.miss_req_pipe_reg.req

  val input_req_is_prefetch = isPrefetch(miss_req_pipe_reg_bits.cmd)

  val s_acquire = RegInit(true.B)
  val s_grantack = RegInit(true.B)
  val s_mainpipe_req = RegInit(true.B)

  val w_grantfirst = RegInit(true.B)
  val w_grantlast = RegInit(true.B)
  val w_mainpipe_resp = RegInit(true.B)
  val w_refill_resp = RegInit(true.B)
  val w_l2hint = RegInit(true.B)

  val mainpipe_req_fired = RegInit(true.B)

  val release_entry = s_grantack && w_mainpipe_resp && w_refill_resp

  val acquire_not_sent = !s_acquire && !io.mem_acquire.ready
  val data_not_refilled = !w_grantfirst

  val error = RegInit(false.B)
  val prefetch = RegInit(false.B)
  val access = RegInit(false.B)

  val should_refill_data_reg = Reg(Bool())
  val should_refill_data = WireInit(should_refill_data_reg)

  val should_replace = RegInit(false.B)

  val full_overwrite = Reg(Bool())

  val (_, _, refill_done, refill_count) = edge.count(io.mem_grant)
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))

  // refill data with store data; this reg will be used to store:
  // 1. store data (if needed), before l2 refills the data
  // 2. the merged result of store data and l2 refill data (i.e. the new cacheline that will be written to the data array)
  val refill_and_store_data = Reg(Vec(blockRows, UInt(rowBits.W)))
  // raw data refilled to l1 by l2
  val refill_data_raw = Reg(Vec(blockBytes/beatBytes, UInt(beatBits.W)))

  // allocate current miss queue entry for a miss req
  val primary_fire = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel && !io.wbq_block_miss_req)
  val primary_accept = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel)
  // merge miss req to current miss queue entry
  val secondary_fire = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel && !io.wbq_block_miss_req)
  val secondary_accept = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel)

  val req_handled_by_this_entry = primary_accept || secondary_accept

  // for perf use
  val secondary_fired = RegInit(false.B)

  io.perf_pending_prefetch := req_valid && prefetch && !secondary_fired
  io.perf_pending_normal   := req_valid && (!prefetch || secondary_fired)

  io.rob_head_query.resp   := io.rob_head_query.hit(req.vaddr) && req_valid

  io.req_handled_by_this_entry := req_handled_by_this_entry

  when (release_entry && req_valid) {
    req_valid := false.B
  }

  when (io.miss_req_pipe_reg.alloc && !io.miss_req_pipe_reg.cancel) {
    assert(RegNext(primary_fire), "after 1 cycle of primary_fire, entry will be allocated")
    req_valid := true.B

    req := miss_req_pipe_reg_bits.toMissReqWoStoreData()
    req_primary_fire := miss_req_pipe_reg_bits.toMissReqWoStoreData()
    req.addr := get_block_addr(miss_req_pipe_reg_bits.addr)
    // only load misses need the keyword
    isKeyword := Mux(miss_req_pipe_reg_bits.isFromLoad, miss_req_pipe_reg_bits.vaddr(5).asBool, false.B)

    s_acquire := io.acquire_fired_by_pipe_reg
    s_grantack := false.B
    s_mainpipe_req := false.B

    w_grantfirst := false.B
    w_grantlast := false.B
    w_l2hint := false.B
    mainpipe_req_fired := false.B

    when(miss_req_pipe_reg_bits.isFromStore) {
      req_store_mask := miss_req_pipe_reg_bits.store_mask
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i)
      }
    }
    full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite

    when (!miss_req_pipe_reg_bits.isFromAMO) {
      w_refill_resp := false.B
    }

    when (miss_req_pipe_reg_bits.isFromAMO) {
      w_mainpipe_resp := false.B
    }

    should_refill_data_reg := miss_req_pipe_reg_bits.isFromLoad
    error := false.B
    prefetch := input_req_is_prefetch && !io.miss_req_pipe_reg.prefetch_late_en(io.req.bits, io.req.valid)
    access := false.B
    secondary_fired := false.B
  }

  when (io.miss_req_pipe_reg.merge && !io.miss_req_pipe_reg.cancel) {
    assert(RegNext(secondary_fire) || RegNext(RegNext(primary_fire)), "after 1 cycle of secondary_fire or 2 cycles of primary_fire, entry will be merged")
    assert(miss_req_pipe_reg_bits.req_coh.state <= req.req_coh.state || (prefetch && !access))
    assert(!(miss_req_pipe_reg_bits.isFromAMO || req.isFromAMO))
    // use the most up-to-date meta
    req.req_coh := miss_req_pipe_reg_bits.req_coh

    isKeyword := Mux(
      before_req_sent_can_merge(miss_req_pipe_reg_bits),
      before_req_sent_merge_iskeyword(miss_req_pipe_reg_bits),
      isKeyword)
    assert(!miss_req_pipe_reg_bits.isFromPrefetch, "can not merge a prefetch req, late prefetch should always be ignored!")

    when (miss_req_pipe_reg_bits.isFromStore) {
      req := miss_req_pipe_reg_bits
      req.addr := get_block_addr(miss_req_pipe_reg_bits.addr)
      req_store_mask := miss_req_pipe_reg_bits.store_mask
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i)
      }
      full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite
      assert(is_alias_match(req.vaddr, miss_req_pipe_reg_bits.vaddr), "alias bits should be the same when merging store")
    }

    should_refill_data := should_refill_data_reg || miss_req_pipe_reg_bits.isFromLoad
    should_refill_data_reg := should_refill_data
    when (!input_req_is_prefetch) {
      access := true.B // when merging a non-prefetch req, set the access bit
    }
    secondary_fired := true.B
  }

  when (io.mem_acquire.fire) {
    s_acquire := true.B
  }

  // merge data refilled by l2 and store data, update miss queue entry, gen refill_req
  val new_data = Wire(Vec(blockRows, UInt(rowBits.W)))
  val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W)))
  // merge refilled data and store data (if needed)
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
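  // Worked example (illustration only): for a 2-byte value with wmask = "b01",
  // FillInterleaved(8, wmask) = 0x00FF, so byte 0 is taken from new_data and
  // byte 1 keeps old_data:
  //   mergePutData(old_data = 0xAABB.U, new_data = 0x1122.U, wmask = "b01".U) === 0xAA22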
  for (i <- 0 until blockRows) {
    // new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i)
    new_data(i) := refill_and_store_data(i)
    // we only need to merge data for Store
    new_mask(i) := Mux(req.isFromStore, req_store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U)
  }

  val hasData = RegInit(true.B)
  val isDirty = RegInit(false.B)
  when (io.mem_grant.fire) {
    w_grantfirst := true.B
    grant_param := io.mem_grant.bits.param
    when (edge.hasData(io.mem_grant.bits)) {
      // GrantData
      when (isKeyword) {
        for (i <- 0 until beatRows) {
          val idx = ((refill_count << log2Floor(beatRows)) + i.U) ^ 4.U
          val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
          refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
        }
      }.otherwise {
        for (i <- 0 until beatRows) {
          val idx = (refill_count << log2Floor(beatRows)) + i.U
          val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
          refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
        }
      }
      w_grantlast := w_grantlast || refill_done
      hasData := true.B
    }.otherwise {
      // Grant
      assert(full_overwrite)
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := new_data(i)
      }
      w_grantlast := true.B
      hasData := false.B
    }

    error := io.mem_grant.bits.denied || io.mem_grant.bits.corrupt || error

    refill_data_raw(refill_count ^ isKeyword) := io.mem_grant.bits.data
    isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
  }
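
  // Row-index sketch for critical-word-first (values assumed for illustration:
  // 2 beats per 64B line and beatRows = 4, so "^ 4.U" flips which half-line a row
  // targets, while refill_count ^ isKeyword restores raw beat order):
  //   isKeyword, refill_count = 0 -> rows 0..3 written to idx 4..7
  //   isKeyword, refill_count = 1 -> rows 0..3 written to idx 0..3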

  when (io.mem_finish.fire) {
    s_grantack := true.B
  }

  when (io.main_pipe_req.fire) {
    s_mainpipe_req := true.B
    mainpipe_req_fired := true.B
  }

  when (io.main_pipe_replay) {
    s_mainpipe_req := false.B
  }

  when (io.main_pipe_resp) {
    w_mainpipe_resp := true.B
  }

  when(io.main_pipe_refill_resp) {
    w_refill_resp := true.B
  }

  when (io.l2_hint.valid) {
    w_l2hint := true.B
  }

  def before_req_sent_can_merge(new_req: MissReqWoStoreData): Bool = {
    // acquire_not_sent && (new_req.isFromLoad || new_req.isFromStore)

    // Since most acquire requests have already been issued from the pipe reg,
    // the number of such merge situations is currently small,
    // so don't merge anything here, for better timing.
    false.B
  }

  def before_data_refill_can_merge(new_req: MissReqWoStoreData): Bool = {
    data_not_refilled && new_req.isFromLoad
  }

  // Note that late prefetch will be ignored

  def should_merge(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    block_match && alias_match &&
    (
      before_req_sent_can_merge(new_req) ||
      before_data_refill_can_merge(new_req)
    )
  }

  def before_req_sent_merge_iskeyword(new_req: MissReqWoStoreData): Bool = {
    val need_check_isKeyword = acquire_not_sent && req.isFromLoad && new_req.isFromLoad && should_merge(new_req)
    val use_new_req_isKeyword = isAfter(req.lqIdx, new_req.lqIdx)
    Mux(
      need_check_isKeyword,
      Mux(
        use_new_req_isKeyword,
        new_req.vaddr(5).asBool,
        req.vaddr(5).asBool
      ),
      isKeyword
    )
  }

  // a store can be merged before io.mem_acquire fires
  // a store cannot be merged in the cycle that io.mem_acquire fires
  // a load can be merged before io.mem_grant fires
  //
  // TODO: merge store if possible? mem_acquire may need to be re-issued,
  // but the sbuffer entry can be freed
  def should_reject(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val set_match = set === addr_to_dcache_set(new_req.vaddr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)

    req_valid && Mux(
        block_match,
        (!before_req_sent_can_merge(new_req) && !before_data_refill_can_merge(new_req)) || !alias_match,
        false.B
      )
  }
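
  // Decision summary for an incoming req whose block address matches (sketch):
  //   merge  <=> alias_match && a load arriving before the first grant beat
  //              (store merge is currently disabled via before_req_sent_can_merge)
  //   reject <=> block_match && (!alias_match || no merge window is open)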

  // req_valid will be updated 1 cycle after primary_fire, so in the next cycle this entry cannot accept a new req
  when(GatedValidRegNext(io.id >= ((cfg.nMissEntries).U - io.nMaxPrefetchEntry))) {
    // can accept prefetch reqs
    io.primary_ready := !req_valid && !GatedValidRegNext(primary_fire)
  }.otherwise {
    // cannot accept prefetch reqs except when a memset pattern is detected
    io.primary_ready := !req_valid && (!io.req.bits.isFromPrefetch || io.memSetPattenDetected) && !GatedValidRegNext(primary_fire)
  }
  io.secondary_ready := should_merge(io.req.bits)
  io.secondary_reject := should_reject(io.req.bits)

  // generate primary_ready & secondary_(ready | reject) for each miss request
  for (i <- 0 until reqNum) {
    when(GatedValidRegNext(io.id >= ((cfg.nMissEntries).U - io.nMaxPrefetchEntry))) {
      io.queryME(i).primary_ready := !req_valid && !GatedValidRegNext(primary_fire)
    }.otherwise {
      io.queryME(i).primary_ready := !req_valid && !GatedValidRegNext(primary_fire) &&
                                    (!io.queryME(i).req.bits.isFromPrefetch || io.memSetPattenDetected)
    }
    io.queryME(i).secondary_ready  := should_merge(io.queryME(i).req.bits)
    io.queryME(i).secondary_reject := should_reject(io.queryME(i).req.bits)
  }

  // should not allocate, merge or reject at the same time
  assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U || !io.req.valid))

  val refill_data_splited = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => {
    val data = refill_and_store_data.asUInt
    data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth)
  })))
  // when all granted data is ready, wake up the LQ's missed load
  val refill_to_ldq_en = !w_grantlast && io.mem_grant.fire
  io.refill_to_ldq.valid := GatedValidRegNext(refill_to_ldq_en)
  io.refill_to_ldq.bits.addr := RegEnable(req.addr + ((refill_count ^ isKeyword) << refillOffBits), refill_to_ldq_en)
  io.refill_to_ldq.bits.data := refill_data_splited(RegEnable(refill_count ^ isKeyword, refill_to_ldq_en))
  io.refill_to_ldq.bits.error := RegEnable(io.mem_grant.bits.corrupt || io.mem_grant.bits.denied, refill_to_ldq_en)
  io.refill_to_ldq.bits.refill_done := RegEnable(refill_done && io.mem_grant.fire, refill_to_ldq_en)
  io.refill_to_ldq.bits.hasdata := hasData
  io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt
  io.refill_to_ldq.bits.id := io.id

  // if the entry has a pending merge req, wait for it
  // Note: for now, only wait for stores, because a store may acquire permission T
  io.mem_acquire.valid := !s_acquire && !(io.miss_req_pipe_reg.merge && !io.miss_req_pipe_reg.cancel && miss_req_pipe_reg_bits.isFromStore)
  val grow_param = req.req_coh.onAccess(req.cmd)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  val acquirePerm = edge.AcquirePerm(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock)
  // resolve cache alias by L2
  io.mem_acquire.bits.user.lift(AliasKey).foreach(_ := req.vaddr(13, 12))
  // pass vaddr to l2
  io.mem_acquire.bits.user.lift(VaddrKey).foreach(_ := req.vaddr(VAddrBits-1, blockOffBits))
  // pass keyword to L2
  io.mem_acquire.bits.echo.lift(IsKeywordKey).foreach(_ := isKeyword)
  // trigger prefetch
  io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := Mux(io.l2_pf_store_only, req.isFromStore, true.B))
  // req source
  when(prefetch && !secondary_fired) {
    io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
  }.otherwise {
    when(req.isFromStore) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U)
    }.elsewhen(req.isFromLoad) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U)
    }.elsewhen(req.isFromAMO) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U)
    }.otherwise {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
    }
  }
  require(nSets <= 256)

  // io.mem_grant.ready := !w_grantlast && s_acquire
  io.mem_grant.ready := true.B
  assert(!(io.mem_grant.valid && !(!w_grantlast && s_acquire)), "dcache should always be ready for mem_grant now")

  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire)
  assert(RegNext(!io.mem_grant.fire || edge.isRequest(io.mem_grant.bits)))
  io.mem_finish.valid := !s_grantack && w_grantfirst
  io.mem_finish.bits := grantack

  // send the mainpipe req when a hint is received from L2, or when data is received without a hint
  io.main_pipe_req.valid := !s_mainpipe_req && (w_l2hint || w_grantlast)
  io.main_pipe_req.bits := DontCare
  io.main_pipe_req.bits.miss := true.B
  io.main_pipe_req.bits.miss_id := io.id
  io.main_pipe_req.bits.probe := false.B
  io.main_pipe_req.bits.source := req.source
  io.main_pipe_req.bits.cmd := req.cmd
  io.main_pipe_req.bits.vaddr := req.vaddr
  io.main_pipe_req.bits.addr := req.addr
  io.main_pipe_req.bits.word_idx := req.word_idx
  io.main_pipe_req.bits.amo_data := req.amo_data
  io.main_pipe_req.bits.amo_mask := req.amo_mask
  io.main_pipe_req.bits.amo_cmp  := req.amo_cmp
  io.main_pipe_req.bits.id := req.id
  io.main_pipe_req.bits.pf_source := req.pf_source
  io.main_pipe_req.bits.access := access

  io.block_addr.valid := req_valid && w_grantlast
  io.block_addr.bits := req.addr

  io.req_addr.valid := req_valid
  io.req_addr.bits := req.addr

  io.refill_info.valid := req_valid && w_grantlast
  io.refill_info.bits.store_data := refill_and_store_data.asUInt
  io.refill_info.bits.store_mask := ~0.U(blockBytes.W)
  io.refill_info.bits.miss_param := grant_param
  io.refill_info.bits.miss_dirty := isDirty
  io.refill_info.bits.error      := error

  XSPerfAccumulate("miss_refill_mainpipe_req", io.main_pipe_req.fire)
  XSPerfAccumulate("miss_refill_without_hint", io.main_pipe_req.fire && !mainpipe_req_fired && !w_l2hint)
  XSPerfAccumulate("miss_refill_replay", io.main_pipe_replay)

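  // With critical-word-first, the first beat received is physically the last beat of
  // the line, so swap the beat-valid flags exposed to the load forwarding network.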
  val w_grantfirst_forward_info = Mux(isKeyword, w_grantlast, w_grantfirst)
  val w_grantlast_forward_info = Mux(isKeyword, w_grantfirst, w_grantlast)
  io.forwardInfo.inflight := req_valid
  io.forwardInfo.paddr := req.addr
  io.forwardInfo.raw_data := refill_and_store_data
  io.forwardInfo.firstbeat_valid := w_grantfirst_forward_info
  io.forwardInfo.lastbeat_valid := w_grantlast_forward_info
  io.forwardInfo.corrupt := error

  io.matched := req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && !prefetch
  io.prefetch_info.late_prefetch := io.req.valid && !(io.req.bits.isFromPrefetch) && req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && prefetch

  when(io.prefetch_info.late_prefetch) {
    prefetch := false.B
  }

  io.l1Miss := req_valid
  // refill latency monitor
  val start_counting = GatedValidRegNext(io.mem_acquire.fire) || (GatedValidRegNextN(primary_fire, 2) && s_acquire)
  io.latency_monitor.load_miss_refilling  := req_valid && req_primary_fire.isFromLoad     && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.store_miss_refilling := req_valid && req_primary_fire.isFromStore    && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.amo_miss_refilling   := req_valid && req_primary_fire.isFromAMO      && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.pf_miss_refilling    := req_valid && req_primary_fire.isFromPrefetch && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)

  XSPerfAccumulate("miss_req_primary", primary_fire)
  XSPerfAccumulate("miss_req_merged", secondary_fire)
  XSPerfAccumulate("load_miss_penalty_to_use",
    should_refill_data &&
      BoolStopWatch(primary_fire, io.refill_to_ldq.valid, true)
  )
  XSPerfAccumulate("penalty_between_grantlast_and_release",
    BoolStopWatch(!RegNext(w_grantlast) && w_grantlast, release_entry, true)
  )
  XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire, io.main_pipe_resp))
  XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid)
  XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready)
  XSPerfAccumulate("prefetch_req_primary", primary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U)
  XSPerfAccumulate("prefetch_req_merged", secondary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U)
  XSPerfAccumulate("can_not_send_acquire_because_of_merging_store", !s_acquire && io.miss_req_pipe_reg.merge && io.miss_req_pipe_reg.cancel && miss_req_pipe_reg_bits.isFromStore)

  val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(GatedValidRegNextN(primary_fire, 2), release_entry)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 20, 100, 10, true, false)

  val load_miss_begin = primary_fire && io.req.bits.isFromLoad
  val refill_finished = GatedValidRegNext(!w_grantlast && refill_done) && should_refill_data
  val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not the real refill finish time
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 20, 100, 10, true, false)
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 100, 400, 30, true, false)

  val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(start_counting, GatedValidRegNext(io.mem_grant.fire && refill_done))
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 20, 100, 10, true, false)
}

class MissQueue(edge: TLEdgeOut, reqNum: Int)(implicit p: Parameters) extends DCacheModule
  with HasPerfEvents {
  val io = IO(new Bundle {
    val hartId = Input(UInt(hartIdLen.W))
    val req = Flipped(DecoupledIO(new MissReq))
    val resp = Output(new MissResp)
    val refill_to_ldq = ValidIO(new Refill)

    // cmo req
    val cmo_req = Flipped(DecoupledIO(new CMOReq))
    val cmo_resp = DecoupledIO(new CMOResp)

    val queryMQ = Vec(reqNum, Flipped(new DCacheMQQueryIOBundle))

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val l2_hint = Input(Valid(new L2ToL1Hint())) // Hint from L2 Cache

    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Flipped(ValidIO(new MainPipeResp))

    val mainpipe_info = Input(new MainPipeInfoToMQ)
    val refill_info = ValidIO(new MissQueueRefillInfo)

    // block probe
    val probe_addr = Input(UInt(PAddrBits.W))
    val probe_block = Output(Bool())

    // block replace when releasing an addr valid in mshr
    val replace_addr = Flipped(ValidIO(UInt(PAddrBits.W)))
    val replace_block = Output(Bool())

    // req blocked by wbq
    val wbq_block_miss_req = Input(Bool())

    val full = Output(Bool())

    // forward missqueue
    val forward = Vec(LoadPipelineWidth, new LduToMissqueueForwardIO)
    val l2_pf_store_only = Input(Bool())

    val memSetPattenDetected = Output(Bool())
    val lqEmpty = Input(Bool())

    val prefetch_info = new Bundle {
      val naive = new Bundle {
        val late_miss_prefetch = Output(Bool())
      }

      val fdp = new Bundle {
        val late_miss_prefetch = Output(Bool())
        val prefetch_monitor_cnt = Output(Bool())
        val total_prefetch = Output(Bool())
      }
    }

    val debugTopDown = new DCacheTopDownIO
    val l1Miss = Output(Bool())
  })

  // 128KB L1: FIXME: provide vaddr for l2

  val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge, reqNum)))
  val cmo_unit = Module(new CMOUnit(edge))

  val miss_req_pipe_reg = RegInit(0.U.asTypeOf(new MissReqPipeRegBundle(edge)))
  val acquire_from_pipereg = Wire(chiselTypeOf(io.mem_acquire))

  val primary_ready_vec = entries.map(_.io.primary_ready)
  val secondary_ready_vec = entries.map(_.io.secondary_ready)
  val secondary_reject_vec = entries.map(_.io.secondary_reject)
  val probe_block_vec = entries.map { case e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr }

  val merge = ParallelORR(Cat(secondary_ready_vec ++ Seq(miss_req_pipe_reg.merge_req(io.req.bits))))
  val reject = ParallelORR(Cat(secondary_reject_vec ++ Seq(miss_req_pipe_reg.reject_req(io.req.bits))))
  val alloc = !reject && !merge && ParallelORR(Cat(primary_ready_vec))
  val accept = alloc || merge

  // generate req_ready for each miss request for better timing
  for (i <- 0 until reqNum) {
    val _primary_ready_vec = entries.map(_.io.queryME(i).primary_ready)
    val _secondary_ready_vec = entries.map(_.io.queryME(i).secondary_ready)
    val _secondary_reject_vec = entries.map(_.io.queryME(i).secondary_reject)
    val _merge = ParallelORR(Cat(_secondary_ready_vec ++ Seq(miss_req_pipe_reg.merge_req(io.queryMQ(i).req.bits))))
    val _reject = ParallelORR(Cat(_secondary_reject_vec ++ Seq(miss_req_pipe_reg.reject_req(io.queryMQ(i).req.bits))))
    val _alloc = !_reject && !_merge && ParallelORR(Cat(_primary_ready_vec))
    val _accept = _alloc || _merge

    io.queryMQ(i).ready := _accept
  }

  val req_mshr_handled_vec = entries.map(_.io.req_handled_by_this_entry)
  // merged into the pipeline reg
  val req_pipeline_reg_handled = miss_req_pipe_reg.merge_req(io.req.bits) && io.req.valid
  assert(PopCount(Seq(req_pipeline_reg_handled, VecInit(req_mshr_handled_vec).asUInt.orR)) <= 1.U, "miss req will either go to mshr or pipeline reg")
  assert(PopCount(req_mshr_handled_vec) <= 1.U, "Only one mshr can handle a req")
  io.resp.id := Mux(!req_pipeline_reg_handled, OHToUInt(req_mshr_handled_vec), miss_req_pipe_reg.mshr_id)
  io.resp.handled := Cat(req_mshr_handled_vec).orR || req_pipeline_reg_handled
  io.resp.merged := merge

  // MissQueue enq logic is now split into 2 cycles
  when(io.req.valid) {
    miss_req_pipe_reg.req     := io.req.bits
  }
  // miss_req_pipe_reg.req     := io.req.bits
  miss_req_pipe_reg.alloc   := alloc && io.req.valid && !io.req.bits.cancel && !io.wbq_block_miss_req
  miss_req_pipe_reg.merge   := merge && io.req.valid && !io.req.bits.cancel && !io.wbq_block_miss_req
  miss_req_pipe_reg.cancel  := io.wbq_block_miss_req
  miss_req_pipe_reg.mshr_id := io.resp.id

  assert(PopCount(Seq(alloc && io.req.valid, merge && io.req.valid)) <= 1.U, "allocate and merge a mshr in the same cycle!")

  val source_except_load_cnt = RegInit(0.U(10.W))
  when(VecInit(req_mshr_handled_vec).asUInt.orR || req_pipeline_reg_handled) {
    when(io.req.bits.isFromLoad) {
      source_except_load_cnt := 0.U
    }.otherwise {
      when(io.req.bits.isFromStore) {
        source_except_load_cnt := source_except_load_cnt + 1.U
      }
    }
  }
  val Threshold = 8
  val memSetPattenDetected = GatedValidRegNext((source_except_load_cnt >= Threshold.U) && io.lqEmpty)

  io.memSetPattenDetected := memSetPattenDetected

  val forwardInfo_vec = VecInit(entries.map(_.io.forwardInfo))
  (0 until LoadPipelineWidth).map(i => {
    val id = io.forward(i).mshrid
    val req_valid = io.forward(i).valid
    val paddr = io.forward(i).paddr

    val (forward_mshr, forwardData) = forwardInfo_vec(id).forward(req_valid, paddr)
    io.forward(i).forward_result_valid := forwardInfo_vec(id).check(req_valid, paddr)
    io.forward(i).forward_mshr := forward_mshr
    io.forward(i).forwardData := forwardData
    io.forward(i).corrupt := RegNext(forwardInfo_vec(id).corrupt)
  })

  assert(RegNext(PopCount(secondary_ready_vec) <= 1.U || !io.req.valid))
//  assert(RegNext(PopCount(secondary_reject_vec) <= 1.U))
  // It is possible that one mshr wants to merge a req, while another mshr wants to reject it.
  // That is, a coming req has the same paddr as that of mshr_0 (merge),
  // while it has the same set and the same way as mshr_1 (reject).
  // In this situation, the coming req should be merged by mshr_0
//  assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U))

  def select_valid_one[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {

    if (name.nonEmpty) { out.suggestName(s"${name.get}_select") }
    out.valid := Cat(in.map(_.valid)).orR
    out.bits := ParallelMux(in.map(_.valid) zip in.map(_.bits))
    in.map(_.ready := out.ready)
    assert(!RegNext(out.valid && PopCount(Cat(in.map(_.valid))) > 1.U))
  }

  io.mem_grant.ready := false.B

  val nMaxPrefetchEntry = Constantin.createRecord(s"nMaxPrefetchEntry${p(XSCoreParamsKey).HartId}", initValue = 14)
  entries.zipWithIndex.foreach {
    case (e, i) =>
      val former_primary_ready = if(i == 0)
        false.B
      else
        Cat((0 until i).map(j => entries(j).io.primary_ready)).orR

      e.io.hartId := io.hartId
      e.io.id := i.U
      e.io.l2_pf_store_only := io.l2_pf_store_only
      e.io.req.valid := io.req.valid
      e.io.wbq_block_miss_req := io.wbq_block_miss_req
      e.io.primary_valid := io.req.valid &&
        !merge &&
        !reject &&
        !former_primary_ready &&
        e.io.primary_ready
      e.io.req.bits := io.req.bits.toMissReqWoStoreData()

      e.io.mem_grant.valid := false.B
      e.io.mem_grant.bits := DontCare
      when (io.mem_grant.bits.source === i.U) {
        e.io.mem_grant <> io.mem_grant
      }

      when(miss_req_pipe_reg.reg_valid() && miss_req_pipe_reg.mshr_id === i.U) {
        e.io.miss_req_pipe_reg := miss_req_pipe_reg
      }.otherwise {
        e.io.miss_req_pipe_reg       := DontCare
        e.io.miss_req_pipe_reg.merge := false.B
        e.io.miss_req_pipe_reg.alloc := false.B
      }

      e.io.acquire_fired_by_pipe_reg := acquire_from_pipereg.fire

      e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U
      e.io.main_pipe_replay := io.mainpipe_info.s2_valid && io.mainpipe_info.s2_replay_to_mq && io.mainpipe_info.s2_miss_id === i.U
      e.io.main_pipe_refill_resp := io.mainpipe_info.s3_valid && io.mainpipe_info.s3_refill_resp && io.mainpipe_info.s3_miss_id === i.U

      e.io.memSetPattenDetected := memSetPattenDetected
      e.io.nMaxPrefetchEntry := nMaxPrefetchEntry

      e.io.main_pipe_req.ready := io.main_pipe_req.ready

      for (j <- 0 until reqNum) {
        e.io.queryME(j).req.valid := io.queryMQ(j).req.valid
        e.io.queryME(j).req.bits  := io.queryMQ(j).req.bits.toMissReqWoStoreData()
      }

      when(io.l2_hint.bits.sourceId === i.U) {
        e.io.l2_hint <> io.l2_hint
      } .otherwise {
        e.io.l2_hint.valid := false.B
        e.io.l2_hint.bits := DontCare
      }
  }

  cmo_unit.io.req <> io.cmo_req
  io.cmo_resp <> cmo_unit.io.resp_to_lsq
  when (io.mem_grant.valid && io.mem_grant.bits.opcode === TLMessages.CBOAck) {
    cmo_unit.io.resp_chanD <> io.mem_grant
  } .otherwise {
    cmo_unit.io.resp_chanD.valid := false.B
    cmo_unit.io.resp_chanD.bits := DontCare
  }

  io.req.ready := accept
  io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR
  io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits))

  io.refill_info.valid := VecInit(entries.zipWithIndex.map{ case(e,i) => e.io.refill_info.valid && io.mainpipe_info.s2_valid && io.mainpipe_info.s2_miss_id === i.U}).asUInt.orR
  io.refill_info.bits := Mux1H(entries.zipWithIndex.map{ case(e,i) => (io.mainpipe_info.s2_miss_id === i.U) -> e.io.refill_info.bits })

  acquire_from_pipereg.valid := miss_req_pipe_reg.can_send_acquire(io.req.valid, io.req.bits)
  acquire_from_pipereg.bits := miss_req_pipe_reg.get_acquire(io.l2_pf_store_only)

  XSPerfAccumulate("acquire_fire_from_pipereg", acquire_from_pipereg.fire)
  XSPerfAccumulate("pipereg_valid", miss_req_pipe_reg.reg_valid())

  val acquire_sources = Seq(cmo_unit.io.req_chanA, acquire_from_pipereg) ++ entries.map(_.io.mem_acquire)
  TLArbiter.lowest(edge, io.mem_acquire, acquire_sources:_*)
  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*)

  // amo's main pipe req out
  fastArbiter(entries.map(_.io.main_pipe_req), io.main_pipe_req, Some("main_pipe_req"))

  io.probe_block := Cat(probe_block_vec).orR

  io.replace_block := io.replace_addr.valid && Cat(entries.map(e => e.io.req_addr.valid && e.io.req_addr.bits === io.replace_addr.bits) ++ Seq(miss_req_pipe_reg.block_match(io.replace_addr.bits))).orR

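  // NOTE: the connection below is overridden later by Chisel last-connect semantics
  // (io.full := num_valids === cfg.nMissEntries.U in the perf-count section).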
  io.full := ~Cat(entries.map(_.io.primary_ready)).andR

  // prefetch related
  io.prefetch_info.naive.late_miss_prefetch := io.req.valid && io.req.bits.isPrefetchRead && (miss_req_pipe_reg.matched(io.req.bits) || Cat(entries.map(_.io.matched)).orR)

  io.prefetch_info.fdp.late_miss_prefetch := (miss_req_pipe_reg.prefetch_late_en(io.req.bits.toMissReqWoStoreData(), io.req.valid) || Cat(entries.map(_.io.prefetch_info.late_prefetch)).orR)
  io.prefetch_info.fdp.prefetch_monitor_cnt := io.main_pipe_req.fire
  io.prefetch_info.fdp.total_prefetch := alloc && io.req.valid && !io.req.bits.cancel && isFromL1Prefetch(io.req.bits.pf_source)

  // L1MissTrace Chisel DB
  val debug_miss_trace = Wire(new L1MissTrace)
  debug_miss_trace.vaddr := io.req.bits.vaddr
  debug_miss_trace.paddr := io.req.bits.addr
  debug_miss_trace.source := io.req.bits.source
  debug_miss_trace.pc := io.req.bits.pc

  val isWriteL1MissQMissTable = Constantin.createRecord(s"isWriteL1MissQMissTable${p(XSCoreParamsKey).HartId}")
  val table = ChiselDB.createTable(s"L1MissQMissTrace_hart${p(XSCoreParamsKey).HartId}", new L1MissTrace)
  table.log(debug_miss_trace, isWriteL1MissQMissTable.orR && io.req.valid && !io.req.bits.cancel && alloc, "MissQueue", clock, reset)

  // Difftest
  if (env.EnableDifftest) {
    val difftest = DifftestModule(new DiffRefillEvent, dontCare = true)
    difftest.coreid := io.hartId
    difftest.index := 1.U
    difftest.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done
    difftest.addr := io.refill_to_ldq.bits.addr
    difftest.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.data)
    difftest.idtfr := DontCare
  }

  // Perf count
  XSPerfAccumulate("miss_req", io.req.fire && !io.req.bits.cancel)
  XSPerfAccumulate("miss_req_allocate", io.req.fire && !io.req.bits.cancel && alloc)
  XSPerfAccumulate("miss_req_load_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromLoad)
  XSPerfAccumulate("miss_req_store_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromStore)
  XSPerfAccumulate("miss_req_amo_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromAMO)
  XSPerfAccumulate("miss_req_prefetch_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("miss_req_merge_load", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromLoad)
  XSPerfAccumulate("miss_req_reject_load", io.req.valid && !io.req.bits.cancel && reject && io.req.bits.isFromLoad)
  XSPerfAccumulate("probe_blocked_by_miss", io.probe_block)
  XSPerfAccumulate("prefetch_primary_fire", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("prefetch_secondary_fire", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("memSetPattenDetected", memSetPattenDetected)
  val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W))
  val num_valids = PopCount(~Cat(primary_ready_vec).asUInt)
  when (num_valids > max_inflight) {
    max_inflight := num_valids
  }
  // max inflight (average) = max_inflight_total / cycle cnt
  XSPerfAccumulate("max_inflight", max_inflight)
  QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U)
  io.full := num_valids === cfg.nMissEntries.U
  io.l1Miss := RegNext(Cat(entries.map(_.io.l1Miss)).orR)
  XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1)

  XSPerfHistogram("L1DMLP_CPUData", PopCount(VecInit(entries.map(_.io.perf_pending_normal)).asUInt), true.B, 0, cfg.nMissEntries, 1)
  XSPerfHistogram("L1DMLP_Prefetch", PopCount(VecInit(entries.map(_.io.perf_pending_prefetch)).asUInt), true.B, 0, cfg.nMissEntries, 1)
  XSPerfHistogram("L1DMLP_Total", num_valids, true.B, 0, cfg.nMissEntries, 1)

  XSPerfAccumulate("miss_load_refill_latency", PopCount(entries.map(_.io.latency_monitor.load_miss_refilling)))
  XSPerfAccumulate("miss_store_refill_latency", PopCount(entries.map(_.io.latency_monitor.store_miss_refilling)))
  XSPerfAccumulate("miss_amo_refill_latency", PopCount(entries.map(_.io.latency_monitor.amo_miss_refilling)))
  XSPerfAccumulate("miss_pf_refill_latency", PopCount(entries.map(_.io.latency_monitor.pf_miss_refilling)))

  val rob_head_miss_in_dcache = VecInit(entries.map(_.io.rob_head_query.resp)).asUInt.orR

  entries.foreach {
    case e => {
      e.io.rob_head_query.query_valid := io.debugTopDown.robHeadVaddr.valid
      e.io.rob_head_query.vaddr := io.debugTopDown.robHeadVaddr.bits
    }
  }

  io.debugTopDown.robHeadMissInDCache := rob_head_miss_in_dcache

  val perfValidCount = RegNext(PopCount(entries.map(entry => (!entry.io.primary_ready))))
  val perfEvents = Seq(
    ("dcache_missq_req      ", io.req.fire),
    ("dcache_missq_1_4_valid", (perfValidCount < (cfg.nMissEntries.U/4.U))),
    ("dcache_missq_2_4_valid", (perfValidCount > (cfg.nMissEntries.U/4.U)) & (perfValidCount <= (cfg.nMissEntries.U/2.U))),
    ("dcache_missq_3_4_valid", (perfValidCount > (cfg.nMissEntries.U/2.U)) & (perfValidCount <= (cfg.nMissEntries.U*3.U/4.U))),
    ("dcache_missq_4_4_valid", (perfValidCount > (cfg.nMissEntries.U*3.U/4.U))),
  )
  generatePerfEvent()
}