xref: /XiangShan/src/main/scala/xiangshan/mem/sbuffer/Sbuffer.scala (revision 1eb8dd224d63ba7d4afa63695f72d8230e150d37)
/***************************************************************************************
* Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
* Copyright (c) 2020-2024 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.Bundles.DynInst
import xiangshan.backend.fu.FuType._
import xiangshan.mem.Bundles._
import xiangshan.cache._
import difftest._

class SbufferFlushBundle extends Bundle {
  val valid = Output(Bool())
  val empty = Input(Bool())
}

trait HasSbufferConst extends HasXSParameter {
  val EvictCycles = 1 << 20
  val SbufferReplayDelayCycles = 16
  require(isPow2(EvictCycles))
  val EvictCountBits = log2Up(EvictCycles+1)
  val MissqReplayCountBits = log2Up(SbufferReplayDelayCycles) + 1

  // dcache write hit resp used to have 2 sources:
  // refill pipe resp and main pipe resp (fixed: only main pipe resp is used now)
  // val NumDcacheWriteResp = 2 // hardcoded
  val NumDcacheWriteResp = 1 // hardcoded

  val SbufferIndexWidth: Int = log2Up(StoreBufferSize)
  // paddr = ptag + offset
  val CacheLineBytes: Int = CacheLineSize / 8
  val CacheLineWords: Int = CacheLineBytes / DataBytes
  val OffsetWidth: Int = log2Up(CacheLineBytes)
  val WordsWidth: Int = log2Up(CacheLineWords)
  val PTagWidth: Int = PAddrBits - OffsetWidth
  val VTagWidth: Int = VAddrBits - OffsetWidth
  val WordOffsetWidth: Int = PAddrBits - WordsWidth

  val CacheLineVWords: Int = CacheLineBytes / VDataBytes
  val VWordsWidth: Int = log2Up(CacheLineVWords)
  val VWordWidth: Int = log2Up(VDataBytes)
  val VWordOffsetWidth: Int = PAddrBits - VWordWidth
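
  // Example geometry (assuming CacheLineSize = 512 bits and VLEN = 128, i.e. VDataBytes = 16):
  // CacheLineBytes = 64, CacheLineVWords = 4, OffsetWidth = 6, PTagWidth = PAddrBits - 6,
  // so each sbuffer entry holds exactly one cache line split into 4 VLEN-wide words.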
}

class SbufferEntryState (implicit p: Parameters) extends SbufferBundle {
  val state_valid    = Bool() // this entry is active
  val state_inflight = Bool() // sbuffer is trying to write this entry to dcache
  val w_timeout = Bool() // got a replay resp from dcache, waiting for the resend timer to expire before the req is re-sent
  val w_sameblock_inflight = Bool() // a dcache req to the same cache block is inflight

  def isInvalid(): Bool = !state_valid
  def isValid(): Bool = state_valid
  def isActive(): Bool = state_valid && !state_inflight
  def isInflight(): Bool = state_inflight
  def isDcacheReqCandidate(): Bool = state_valid && !state_inflight && !w_sameblock_inflight
}

class SbufferBundle(implicit p: Parameters) extends XSBundle with HasSbufferConst

class DataWriteReq(implicit p: Parameters) extends SbufferBundle {
  // universal write mask
  val wvec = UInt(StoreBufferSize.W)
  // 2 cycle update
  val mask = UInt((VLEN/8).W)
  val data = UInt(VLEN.W)
  val vwordOffset = UInt(VWordOffsetWidth.W)
  val wline = Bool() // write full cacheline
}

class MaskFlushReq(implicit p: Parameters) extends SbufferBundle {
  // universal write mask
  val wvec = UInt(StoreBufferSize.W)
}

class SbufferData(implicit p: Parameters) extends XSModule with HasSbufferConst {
  val io = IO(new Bundle(){
    // update data and mask when alloc or merge
    val writeReq = Vec(EnsbufferWidth, Flipped(ValidIO(new DataWriteReq)))
    // clean mask when deq
    val maskFlushReq = Vec(NumDcacheWriteResp, Flipped(ValidIO(new MaskFlushReq)))
    val dataOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W)))))
    val maskOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))
  })

  val data = Reg(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W)))))
  // val mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val mask = RegInit(
    VecInit(Seq.fill(StoreBufferSize)(
      VecInit(Seq.fill(CacheLineVWords)(
        VecInit(Seq.fill(VDataBytes)(false.B))
      ))
    ))
  )

  // 2 cycle line mask clean
  for(line <- 0 until StoreBufferSize){
    val line_mask_clean_flag = GatedValidRegNext(
      io.maskFlushReq.map(a => a.valid && a.bits.wvec(line)).reduce(_ || _)
    )
    line_mask_clean_flag.suggestName("line_mask_clean_flag_"+line)
    when(line_mask_clean_flag){
      for(word <- 0 until CacheLineVWords){
        for(byte <- 0 until VDataBytes){
          mask(line)(word)(byte) := false.B
        }
      }
    }
  }

  // 2 cycle data / mask update
  for(i <- 0 until EnsbufferWidth) {
    val req = io.writeReq(i)
    for(line <- 0 until StoreBufferSize){
      val sbuffer_in_s1_line_wen = req.valid && req.bits.wvec(line)
      val sbuffer_in_s2_line_wen = GatedValidRegNext(sbuffer_in_s1_line_wen)
      val line_write_buffer_data = RegEnable(req.bits.data, sbuffer_in_s1_line_wen)
      val line_write_buffer_wline = RegEnable(req.bits.wline, sbuffer_in_s1_line_wen)
      val line_write_buffer_mask = RegEnable(req.bits.mask, sbuffer_in_s1_line_wen)
      val line_write_buffer_offset = RegEnable(req.bits.vwordOffset(VWordsWidth-1, 0), sbuffer_in_s1_line_wen)
      sbuffer_in_s1_line_wen.suggestName("sbuffer_in_s1_line_wen_"+line)
      sbuffer_in_s2_line_wen.suggestName("sbuffer_in_s2_line_wen_"+line)
      line_write_buffer_data.suggestName("line_write_buffer_data_"+line)
      line_write_buffer_wline.suggestName("line_write_buffer_wline_"+line)
      line_write_buffer_mask.suggestName("line_write_buffer_mask_"+line)
      line_write_buffer_offset.suggestName("line_write_buffer_offset_"+line)
      for(word <- 0 until CacheLineVWords){
        for(byte <- 0 until VDataBytes){
          val write_byte = sbuffer_in_s2_line_wen && (
            line_write_buffer_mask(byte) && (line_write_buffer_offset === word.U) ||
            line_write_buffer_wline
          )
          when(write_byte){
            data(line)(word)(byte) := line_write_buffer_data(byte*8+7, byte*8)
            mask(line)(word)(byte) := true.B
          }
        }
      }
    }
  }

  // 1 cycle line mask clean
  // for(i <- 0 until EnsbufferWidth) {
  //   val req = io.writeReq(i)
  //   when(req.valid){
  //     for(line <- 0 until StoreBufferSize){
  //       when(
  //         req.bits.wvec(line) &&
  //         req.bits.cleanMask
  //       ){
  //         for(word <- 0 until CacheLineWords){
  //           for(byte <- 0 until DataBytes){
  //             mask(line)(word)(byte) := false.B
  //             val debug_last_cycle_write_byte = RegNext(req.valid && req.bits.wvec(line) && (
  //               req.bits.mask(byte) && (req.bits.wordOffset(WordsWidth-1, 0) === word.U) ||
  //               req.bits.wline
  //             ))
  //             assert(!debug_last_cycle_write_byte)
  //           }
  //         }
  //       }
  //     }
  //   }
  // }

  io.dataOut := data
  io.maskOut := mask
}

class Sbuffer(implicit p: Parameters)
  extends DCacheModule
    with HasSbufferConst
    with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val in = Vec(EnsbufferWidth, Flipped(Decoupled(new DCacheWordReqWithVaddrAndPfFlag)))  // TODO: store logic only supports EnsbufferWidth == 2 for now
    val vecDifftestInfo = Vec(EnsbufferWidth, Flipped(Decoupled(new DynInst)))
    val dcache = Flipped(new DCacheToSbufferIO)
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val sqempty = Input(Bool())
    val sbempty = Output(Bool())
    val flush = Flipped(new SbufferFlushBundle)
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val store_prefetch = Vec(StorePipelineWidth, DecoupledIO(new StorePrefetchReq)) // to dcache
    val memSetPattenDetected = Input(Bool())
    val force_write = Input(Bool())
  })

  val dataModule = Module(new SbufferData)
  dataModule.io.writeReq <> DontCare
  val prefetcher = Module(new StorePfWrapper())
  val writeReq = dataModule.io.writeReq

  val ptag = Reg(Vec(StoreBufferSize, UInt(PTagWidth.W)))
  val vtag = Reg(Vec(StoreBufferSize, UInt(VTagWidth.W)))
  val debug_mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val waitInflightMask = Reg(Vec(StoreBufferSize, UInt(StoreBufferSize.W)))
  val data = dataModule.io.dataOut
  val mask = dataModule.io.maskOut
  val stateVec = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U.asTypeOf(new SbufferEntryState))))
  val cohCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(EvictCountBits.W))))
  val missqReplayCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(MissqReplayCountBits.W))))

  val sbuffer_out_s0_fire = Wire(Bool())

  /*
       idle --[flush]   --> drain   --[buf empty]--> idle
            --[buf full]--> replace --[dcache resp]--> idle
  */
  // x_drain_all: drain store queue and sbuffer
  // x_drain_sbuffer: drain sbuffer only, block store queue to sbuffer writes
  val x_idle :: x_replace :: x_drain_all :: x_drain_sbuffer :: Nil = Enum(4)
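  // Enum(4) assigns the states in declaration order: x_idle = 0, x_replace = 1,
  // x_drain_all = 2, x_drain_sbuffer = 3, so needDrain (which checks state bit 1)
  // is set exactly in the two drain states.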
  def needDrain(state: UInt): Bool =
    state(1)
  val sbuffer_state = RegInit(x_idle)

  // ---------------------- Store Enq Sbuffer ---------------------

  def getPTag(pa: UInt): UInt =
    pa(PAddrBits - 1, PAddrBits - PTagWidth)

  def getVTag(va: UInt): UInt =
    va(VAddrBits - 1, VAddrBits - VTagWidth)

  def getWord(pa: UInt): UInt =
    pa(PAddrBits-1, 3)

  def getVWord(pa: UInt): UInt =
    pa(PAddrBits-1, 4)

  def getWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 3)

  def getVWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 4)

  def getAddr(ptag: UInt): UInt =
    Cat(ptag, 0.U((PAddrBits - PTagWidth).W))

  def getByteOffset(offset: UInt): UInt =
    Cat(offset(OffsetWidth - 1, 3), 0.U(3.W))
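
  // e.g. with 64-byte cache lines (OffsetWidth = 6) and VDataBytes = 16:
  // getVWordOffset(pa) = pa(5, 4) selects one of the 4 VLEN-wide words within a line,
  // and getAddr(ptag) rebuilds a line-aligned physical address by padding the ptag with zeros.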

  def isOneOf(key: UInt, seq: Seq[UInt]): Bool =
    if(seq.isEmpty) false.B else Cat(seq.map(_===key)).orR

  def widthMap[T <: Data](f: Int => T) = (0 until StoreBufferSize) map f

  // sbuffer entry count

  val plru = new ValidPseudoLRU(StoreBufferSize)
  val accessIdx = Wire(Vec(EnsbufferWidth + 1, Valid(UInt(SbufferIndexWidth.W))))

  val candidateVec = VecInit(stateVec.map(s => s.isDcacheReqCandidate()))

  val replaceAlgoIdx = plru.way(candidateVec.reverse)._2
  val replaceAlgoNotDcacheCandidate = !stateVec(replaceAlgoIdx).isDcacheReqCandidate()

  assert(!(candidateVec.asUInt.orR && replaceAlgoNotDcacheCandidate), "we have way to select, but replace algo selects invalid way")

  val replaceIdx = replaceAlgoIdx
  plru.access(accessIdx)

  //-------------------------cohCount-----------------------------
  // insert and merge: cohCount=0
  // every cycle cohCount+=1
  // if cohCount(EvictCountBits-1)==1, evict
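  // e.g. with EvictCycles = 1 << 20 (EvictCountBits = 21), an active entry that is not
  // merged into for about 1M cycles gets bit (EvictCountBits-1) of its cohCount set and
  // becomes an eviction candidate via cohTimeOutMask below.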
  val cohTimeOutMask = VecInit(widthMap(i => cohCount(i)(EvictCountBits - 1) && stateVec(i).isActive()))
  val (cohTimeOutIdx, cohHasTimeOut) = PriorityEncoderWithFlag(cohTimeOutMask)
  val cohTimeOutOH = PriorityEncoderOH(cohTimeOutMask)
  val missqReplayTimeOutMask = VecInit(widthMap(i => missqReplayCount(i)(MissqReplayCountBits - 1) && stateVec(i).w_timeout))
  val (missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen) = PriorityEncoderWithFlag(missqReplayTimeOutMask)
  val missqReplayHasTimeOut = GatedValidRegNext(missqReplayHasTimeOutGen) && !GatedValidRegNext(sbuffer_out_s0_fire)
  val missqReplayTimeOutIdx = RegEnable(missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen)

  //-------------------------sbuffer enqueue-----------------------------

  // Now sbuffer enq logic is divided into 3 stages:

  // sbuffer_in_s0:
  // * read data and meta from store queue
  // * store them in a 2-entry fifo queue

  // sbuffer_in_s1:
  // * read data and meta from fifo queue
  // * update sbuffer meta (vtag, ptag, flag)
  // * prevent that line from being sent to dcache (add a block condition)
  // * prepare cacheline level write enable signal, RegNext() data and mask

  // sbuffer_in_s2:
  // * use cacheline level buffer to update sbuffer data and mask
  // * remove dcache write block (if any)

  val activeMask = VecInit(stateVec.map(s => s.isActive()))
  val validMask  = VecInit(stateVec.map(s => s.isValid()))
  val drainIdx = PriorityEncoder(activeMask)

  val inflightMask = VecInit(stateVec.map(s => s.isInflight()))

  val inptags = io.in.map(in => getPTag(in.bits.addr))
  val invtags = io.in.map(in => getVTag(in.bits.vaddr))
  val sameTag = inptags(0) === inptags(1) && io.in(0).valid && io.in(1).valid && io.in(0).bits.vecValid && io.in(1).bits.vecValid
  val firstWord = getVWord(io.in(0).bits.addr)
  val secondWord = getVWord(io.in(1).bits.addr)
  // merge condition
  val mergeMask = Wire(Vec(EnsbufferWidth, Vec(StoreBufferSize, Bool())))
  val mergeIdx = mergeMask.map(PriorityEncoder(_)) // avoid using mergeIdx for better timing
  val canMerge = mergeMask.map(ParallelOR(_))
  val mergeVec = mergeMask.map(_.asUInt)

  for(i <- 0 until EnsbufferWidth){
    mergeMask(i) := widthMap(j =>
      inptags(i) === ptag(j) && activeMask(j)
    )
    assert(!(PopCount(mergeMask(i).asUInt) > 1.U && io.in(i).fire && io.in(i).bits.vecValid))
  }

  // insert condition
  // firstInsert: the first invalid entry
  // if the first req can merge, or the second req has the same ptag as the first,
  // secondInsert equals the first invalid entry; otherwise, it is the second invalid entry
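  // Allocation is split into even/odd entry banks (by the index LSB): enbufferSelReg flips
  // every cycle io.in(0) is valid, so the two enq ports usually allocate from different banks,
  // and GetEvenBits/GetOddBits.reverse map the bank-local one-hot vectors back to
  // full StoreBufferSize-wide vectors.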
  val invalidMask = VecInit(stateVec.map(s => s.isInvalid()))
  val evenInvalidMask = GetEvenBits(invalidMask.asUInt)
  val oddInvalidMask = GetOddBits(invalidMask.asUInt)

  def getFirstOneOH(input: UInt): UInt = {
    assert(input.getWidth > 1)
    val output = WireInit(VecInit(input.asBools))
    (1 until input.getWidth).map(i => {
      output(i) := !input(i - 1, 0).orR && input(i)
    })
    output.asUInt
  }
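  // getFirstOneOH keeps only the lowest set bit, e.g. input b0110 -> output b0010.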

  val evenRawInsertVec = getFirstOneOH(evenInvalidMask)
  val oddRawInsertVec = getFirstOneOH(oddInvalidMask)
  val (evenRawInsertIdx, evenCanInsert) = PriorityEncoderWithFlag(evenInvalidMask)
  val (oddRawInsertIdx, oddCanInsert) = PriorityEncoderWithFlag(oddInvalidMask)
  val evenInsertIdx = Cat(evenRawInsertIdx, 0.U(1.W)) // slow to generate, for debug only
  val oddInsertIdx = Cat(oddRawInsertIdx, 1.U(1.W)) // slow to generate, for debug only
  val evenInsertVec = GetEvenBits.reverse(evenRawInsertVec)
  val oddInsertVec = GetOddBits.reverse(oddRawInsertVec)

  val enbufferSelReg = RegInit(false.B)
  when(io.in(0).valid) {
    enbufferSelReg := ~enbufferSelReg
  }

  val firstInsertIdx = Mux(enbufferSelReg, evenInsertIdx, oddInsertIdx) // slow to generate, for debug only
  val secondInsertIdx = Mux(sameTag,
    firstInsertIdx,
    Mux(~enbufferSelReg, evenInsertIdx, oddInsertIdx)
  ) // slow to generate, for debug only
  val firstInsertVec = Mux(enbufferSelReg, evenInsertVec, oddInsertVec)
  val secondInsertVec = Mux(sameTag,
    firstInsertVec,
    Mux(~enbufferSelReg, evenInsertVec, oddInsertVec)
  ) // slow to generate, for debug only
  val firstCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(enbufferSelReg, evenCanInsert, oddCanInsert)
  val secondCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(sameTag,
    firstCanInsert,
    Mux(~enbufferSelReg, evenCanInsert, oddCanInsert)
  ) && (EnsbufferWidth >= 1).B
  val forward_need_uarch_drain = WireInit(false.B)
  val merge_need_uarch_drain = WireInit(false.B)
  val do_uarch_drain = GatedValidRegNext(forward_need_uarch_drain) || GatedValidRegNext(GatedValidRegNext(merge_need_uarch_drain))
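  // A vtag/ptag mismatch (detected on load forward or on merge) means the vaddr-indexed
  // CAM can no longer be trusted (likely due to virtual address aliasing), so the sbuffer
  // is drained before it accepts new stores.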
  XSPerfAccumulate("do_uarch_drain", do_uarch_drain)

  io.in(0).ready := firstCanInsert
  io.in(1).ready := secondCanInsert && io.in(0).ready

  for (i <- 0 until EnsbufferWidth) {
    // train
    if (EnableStorePrefetchSPB) {
      prefetcher.io.sbuffer_enq(i).valid := io.in(i).fire && io.in(i).bits.vecValid
      prefetcher.io.sbuffer_enq(i).bits := DontCare
      prefetcher.io.sbuffer_enq(i).bits.vaddr := io.in(i).bits.vaddr
    } else {
      prefetcher.io.sbuffer_enq(i).valid := false.B
      prefetcher.io.sbuffer_enq(i).bits := DontCare
    }

    // prefetch req
    if (EnableStorePrefetchAtCommit) {
      if (EnableAtCommitMissTrigger) {
        io.store_prefetch(i).valid := prefetcher.io.prefetch_req(i).valid || (io.in(i).fire && io.in(i).bits.vecValid && io.in(i).bits.prefetch)
      } else {
        io.store_prefetch(i).valid := prefetcher.io.prefetch_req(i).valid || (io.in(i).fire && io.in(i).bits.vecValid)
      }
      io.store_prefetch(i).bits.paddr := DontCare
      io.store_prefetch(i).bits.vaddr := Mux(prefetcher.io.prefetch_req(i).valid, prefetcher.io.prefetch_req(i).bits.vaddr, io.in(i).bits.vaddr)
      prefetcher.io.prefetch_req(i).ready := io.store_prefetch(i).ready
    } else {
      io.store_prefetch(i) <> prefetcher.io.prefetch_req(i)
    }
    io.store_prefetch zip prefetcher.io.prefetch_req drop 2 foreach (x => x._1 <> x._2)
  }
  prefetcher.io.memSetPattenDetected := io.memSetPattenDetected

  def wordReqToBufLine( // allocate a new line in sbuffer
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    insertIdx: UInt,
    insertVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(insertIdx) === insertVec)
    val sameBlockInflightMask = genSameBlockInflightMask(reqptag)
    (0 until StoreBufferSize).map(entryIdx => {
      when(insertVec(entryIdx)){
        stateVec(entryIdx).state_valid := true.B
        stateVec(entryIdx).w_sameblock_inflight := sameBlockInflightMask.orR // set w_sameblock_inflight when a line is first allocated
        when(sameBlockInflightMask.orR){
          waitInflightMask(entryIdx) := sameBlockInflightMask
        }
        cohCount(entryIdx) := 0.U
        // missqReplayCount(insertIdx) := 0.U
        ptag(entryIdx) := reqptag
        vtag(entryIdx) := reqvtag // update vtag if a new sbuffer line is allocated
      }
    })
  }

  def mergeWordReq( // merge write req into an existing line
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    mergeIdx: UInt,
    mergeVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(mergeIdx) === mergeVec)
    (0 until StoreBufferSize).map(entryIdx => {
      when(mergeVec(entryIdx)) {
        cohCount(entryIdx) := 0.U
        // missqReplayCount(entryIdx) := 0.U
        // check if vtag is the same, if not, trigger sbuffer flush
        when(reqvtag =/= vtag(entryIdx)) {
          merge_need_uarch_drain := true.B
        }
      }
    })
  }

  for(((in, vwordOffset), i) <- io.in.zip(Seq(firstWord, secondWord)).zipWithIndex){
    writeReq(i).valid := in.fire && in.bits.vecValid
    writeReq(i).bits.vwordOffset := vwordOffset
    writeReq(i).bits.mask := in.bits.mask
    writeReq(i).bits.data := in.bits.data
    writeReq(i).bits.wline := in.bits.wline
    val debug_insertIdx = if(i == 0) firstInsertIdx else secondInsertIdx
    val insertVec = if(i == 0) firstInsertVec else secondInsertVec
    assert(!((PopCount(insertVec) > 1.U) && in.fire && in.bits.vecValid))
    val insertIdx = OHToUInt(insertVec)
    val accessValid = in.fire && in.bits.vecValid
    accessIdx(i).valid := RegNext(accessValid)
    accessIdx(i).bits := RegEnable(Mux(canMerge(i), mergeIdx(i), insertIdx), accessValid)

    XSDebug(accessValid && canMerge(i), p"merge req $i to line [${mergeIdx(i)}]\n")
    XSDebug(accessValid && !canMerge(i), p"insert req $i to line[$insertIdx]\n")
    when(accessValid){
      when(canMerge(i)){
        writeReq(i).bits.wvec := mergeVec(i)
        mergeWordReq(in.bits, inptags(i), invtags(i), mergeIdx(i), mergeVec(i), vwordOffset)
      }.otherwise({
        writeReq(i).bits.wvec := insertVec
        wordReqToBufLine(in.bits, inptags(i), invtags(i), insertIdx, insertVec, vwordOffset)
        assert(debug_insertIdx === insertIdx)
      })
    }
    // XSDebug of mergeWordReq
    (0 until StoreBufferSize).map(entryIdx => {
      XSDebug(
        accessValid && canMerge(i) &&
          mergeVec(i)(entryIdx) && invtags(i) =/= vtag(entryIdx),
        "reqvtag =/= sbufvtag req(vtag %x ptag %x) sbuffer(vtag %x ptag %x)\n",
        invtags(i) << OffsetWidth,
        inptags(i) << OffsetWidth,
        vtag(entryIdx) << OffsetWidth,
        ptag(entryIdx) << OffsetWidth
      )
    })
  }


  for(i <- 0 until StoreBufferSize){
    XSDebug(stateVec(i).isValid(),
      p"[$i] timeout:${cohCount(i)(EvictCountBits-1)} state:${stateVec(i)}\n"
    )
  }

  for((req, i) <- io.in.zipWithIndex){
    XSDebug(req.fire && req.bits.vecValid,
      p"accept req [$i]: " +
        p"addr:${Hexadecimal(req.bits.addr)} " +
        p"mask:${Binary(shiftMaskToLow(req.bits.addr,req.bits.mask))} " +
        p"data:${Hexadecimal(shiftDataToLow(req.bits.addr,req.bits.data))}\n"
    )
    XSDebug(req.valid && !req.ready,
      p"req [$i] blocked by sbuffer\n"
    )
  }

  // for now, when enq, trigger a prefetch (if EnableAtCommitMissTrigger)
  require(EnsbufferWidth <= StorePipelineWidth)

  // ---------------------- Send Dcache Req ---------------------

  val sbuffer_empty = Cat(invalidMask).andR
  val sq_empty = !Cat(io.in.map(_.valid)).orR
  val empty = sbuffer_empty && sq_empty
  val threshold = Wire(UInt(5.W)) // RegNext(io.csrCtrl.sbuffer_threshold +& 1.U)
  threshold := Constantin.createRecord(s"StoreBufferThreshold_${p(XSCoreParamsKey).HartId}", initValue = 7)
  val base = Wire(UInt(5.W))
  base := Constantin.createRecord(s"StoreBufferBase_${p(XSCoreParamsKey).HartId}", initValue = 4)
  val ActiveCount = PopCount(activeMask)
  val ValidCount = PopCount(validMask)
  val forceThreshold = Mux(io.force_write, threshold - base, threshold)
  val do_eviction = GatedValidRegNext(ActiveCount >= forceThreshold || ActiveCount === (StoreBufferSize-1).U || ValidCount === (StoreBufferSize).U, init = false.B)
  require((StoreBufferThreshold + 1) <= StoreBufferSize)

  XSDebug(p"ActiveCount[$ActiveCount]\n")

  io.sbempty := GatedValidRegNext(empty)
  io.flush.empty := GatedValidRegNext(empty && io.sqempty)
  // lru.io.flush := sbuffer_state === x_drain_all && empty
  switch(sbuffer_state){
    is(x_idle){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(do_eviction){
        sbuffer_state := x_replace
      }
    }
    is(x_drain_all){
      when(empty){
        sbuffer_state := x_idle
      }
    }
    is(x_drain_sbuffer){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(sbuffer_empty){
        sbuffer_state := x_idle
      }
    }
    is(x_replace){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(!do_eviction){
        sbuffer_state := x_idle
      }
    }
  }
  XSDebug(p"sbuffer state:${sbuffer_state} do eviction:${do_eviction} empty:${empty}\n")

  def noSameBlockInflight(idx: UInt): Bool = {
    // stateVec(idx) itself must not be s_inflight
    !Cat(widthMap(i => inflightMask(i) && ptag(idx) === ptag(i))).orR
  }

  def genSameBlockInflightMask(ptag_in: UInt): UInt = {
    val mask = VecInit(widthMap(i => inflightMask(i) && ptag_in === ptag(i))).asUInt // quite slow, use it with care
    assert(!(PopCount(mask) > 1.U))
    mask
  }

  def haveSameBlockInflight(ptag_in: UInt): Bool = {
    genSameBlockInflightMask(ptag_in).orR
  }

  // ---------------------------------------------------------------------------
  // sbuffer to dcache pipeline
  // ---------------------------------------------------------------------------

  // Now sbuffer deq logic is divided into 2 stages:

  // sbuffer_out_s0:
  // * read data and meta from sbuffer
  // * RegNext() them
  // * set line state to inflight

  // sbuffer_out_s1:
  // * send write req to dcache

  // sbuffer_out_extra:
  // * receive write result from dcache
  // * update line state

  val sbuffer_out_s1_ready = Wire(Bool())

  // ---------------------------------------------------------------------------
  // sbuffer_out_s0
  // ---------------------------------------------------------------------------

  val need_drain = needDrain(sbuffer_state)
  val need_replace = do_eviction || (sbuffer_state === x_replace)
  val sbuffer_out_s0_evictionIdx = Mux(missqReplayHasTimeOut,
    missqReplayTimeOutIdx,
    Mux(need_drain,
      drainIdx,
      Mux(cohHasTimeOut, cohTimeOutIdx, replaceIdx)
    )
  )

  // If there is an inflight dcache req with the same ptag as sbuffer_out_s0_evictionIdx's ptag,
  // the current eviction should be blocked.
  val sbuffer_out_s0_valid = missqReplayHasTimeOut ||
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate() &&
    (need_drain || cohHasTimeOut || need_replace)
  assert(!(
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate() &&
    !noSameBlockInflight(sbuffer_out_s0_evictionIdx)
  ))
  val sbuffer_out_s0_cango = sbuffer_out_s1_ready
  sbuffer_out_s0_fire := sbuffer_out_s0_valid && sbuffer_out_s0_cango

  // ---------------------------------------------------------------------------
  // sbuffer_out_s1
  // ---------------------------------------------------------------------------

  // TODO: use EnsbufferWidth
  val shouldWaitWriteFinish = GatedValidRegNext(VecInit((0 until EnsbufferWidth).map{i =>
    (writeReq(i).bits.wvec.asUInt & UIntToOH(sbuffer_out_s0_evictionIdx).asUInt).orR &&
    writeReq(i).valid
  }).asUInt.orR)
  // block dcache write if read / write hazard
  val blockDcacheWrite = shouldWaitWriteFinish

  val sbuffer_out_s1_valid = RegInit(false.B)
  sbuffer_out_s1_ready := io.dcache.req.ready && !blockDcacheWrite || !sbuffer_out_s1_valid
  val sbuffer_out_s1_fire = io.dcache.req.fire

  // when sbuffer_out_s1_fire, send dcache req stored in pipeline reg to dcache
  when(sbuffer_out_s1_fire){
    sbuffer_out_s1_valid := false.B
  }
  // when sbuffer_out_s0_fire, read dcache req data and store them in a pipeline reg
  when(sbuffer_out_s0_cango){
    sbuffer_out_s1_valid := sbuffer_out_s0_valid
  }
  when(sbuffer_out_s0_fire){
    stateVec(sbuffer_out_s0_evictionIdx).state_inflight := true.B
    stateVec(sbuffer_out_s0_evictionIdx).w_timeout := false.B
    // stateVec(sbuffer_out_s0_evictionIdx).s_pipe_req := true.B
  }
  XSDebug(sbuffer_out_s0_fire, p"$sbuffer_out_s0_evictionIdx will be sent to Dcache\n")

  XSDebug(p"need drain:$need_drain cohHasTimeOut: $cohHasTimeOut need replace:$need_replace\n")
  XSDebug(p"drainIdx:$drainIdx tIdx:$cohTimeOutIdx replIdx:$replaceIdx " +
    p"blocked:${!noSameBlockInflight(sbuffer_out_s0_evictionIdx)} v:${activeMask(sbuffer_out_s0_evictionIdx)}\n")
  XSDebug(p"sbuffer_out_s0_valid:$sbuffer_out_s0_valid evictIdx:$sbuffer_out_s0_evictionIdx dcache ready:${io.dcache.req.ready}\n")
  // Note: if other dcache reqs in the same block are inflight,
  // the lru update may not be accurate
  accessIdx(EnsbufferWidth).valid := invalidMask(replaceIdx) || (
    need_replace && !need_drain && !cohHasTimeOut && !missqReplayHasTimeOut && sbuffer_out_s0_cango && activeMask(replaceIdx))
  accessIdx(EnsbufferWidth).bits := replaceIdx
  val sbuffer_out_s1_evictionIdx = RegEnable(sbuffer_out_s0_evictionIdx, sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionPTag = RegEnable(ptag(sbuffer_out_s0_evictionIdx), sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionVTag = RegEnable(vtag(sbuffer_out_s0_evictionIdx), sbuffer_out_s0_fire)

  io.dcache.req.valid := sbuffer_out_s1_valid && !blockDcacheWrite
  io.dcache.req.bits := DontCare
  io.dcache.req.bits.cmd   := MemoryOpConstants.M_XWR
  io.dcache.req.bits.addr  := getAddr(sbuffer_out_s1_evictionPTag)
  io.dcache.req.bits.vaddr := getAddr(sbuffer_out_s1_evictionVTag)
  io.dcache.req.bits.data  := data(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.mask  := mask(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.id := sbuffer_out_s1_evictionIdx

  XSDebug(sbuffer_out_s1_fire,
    p"send buf [$sbuffer_out_s1_evictionIdx] to Dcache, req fire\n"
  )

  // update sbuffer status according to dcache resp source

  def id_to_sbuffer_id(id: UInt): UInt = {
    require(id.getWidth >= log2Up(StoreBufferSize))
    id(log2Up(StoreBufferSize)-1, 0)
  }

  // hit resp
  io.dcache.hit_resps.map(resp => {
    val dcache_resp_id = resp.bits.id
    when (resp.fire) {
      stateVec(dcache_resp_id).state_inflight := false.B
      stateVec(dcache_resp_id).state_valid := false.B
      assert(!resp.bits.replay)
      assert(!resp.bits.miss) // no need to resp if miss, to be optimized
      assert(stateVec(dcache_resp_id).state_inflight === true.B)
    }

    // Updating the w_sameblock_inflight flag is delayed by 1 cycle
    //
    // When a new req allocates a new line in sbuffer, the sameblock_inflight check ignores
    // the current dcache.hit_resps. Then, in the next cycle, we have plenty of time to check
    // if the same block is still inflight
    (0 until StoreBufferSize).map(i => {
      when(
        stateVec(i).w_sameblock_inflight &&
        stateVec(i).state_valid &&
        GatedValidRegNext(resp.fire) &&
        waitInflightMask(i) === UIntToOH(RegEnable(id_to_sbuffer_id(dcache_resp_id), resp.fire))
      ){
        stateVec(i).w_sameblock_inflight := false.B
      }
    })
  })

  io.dcache.hit_resps.zip(dataModule.io.maskFlushReq).map{case (resp, maskFlush) => {
    maskFlush.valid := resp.fire
    maskFlush.bits.wvec := UIntToOH(resp.bits.id)
  }}

  // replay resp
  val replay_resp_id = io.dcache.replay_resp.bits.id
  when (io.dcache.replay_resp.fire) {
    missqReplayCount(replay_resp_id) := 0.U
    stateVec(replay_resp_id).w_timeout := true.B
    // waiting for timeout
    assert(io.dcache.replay_resp.bits.replay)
    assert(stateVec(replay_resp_id).state_inflight === true.B)
  }

  // TODO: reuse cohCount
  (0 until StoreBufferSize).map(i => {
    when(stateVec(i).w_timeout && stateVec(i).state_inflight && !missqReplayCount(i)(MissqReplayCountBits-1)) {
      missqReplayCount(i) := missqReplayCount(i) + 1.U
    }
    when(activeMask(i) && !cohTimeOutMask(i)){
      cohCount(i) := cohCount(i)+1.U
    }
  })
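  // Entries with w_timeout set are held back until missqReplayCount reaches its MSB,
  // i.e. SbufferReplayDelayCycles (16) cycles after the replay resp; only then does
  // missqReplayHasTimeOut re-select them for the dcache pipeline.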

  if (env.EnableDifftest) {
    // hit resp
    io.dcache.hit_resps.zipWithIndex.map{case (resp, index) => {
      val difftest = DifftestModule(new DiffSbufferEvent, delay = 1)
      val dcache_resp_id = resp.bits.id
      difftest.coreid := io.hartId
      difftest.index  := index.U
      difftest.valid  := resp.fire
      difftest.addr   := getAddr(ptag(dcache_resp_id))
      difftest.data   := data(dcache_resp_id).asTypeOf(Vec(CacheLineBytes, UInt(8.W)))
      difftest.mask   := mask(dcache_resp_id).asUInt
    }}
  }

  // ---------------------- Load Data Forward ---------------------
  val mismatch = Wire(Vec(LoadPipelineWidth, Bool()))
  XSPerfAccumulate("vaddr_match_failed", mismatch(0) || mismatch(1))
  for ((forward, i) <- io.forward.zipWithIndex) {
    val vtag_matches = VecInit(widthMap(w => vtag(w) === getVTag(forward.vaddr)))
    // ptag_matches uses paddr from dtlb, which is far from sbuffer
    val ptag_matches = VecInit(widthMap(w => RegEnable(ptag(w), forward.valid) === RegEnable(getPTag(forward.paddr), forward.valid)))
    val tag_matches = vtag_matches
    val tag_mismatch = GatedValidRegNext(forward.valid) && VecInit(widthMap(w =>
      GatedValidRegNext(vtag_matches(w)) =/= ptag_matches(w) && GatedValidRegNext((activeMask(w) || inflightMask(w)))
    )).asUInt.orR
    mismatch(i) := tag_mismatch
    when (tag_mismatch) {
      forward_need_uarch_drain := true.B
    }
    XSDebug(
      tag_mismatch,
      "forward tag mismatch: pmatch %x vmatch %x vaddr %x paddr %x\n",
      RegNext(ptag_matches.asUInt),
      RegNext(vtag_matches.asUInt),
      RegNext(forward.vaddr),
      RegNext(forward.paddr)
    )
    val valid_tag_matches = widthMap(w => tag_matches(w) && activeMask(w))
    val inflight_tag_matches = widthMap(w => tag_matches(w) && inflightMask(w))
    val line_offset_mask = UIntToOH(getVWordOffset(forward.paddr))

    val valid_tag_match_reg = valid_tag_matches.map(RegEnable(_, forward.valid))
    val inflight_tag_match_reg = inflight_tag_matches.map(RegEnable(_, forward.valid))
    val forward_mask_candidate_reg = RegEnable(
      VecInit(mask.map(entry => entry(getVWordOffset(forward.paddr)))),
      forward.valid
    )
    val forward_data_candidate_reg = RegEnable(
      VecInit(data.map(entry => entry(getVWordOffset(forward.paddr)))),
      forward.valid
    )
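
    // Forwarding is effectively two cycles: the vtag CAM selects candidate lines in the
    // request cycle, the registered mask/data candidates are Mux1H-ed in the next cycle,
    // and the slower ptag compare is checked against the registered vtag result to detect
    // a vaddr/paddr mismatch (tag_mismatch above).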

    val selectedValidMask = Mux1H(valid_tag_match_reg, forward_mask_candidate_reg)
    val selectedValidData = Mux1H(valid_tag_match_reg, forward_data_candidate_reg)
    selectedValidMask.suggestName("selectedValidMask_"+i)
    selectedValidData.suggestName("selectedValidData_"+i)

    val selectedInflightMask = Mux1H(inflight_tag_match_reg, forward_mask_candidate_reg)
    val selectedInflightData = Mux1H(inflight_tag_match_reg, forward_data_candidate_reg)
    selectedInflightMask.suggestName("selectedInflightMask_"+i)
    selectedInflightData.suggestName("selectedInflightData_"+i)

    // currently not being used
    val selectedInflightMaskFast = Mux1H(line_offset_mask, Mux1H(inflight_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))
    val selectedValidMaskFast = Mux1H(line_offset_mask, Mux1H(valid_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))

    forward.dataInvalid := false.B // data in store line merge buffer is always ready
    forward.matchInvalid := tag_mismatch // paddr / vaddr cam result does not match
    for (j <- 0 until VDataBytes) {
      forward.forwardMask(j) := false.B
      forward.forwardData(j) := DontCare

      // valid entries have higher priority than inflight entries
      when(selectedInflightMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedInflightData(j)
      }
      when(selectedValidMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedValidData(j)
      }

      forward.forwardMaskFast(j) := selectedInflightMaskFast(j) || selectedValidMaskFast(j)
    }
    forward.addrInvalid := DontCare
  }

  for (i <- 0 until StoreBufferSize) {
    XSDebug("sbf entry " + i + " : ptag %x vtag %x valid %x active %x inflight %x w_timeout %x\n",
      ptag(i) << OffsetWidth,
      vtag(i) << OffsetWidth,
      stateVec(i).isValid(),
      activeMask(i),
      inflightMask(i),
      stateVec(i).w_timeout
    )
  }

  /*
  *
  **********************************************************
  *      -------------                   -------------     *
  *      | XiangShan |                   |    NEMU   |     *
  *      -------------                   -------------     *
  *            |                               |           *
  *            V                               V           *
  *          -----                           -----         *
  *          | Q |                           | Q |         *
  *          | U |                           | U |         *
  *          | E |                           | E |         *
  *          | U |                           | U |         *
  *          | E |                           | E |         *
  *          |   |                           |   |         *
  *          -----                           -----         *
  *            |                               |           *
  *            |        --------------         |           *
  *            |>>>>>>>>|  DIFFTEST  |<<<<<<<<<|           *
  *                     --------------                     *
  **********************************************************
  */
  // Initialize to DontCare when difftest is not enabled.
  for (i <- 0 until EnsbufferWidth) {
    io.vecDifftestInfo(i) := DontCare
  }
  if (env.EnableDifftest) {
    val VecMemFLOWMaxNumber = 16
    val WlineMaxNumber = blockWords

    def UIntSlice(in: UInt, High: UInt, Low: UInt): UInt = {
      val maxNum = in.getWidth
      val result = Wire(Vec(maxNum, Bool()))

      for (i <- 0 until maxNum) {
        when (Low + i.U <= High) {
          result(i) := in(Low + i.U)
        }.otherwise{
          result(i) := 0.U
        }
      }

      result.asUInt
    }
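
    // UIntSlice extracts in(High, Low), right-aligned at bit 0 and zero-extended to
    // in's full width, e.g. UIntSlice(0xAB.U(8.W), 7.U, 4.U) = 0x0A.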

    // To align with 'nemu', we need:
    //  For 'unit-store' and 'whole' vector store instr, we re-split here,
    //  and for the rest, we do nothing.
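    // e.g. for a unit-stride vector store with eew = 1 (16-bit elements) and VLEN = 128:
    // EEB = 2 bytes, EEWBits = 16, and flow = 16 >> 1 = 8 element stores are reported
    // to difftest for that uop.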
    for (i <- 0 until EnsbufferWidth) {
      io.vecDifftestInfo(i).ready := io.in(i).ready

      val uop             = io.vecDifftestInfo(i).bits

      val isVse           = isVStore(uop.fuType) && LSUOpType.isUStride(uop.fuOpType)
      val isVsm           = isVStore(uop.fuType) && VstuType.isMasked(uop.fuOpType)
      val isVsr           = isVStore(uop.fuType) && VstuType.isWhole(uop.fuOpType)

      val vpu             = uop.vpu
      val veew            = uop.vpu.veew
      val eew             = EewLog2(veew)
      val EEB             = (1.U << eew).asUInt // effective element bytes (only valid when VLEN = 128)
      val EEWBits         = (EEB << 3.U).asUInt
      val nf              = Mux(isVsr, 0.U, vpu.nf)

      val isSegment       = nf =/= 0.U && !isVsm
      val isVSLine        = (isVse || isVsm || isVsr) && !isSegment
      val isWline         = io.in(i).bits.wline

      // The number of stores a uop generates in theory.
      // No other vector instructions need to be considered.
      val flow            = Mux(
                              isVSLine,
                              (16.U >> eew).asUInt,
                              0.U
                            )

      val rawData         = io.in(i).bits.data
      val rawMask         = io.in(i).bits.mask
      val rawAddr         = io.in(i).bits.addr

      // A common difftest interface for scalar and vector instr
      val difftestCommon = DifftestModule(new DiffStoreEvent, delay = 2, dontCare = true)
      when (isVSLine) {
        val splitMask         = UIntSlice(rawMask, EEB - 1.U, 0.U)(7,0)  // Byte
        val splitData         = UIntSlice(rawData, EEWBits - 1.U, 0.U)(63,0) // Double word
        val storeCommit       = io.in(i).fire && splitMask.orR && io.in(i).bits.vecValid
        val waddr             = rawAddr
        val wmask             = splitMask
        val wdata             = splitData & MaskExpand(splitMask)

        difftestCommon.coreid := io.hartId
        difftestCommon.index  := (i*VecMemFLOWMaxNumber).U
        difftestCommon.valid  := storeCommit
        difftestCommon.addr   := waddr
        difftestCommon.data   := wdata
        difftestCommon.mask   := wmask
        difftestCommon.robidx := io.vecDifftestInfo(i).bits.robIdx.value
        difftestCommon.pc     := io.vecDifftestInfo(i).bits.pc

      } .elsewhen (!isWline) {
        val storeCommit       = io.in(i).fire
        val waddr             = ZeroExt(Cat(io.in(i).bits.addr(PAddrBits - 1, 3), 0.U(3.W)), 64)
        val sbufferMask       = shiftMaskToLow(io.in(i).bits.addr, io.in(i).bits.mask)
        val sbufferData       = shiftDataToLow(io.in(i).bits.addr, io.in(i).bits.data)
        val wmask             = sbufferMask
        val wdata             = sbufferData & MaskExpand(sbufferMask)

        difftestCommon.coreid := io.hartId
        difftestCommon.index  := (i*VecMemFLOWMaxNumber).U
        difftestCommon.valid  := storeCommit && io.in(i).bits.vecValid
        difftestCommon.addr   := waddr
        difftestCommon.data   := wdata
        difftestCommon.mask   := wmask
        difftestCommon.robidx := io.vecDifftestInfo(i).bits.robIdx.value
        difftestCommon.pc     := io.vecDifftestInfo(i).bits.pc
      }

      for (index <- 0 until WlineMaxNumber) {
        val difftest = DifftestModule(new DiffStoreEvent, delay = 2, dontCare = true)

        val storeCommit = io.in(i).fire && io.in(i).bits.vecValid
        val blockAddr = get_block_addr(io.in(i).bits.addr)

        when (isWline) {
          difftest.coreid := io.hartId
          difftest.index  := (i*VecMemFLOWMaxNumber + index).U
          difftest.valid  := storeCommit
          difftest.addr   := blockAddr + (index.U << wordOffBits)
          difftest.data   := io.in(i).bits.data
          difftest.mask   := ((1 << wordBytes) - 1).U
          difftest.robidx := io.vecDifftestInfo(i).bits.robIdx.value
          difftest.pc     := io.vecDifftestInfo(i).bits.pc

          assert(!storeCommit || (io.in(i).bits.data === 0.U), "wline only supports whole zero write now")
        }
      }

      // Only the interface used by the 'unit-store' and 'whole' vector store instr
      for (index <- 1 until VecMemFLOWMaxNumber) {
        val difftest = DifftestModule(new DiffStoreEvent, delay = 2, dontCare = true)

        // The 'mask' has already been processed outside:
        //  Different cases of 'vm' have been considered:
        //    any valid store will definitely not have an all-zero mask,
        //    and the extra part due to unaligned access must have a mask of 0
        when (index.U < flow && isVSLine) {
          // Make NEMU-difftest happy
          val shiftIndex  = EEB*index.U
          val shiftFlag   = shiftIndex(2,0).orR // Double word Flag
          val shiftBytes  = Mux(shiftFlag, shiftIndex(2,0), 0.U)
          val shiftBits   = shiftBytes << 3.U
          val splitMask   = UIntSlice(rawMask, (EEB*(index+1).U - 1.U), EEB*index.U)(7,0)  // Byte
          val splitData   = UIntSlice(rawData, (EEWBits*(index+1).U - 1.U), EEWBits*index.U)(63,0) // Double word
          val storeCommit = io.in(i).fire && splitMask.orR  && io.in(i).bits.vecValid
          val waddr       = Cat(rawAddr(PAddrBits - 1, 4), Cat(shiftIndex(3), 0.U(3.W)))
          val wmask       = splitMask << shiftBytes
          val wdata       = (splitData & MaskExpand(splitMask)) << shiftBits

          difftest.coreid := io.hartId
          difftest.index  := (i*VecMemFLOWMaxNumber+index).U
          difftest.valid  := storeCommit
          difftest.addr   := waddr
          difftest.data   := wdata
          difftest.mask   := wmask
          difftest.robidx := io.vecDifftestInfo(i).bits.robIdx.value
          difftest.pc     := io.vecDifftestInfo(i).bits.pc
        }
      }
    }
  }

  val perf_valid_entry_count = RegNext(PopCount(VecInit(stateVec.map(s => !s.isInvalid())).asUInt))
  XSPerfHistogram("util", perf_valid_entry_count, true.B, 0, StoreBufferSize, 1)
  XSPerfAccumulate("sbuffer_req_valid", PopCount(VecInit(io.in.map(_.valid)).asUInt))
  XSPerfAccumulate("sbuffer_req_fire", PopCount(VecInit(io.in.map(_.fire)).asUInt))
  XSPerfAccumulate("sbuffer_req_fire_vecinvalid", PopCount(VecInit(io.in.map(data => data.fire && !data.bits.vecValid)).asUInt))
  XSPerfAccumulate("sbuffer_merge", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && canMerge(i)})).asUInt))
  XSPerfAccumulate("sbuffer_newline", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && !canMerge(i)})).asUInt))
  XSPerfAccumulate("dcache_req_valid", io.dcache.req.valid)
  XSPerfAccumulate("dcache_req_fire", io.dcache.req.fire)
  XSPerfAccumulate("sbuffer_idle", sbuffer_state === x_idle)
  XSPerfAccumulate("sbuffer_flush", sbuffer_state === x_drain_sbuffer)
  XSPerfAccumulate("sbuffer_replace", sbuffer_state === x_replace)
  XSPerfAccumulate("evenCanInsert", evenCanInsert)
  XSPerfAccumulate("oddCanInsert", oddCanInsert)
  XSPerfAccumulate("mainpipe_resp_valid", io.dcache.main_pipe_hit_resp.fire)
  //XSPerfAccumulate("refill_resp_valid", io.dcache.refill_hit_resp.fire)
  XSPerfAccumulate("replay_resp_valid", io.dcache.replay_resp.fire)
  XSPerfAccumulate("coh_timeout", cohHasTimeOut)

  // val (store_latency_sample, store_latency) = TransactionLatencyCounter(io.lsu.req.fire, io.lsu.resp.fire)
  // XSPerfHistogram("store_latency", store_latency, store_latency_sample, 0, 100, 10)
  // XSPerfAccumulate("store_req", io.lsu.req.fire)

  val perfEvents = Seq(
    ("sbuffer_req_valid ", PopCount(VecInit(io.in.map(_.valid)).asUInt)                                                                ),
    ("sbuffer_req_fire  ", PopCount(VecInit(io.in.map(_.fire)).asUInt)                                                                 ),
    ("sbuffer_merge     ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && canMerge(i)})).asUInt)                  ),
    ("sbuffer_newline   ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && !canMerge(i)})).asUInt)                 ),
    ("dcache_req_valid  ", io.dcache.req.valid                                                                                         ),
    ("dcache_req_fire   ", io.dcache.req.fire                                                                                          ),
    ("sbuffer_idle      ", sbuffer_state === x_idle                                                                                    ),
    ("sbuffer_flush     ", sbuffer_state === x_drain_sbuffer                                                                           ),
    ("sbuffer_replace   ", sbuffer_state === x_replace                                                                                 ),
    ("mpipe_resp_valid  ", io.dcache.main_pipe_hit_resp.fire                                                                           ),
    //("refill_resp_valid ", io.dcache.refill_hit_resp.fire                                                                            ),
    ("replay_resp_valid ", io.dcache.replay_resp.fire                                                                                  ),
    ("coh_timeout       ", cohHasTimeOut                                                                                               ),
    ("sbuffer_1_4_valid ", (perf_valid_entry_count < (StoreBufferSize.U/4.U))                                                          ),
    ("sbuffer_2_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/4.U)) & (perf_valid_entry_count <= (StoreBufferSize.U/2.U))    ),
    ("sbuffer_3_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/2.U)) & (perf_valid_entry_count <= (StoreBufferSize.U*3.U/4.U))),
    ("sbuffer_full_valid", (perf_valid_entry_count > (StoreBufferSize.U*3.U/4.U)))
  )
  generatePerfEvent()

}