// xref: /XiangShan/src/main/scala/xiangshan/frontend/icache/ICacheMissUnit.scala (revision d6477c69bc3348d63058f8f4cebbf80cad7ca1e0)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/
16
package xiangshan.frontend.icache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.IdRange
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink._
import huancun.{AliasKey, DirtyKey}
import utils._
import xiangshan._
import xiangshan.cache._
30
31
/** Base class for modules of the ICache miss unit; mixes in the shared ICache parameters. */
abstract class ICacheMissUnitModule(implicit p: Parameters) extends XSModule
  with HasICacheParameters
34
/** Base class for bundles of the ICache miss unit; mixes in the shared ICache parameters. */
abstract class ICacheMissUnitBundle(implicit p: Parameters) extends XSBundle
  with HasICacheParameters
37
/** Miss request sent to a miss entry: the missing address plus the victim way to refill.
  * The set index is derived from the virtual address and the tag from the physical
  * address (see the two helpers below).
  */
class ICacheMissReq(implicit p: Parameters) extends ICacheBundle
{
    val paddr      = UInt(PAddrBits.W)  // physical address of the missing block
    val vaddr      = UInt(VAddrBits.W)  // virtual address (supplies the set index)
    val waymask   = UInt(nWays.W)       // victim way mask for the refill write
    val coh       = new ClientMetadata  // current coherence state of the victim way

    def getVirSetIdx = get_idx(vaddr)     // set index, taken from the virtual address
    def getPhyTag    = get_phy_tag(paddr) // tag, taken from the physical address
}
48
49
/** Miss response: the refilled cache block returned to the requester. */
class ICacheMissResp(implicit p: Parameters) extends ICacheBundle
{
    val data     = UInt(blockBits.W) // one full cache block of refill data
}
54
/** Aggregate miss interface: one request/response pair per fetch port, plus a flush input. */
class ICacheMissBundle(implicit p: Parameters) extends ICacheBundle{
    val req       =   Vec(2, Flipped(DecoupledIO(new ICacheMissReq)))
    val resp      =   Vec(2,ValidIO(new ICacheMissResp))
    val flush     =   Input(Bool())
}
60
61
/** One in-flight ICache miss.
  *
  * Flow: accept a request, AcquireBlock the line from L2 over TileLink,
  * collect the grant beats, send GrantAck, release the victim line through
  * the replace pipe when it holds valid coherence permissions, then write
  * the refilled data/meta into the cache arrays and respond.
  *
  * Fixes vs. previous revision: removed the verbatim-duplicated IO default
  * assignments (the second copy was dead under Chisel last-connect semantics)
  * and corrected the `s_send_mem_aquire` state-name typo (local identifier,
  * no interface change).
  *
  * @param edge TileLink edge used to build A/E-channel messages
  * @param id   static index of this entry; also used as its TileLink source id
  */
class ICacheMissEntry(edge: TLEdgeOut, id: Int)(implicit p: Parameters) extends ICacheMissUnitModule
  with MemoryOpConstants
{
  val io = IO(new Bundle {
    // entry index driven by the enclosing miss unit (mirrors the constructor `id`)
    val id = Input(UInt(log2Ceil(nMissEntries).W))

    val req = Flipped(DecoupledIO(new ICacheMissReq))
    val resp = ValidIO(new ICacheMissResp)

    // TileLink channels: A (acquire), D (grant), E (grant-ack)
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    // refill write ports into the cache meta/data arrays
    val meta_write = DecoupledIO(new ICacheMetaWriteBundle)
    val data_write = DecoupledIO(new ICacheDataWriteBundle)

    // victim-release handshake with the replace pipe
    val release_req    =  DecoupledIO(new ReplacePipeReq)
    val release_resp   =  Flipped(ValidIO(UInt(ReplaceIdWid.W)))
    // exported so other pipes can detect an in-flight victim for this set
    val victimInfor        =  Output(new ICacheVictimInfor())
  })

  /** default values for control signals (specific drivers below win by last-connect) */
  io.resp.bits := DontCare
  io.mem_acquire.bits := DontCare
  io.mem_grant.ready := true.B
  io.meta_write.bits := DontCare
  io.data_write.bits := DontCare

  val s_idle :: s_send_mem_acquire :: s_wait_mem_grant :: s_write_back :: s_send_grant_ack :: s_send_replace :: s_wait_replace :: s_wait_resp :: Nil = Enum(8)
  val state = RegInit(s_idle)

  /** request register: latched when the request is accepted, stable for the whole miss */
  val req = Reg(new ICacheMissReq)
  val req_idx = req.getVirSetIdx //virtual index
  val req_tag = req.getPhyTag //physical tag
  val req_waymask = req.waymask
  // tag the release request so the replace pipe's response can be matched back to this entry
  val release_id  = Cat(MissQueueKey.U, id.U)
  // the victim needs a release only if it currently holds valid coherence permissions
  val victim_need_release = req.coh.isValid()

  io.victimInfor.valid := state === s_send_replace || state === s_wait_replace || state === s_wait_resp
  io.victimInfor.vidx  := req_idx

  val (_, _, refill_done, refill_address_inc) = edge.addr_inc(io.mem_grant)

  // refill line assembled beat by beat from the D channel
  val readBeatCnt = Reg(UInt(log2Up(refillCycles).W))
  val respDataReg = Reg(Vec(refillCycles, UInt(beatBits.W)))

  io.release_req.bits.paddr := req.paddr
  io.release_req.bits.vaddr := req.vaddr
  io.release_req.bits.voluntary := true.B
  io.release_req.bits.waymask   := req.waymask
  io.release_req.bits.id   := release_id
  io.release_req.bits.param := DontCare //release will not care tilelink param

  io.req.ready := (state === s_idle)
  io.mem_acquire.valid := (state === s_send_mem_acquire)
  io.release_req.valid := (state === s_send_replace)

  // GrantAck (E channel) captured from the grant beats; grant_param/is_dirty
  // record the permissions and dirty echo granted by L2
  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire())
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))
  val is_dirty = RegInit(false.B)
  val is_grant = RegEnable(edge.isRequest(io.mem_grant.bits), io.mem_grant.fire())

  // state machine:
  //   idle -> send acquire -> wait grant -> send grant-ack
  //        -> (send replace -> wait replace, only if victim valid)
  //        -> write back -> wait resp -> idle
  switch(state) {
    is(s_idle) {
      when(io.req.fire()) {
        readBeatCnt := 0.U
        state := s_send_mem_acquire
        req := io.req.bits
      }
    }

    // memory request
    is(s_send_mem_acquire) {
      when(io.mem_acquire.fire()) {
        state := s_wait_mem_grant
      }
    }

    is(s_wait_mem_grant) {
      when(edge.hasData(io.mem_grant.bits)) {
        when(io.mem_grant.fire()) {
          readBeatCnt := readBeatCnt + 1.U
          respDataReg(readBeatCnt) := io.mem_grant.bits.data
          grant_param := io.mem_grant.bits.param
          is_dirty    := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
          when(readBeatCnt === (refillCycles - 1).U) {
            assert(refill_done, "refill not done!")
            state := s_send_grant_ack
          }
        }
      }
    }

    is(s_send_grant_ack) {
      when(io.mem_finish.fire()) {
        state := Mux(victim_need_release, s_send_replace, s_write_back)
      }
    }

    is(s_send_replace){
      when(io.release_req.fire()){
        state := s_wait_replace
      }
    }

    is(s_wait_replace){
      // wait until the replace pipe acknowledges OUR release (matched by id)
      when(io.release_resp.valid && io.release_resp.bits === release_id){
        state := s_write_back
      }
    }

    is(s_write_back) {
      // both array writes must be accepted in the same cycle before advancing
      state := Mux(io.meta_write.fire() && io.data_write.fire(), s_wait_resp, s_write_back)
    }

    is(s_wait_resp) {
      io.resp.bits.data := respDataReg.asUInt
      when(io.resp.fire()) {
        state := s_idle
      }
    }
  }

  /** refill write and meta write: acquire always grows from Nothing for a read access */
  val missCoh    = ClientMetadata(Nothing)
  val grow_param = missCoh.onAccess(M_XRD)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = addrAlign(req.paddr, blockBytes, PAddrBits),
    lgSize = (log2Up(cacheParams.blockBytes)).U,
    growPermissions = grow_param
  )._2
  io.mem_acquire.bits := acquireBlock
  // resolve cache alias by L2: pass the untranslated index bits of the vaddr
  // NOTE(review): bits (13,12) assume up to 2 alias bits above a 4KB page — confirm against nSets * blockBytes
  io.mem_acquire.bits.user.lift(AliasKey).foreach(_ := req.vaddr(13, 12))
  require(nSets <= 256) // icache size should not be more than 128KB

  /** Grant ACK: only fire the E channel after a real grant has been latched */
  io.mem_finish.valid := (state === s_send_grant_ack) && is_grant
  io.mem_finish.bits := grantack

  // resp to ifu (data itself is driven in the s_wait_resp branch above)
  io.resp.valid := state === s_wait_resp

  /** update coh meta: map the granted TileLink permission + dirty echo to new ClientMetadata */
  def missCohGen(param: UInt, dirty: Bool): UInt = {
    MuxLookup(Cat(param, dirty), Nothing, Seq(
      Cat(toB, false.B) -> Branch,
      Cat(toB, true.B)  -> Branch,
      Cat(toT, false.B) -> Trunk,
      Cat(toT, true.B)  -> Dirty))
  }

  val miss_new_coh = ClientMetadata(missCohGen(grant_param, is_dirty))

  io.meta_write.valid := (state === s_write_back)
  io.meta_write.bits.generate(tag = req_tag, coh = miss_new_coh, idx = req_idx, waymask = req_waymask, bankIdx = req_idx(0))

  io.data_write.valid := (state === s_write_back)
  io.data_write.bits.generate(data = respDataReg.asUInt, idx = req_idx, waymask = req_waymask, bankIdx = req_idx(0))

  // perf counters: miss latency (request accept -> response) and request count
  XSPerfAccumulate(
    "entryPenalty" + Integer.toString(id, 10),
    BoolStopWatch(
      start = io.req.fire(),
      stop = io.resp.valid,
      startHighPriority = true)
  )
  XSPerfAccumulate("entryReq" + Integer.toString(id, 10), io.req.fire())

}
243
244
/** ICache miss unit: hosts one [[ICacheMissEntry]] per fetch port, arbitrates
  * their meta/data-write and release requests onto the shared ports, and
  * multiplexes the TileLink A/D/E channels among the entries by source id.
  *
  * Fix vs. previous revision: removed the dead `io.mem_grant.ready := false.B`
  * default — it was unconditionally overridden by the `:= true.B` assignment
  * below under Chisel last-connect semantics.
  */
class ICacheMissUnit(edge: TLEdgeOut)(implicit p: Parameters) extends ICacheMissUnitModule
{
  val io = IO(new Bundle{
    val req         = Vec(2, Flipped(DecoupledIO(new ICacheMissReq)))
    val resp        = Vec(2, ValidIO(new ICacheMissResp))

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant   = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish  = DecoupledIO(new TLBundleE(edge.bundle))

    val meta_write  = DecoupledIO(new ICacheMetaWriteBundle)
    val data_write  = DecoupledIO(new ICacheDataWriteBundle)

    val release_req    =  DecoupledIO(new ReplacePipeReq)
    val release_resp   =  Flipped(ValidIO(UInt(ReplaceIdWid.W)))

    val victimInfor = Vec(PortNumber, Output(new ICacheVictimInfor()))

  })

  // arbitrate per-entry array writes and releases onto the single shared ports
  val meta_write_arb = Module(new Arbiter(new ICacheMetaWriteBundle,  PortNumber))
  val refill_arb     = Module(new Arbiter(new ICacheDataWriteBundle,  PortNumber))
  val release_arb    = Module(new Arbiter(new ReplacePipeReq,  PortNumber))

  // default: accept (and drop) D-channel beats whose source matches no entry;
  // the matching entry's `<>` below overrides ready via last-connect
  io.mem_grant.ready := true.B

  val entries = (0 until PortNumber) map { i =>
    val entry = Module(new ICacheMissEntry(edge, i))

    entry.io.id := i.U

    // entry req: plain fan-out, one entry is dedicated to each fetch port
    entry.io.req.valid := io.req(i).valid
    entry.io.req.bits  := io.req(i).bits
    io.req(i).ready    := entry.io.req.ready

    // entry resp
    meta_write_arb.io.in(i)     <>  entry.io.meta_write
    refill_arb.io.in(i)         <>  entry.io.data_write
    release_arb.io.in(i)        <>  entry.io.release_req

    // route each D-channel beat to the entry whose index matches its source id
    entry.io.mem_grant.valid := false.B
    entry.io.mem_grant.bits  := DontCare
    when (io.mem_grant.bits.source === i.U) {
      entry.io.mem_grant <> io.mem_grant
    }

    io.resp(i) <> entry.io.resp

    io.victimInfor(i) := entry.io.victimInfor

    // release responses are broadcast; each entry filters by its own release id
    entry.io.release_resp <> io.release_resp

    // perf counters per entry: miss latency and request count
    XSPerfAccumulate(
      "entryPenalty" + Integer.toString(i, 10),
      BoolStopWatch(
        start = entry.io.req.fire(),
        stop = entry.io.resp.fire(),
        startHighPriority = true)
    )
    XSPerfAccumulate("entryReq" + Integer.toString(i, 10), entry.io.req.fire())

    entry
  }

  // lowest-index-first arbitration of the shared TileLink A and E channels
  TLArbiter.lowest(edge, io.mem_acquire, entries.map(_.io.mem_acquire):_*)
  TLArbiter.lowest(edge, io.mem_finish,  entries.map(_.io.mem_finish):_*)

  io.meta_write     <> meta_write_arb.io.out
  io.data_write     <> refill_arb.io.out
  io.release_req        <> release_arb.io.out

  // perf: which way each port's refills land in
  (0 until nWays).map{ w =>
    XSPerfAccumulate("line_0_refill_way_" + Integer.toString(w, 10),  entries(0).io.meta_write.valid && OHToUInt(entries(0).io.meta_write.bits.waymask)  === w.U)
    XSPerfAccumulate("line_1_refill_way_" + Integer.toString(w, 10),  entries(1).io.meta_write.valid && OHToUInt(entries(1).io.meta_write.bits.waymask)  === w.U)
  }

}
325
326
327
328