/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.backend.fu

import chisel3._
import chisel3.util._
import freechips.rocketchip.regmapper.{RegField, RegFieldDesc, RegReadFn, RegWriteFn}
import utility.{ParallelPriorityMux, ValidHold, ZeroExt}
import xiangshan.cache.mmu.TlbCmd

import scala.collection.mutable.ListBuffer

/* Memory Mapped PMA */
case class MMPMAConfig
(
  address: BigInt,
  mask: BigInt,
  lgMaxSize: Int,
  sameCycle: Boolean,
  num: Int
)

case class MemoryRange(lower: BigInt, upper: BigInt) {
  def cover(addr: BigInt): Boolean = addr >= lower && addr < upper
  def cover(addr: UInt): Bool = addr >= lower.U && addr < upper.U
}

case class PMAConfigEntry(
  base_addr: BigInt,
  range: BigInt = 0L, // only used for napot mode
  l: Boolean = false,
  c: Boolean = false,
  atomic: Boolean = false,
  a: Int = 0,
  x: Boolean = false,
  w: Boolean = false,
  r: Boolean = false
)

trait PMAConst extends PMPConst

trait MMPMAMethod extends PMAConst with PMAMethod with PMPReadWriteMethodBare {
  def gen_mmpma_mapping(num: Int) = {
    val pmaCfgPerCSR = PMXLEN / new PMPConfig().getWidth
    def pmaCfgLogicIndex(i: Int) = (PMXLEN / 32) * (i / pmaCfgPerCSR)
    def pmaCfgIndex(i: Int) = (i / pmaCfgPerCSR)

    val pma = Wire(Vec(num, new PMPEntry))

    /* pma init value */
    val init_value = pma_init()

    val pmaCfgMerged = RegInit(init_value._1)
    val addr = RegInit(init_value._2)
    val mask = RegInit(init_value._3)
    val cfg = WireInit(pmaCfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    // pmaMask are implicit regs that are used only for timing optimization
    for (i <- pma.indices) {
      pma(i).gen(cfg(i), addr(i), mask(i))
    }

    val blankCfg = PMXLEN == 32
    val cfg_index_wrapper = (0 until num by 4).zip((0 until num by 4).map(a => blankCfg || (a % pmaCfgPerCSR == 0)))
    val cfg_map = (cfg_index_wrapper).map{ case(i, notempty) => {
//      println(s"tlbpma i:$i notempty:$notempty")
      RegField.apply(n = PMXLEN, r = RegReadFn{(ivalid, oready) =>
        val r_ready = Wire(Bool())
        val o_valid = Wire(Bool())
        val v_reg = ValidHold(r_ready && ivalid, o_valid && oready, false.B)
        r_ready := !v_reg
        o_valid := v_reg

        if (notempty) { (r_ready, o_valid, pmaCfgMerged(pmaCfgIndex(i))) }
        else { (r_ready, o_valid, 0.U) }
      }, w = RegWriteFn((valid, data) => {
        if (notempty) { when (valid) { pmaCfgMerged(pmaCfgIndex(i)) := write_cfg_vec(mask, addr, i, pmaCfgMerged(pmaCfgIndex(i)))(data) } }
        true.B
      }), desc = RegFieldDesc(s"MMPMA_config_${i}", s"pma config register #${i}"))
    }}

    val addr_map = (0 until num).map{ i => {
      val next_cfg = if (i == 0) 0.U.asTypeOf(new PMPConfig()) else cfg(i-1)
      RegField(
        n = PMXLEN,
        r = ZeroExt(read_addr(cfg(i))(addr(i)), PMXLEN),
        w = RegWriteFn((valid, data) => {
          when (valid) { addr(i) := write_addr(next_cfg, mask(i))(data(addr(0).getWidth-1, 0), cfg(i), addr(i)) }
          true.B
        }),
        desc = RegFieldDesc(s"MMPMA_addr_${i}", s"pma addr register #${i}")
      )
    }}

    (cfg_map, addr_map, pma)
  }
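
  /* Illustrative wiring sketch: gen_mmpma_mapping returns the config RegFields, the
   * address RegFields and the decoded PMPEntry vector consumed by the PMA checkers.
   * Hooking the fields onto a diplomatic register node could look roughly like the
   * sketch below; `node` and the 0x0/0x100 offsets are placeholders, not the SoC's
   * actual MMPMA register layout.
   *
   *   val (cfg_map, addr_map, pma) = gen_mmpma_mapping(NumPMA)
   *   node.regmap(
   *     0x0   -> cfg_map,
   *     0x100 -> addr_map
   *   )
   */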

}

trait PMAMethod extends PMAConst {
  /**
  def SimpleMemMapList = List(
    //     Base address        Top address        Width  Description      Mode (RWXIDSAC)
    MemMap("h00_0000_0000",   "h00_0FFF_FFFF",   "h0", "Reserved",       "RW"),
    MemMap("h00_1000_0000",   "h00_1FFF_FFFF",   "h0", "QSPI_Flash",     "RWX"),
    MemMap("h00_2000_0000",   "h00_2FFF_FFFF",   "h0", "Reserved",       "RW"),
    MemMap("h00_3000_0000",   "h00_3000_FFFF",   "h0", "DMA",            "RW"),
    MemMap("h00_3001_0000",   "h00_3004_FFFF",   "h0", "GPU",            "RWC"),
    MemMap("h00_3005_0000",   "h00_3006_FFFF",   "h0", "USB/SDMMC",      "RW"),
    MemMap("h00_3007_0000",   "h00_30FF_FFFF",   "h0", "Reserved",       "RW"),
    MemMap("h00_3100_0000",   "h00_3111_FFFF",   "h0", "MMIO",           "RW"),
    MemMap("h00_3112_0000",   "h00_37FF_FFFF",   "h0", "Reserved",       "RW"),
    MemMap("h00_3800_0000",   "h00_3800_FFFF",   "h0", "CLINT",          "RW"),
    MemMap("h00_3801_0000",   "h00_3801_FFFF",   "h0", "BEU",            "RW"),
    MemMap("h00_3802_0000",   "h00_3802_0FFF",   "h0", "DebugModule",    "RWX"),
    MemMap("h00_3802_1000",   "h00_3802_1FFF",   "h0", "MMPMA",          "RW"),
    MemMap("h00_3802_2000",   "h00_3802_207F",   "h0", "L1DCacheCtrl",   "RW"),
    MemMap("h00_3802_2080",   "h00_3802_20FF",   "h0", "L1ICacheCtrl",   "RW"),
    MemMap("h00_3802_2100",   "h00_38FF_FFFF",   "h0", "Reserved",       ""),
    MemMap("h00_3900_0000",   "h00_3900_1FFF",   "h0", "L3CacheCtrl",    "RW"),
    MemMap("h00_3900_2000",   "h00_39FF_FFFF",   "h0", "Reserved",       ""),
    MemMap("h00_3A00_0000",   "h00_3FFF_FFFF",   "h0", "",               "RW"),
    Sub   ("h00_3A00_0000",   "h00_3A00_0FFF",   "h0", "PLL0",           "RW"),
    Sub   ("h00_3A00_1000",   "h00_3A7F_FFFF",   "h0", "Reserved",       "RW"),
    Sub   ("h00_3A80_0000",   "h00_3AFF_FFFF",   "h0", "IMSIC(M)",       "RW"),
    Sub   ("h00_3B00_0000",   "h00_3BFF_FFFF",   "h0", "IMSIC(S/VS)",    "RW"),
    Sub   ("h00_3C00_0000",   "h00_3FFF_FFFF",   "h0", "PLIC",           "RW"),
    MemMap("h00_4000_0000",   "h00_7FFF_FFFF",   "h0", "PCIe",           "RW"),
    MemMap("h00_8000_0000",   "h7FF_FFFF_FFFF",  "h0", "DDR",            "RWXIDSA"),
  )
  */

  def pma_init() : (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    def genAddr(init_addr: BigInt) = {
      init_addr.U((PMPAddrBits - PMPOffBits).W)
    }
    def genMask(init_addr: BigInt, a: BigInt) = {
      val match_mask_addr = (init_addr << 1) | (a & 0x1) | (((1 << PlatformGrain) - 1) >> PMPOffBits)
      val mask = ((match_mask_addr & ~(match_mask_addr + 1)) << PMPOffBits) | ((1 << PMPOffBits) - 1)
      mask.U(PMPAddrBits.W)
    }

    val num = NumPMA
    require(num >= 16)

    val cfg_list = ListBuffer[UInt]()
    val addr_list = ListBuffer[UInt]()
    val mask_list = ListBuffer[UInt]()

    def addPMA(conf: PMAConfigEntry) = {
      val addr = if (conf.a < 2) { shift_addr(conf.base_addr) }
        else { get_napot(conf.base_addr, conf.range) }
      cfg_list.append(PMPConfigUInt(conf.l, conf.c, conf.atomic, conf.a, conf.x, conf.w, conf.r))
      addr_list.append(genAddr(addr))
      mask_list.append(genMask(addr, conf.a))
    }

    PMAConfigs.foreach(addPMA)
    while (cfg_list.length < 16) {
      addPMA(PMAConfigEntry(0))
    }

    val cfgInitMerge = Seq.tabulate(num / 8)(i => {
      cfg_list.reverse.drop(8 * i).take(8).foldRight(BigInt(0L)) { case (a, result) =>
        (result << a.getWidth) | a.litValue
      }.U(PMXLEN.W)
    })
    val addr = addr_list.reverse
    val mask = mask_list.reverse
    (VecInit(cfgInitMerge), VecInit(addr.toSeq), VecInit(mask.toSeq))
  }

  def get_napot(base: BigInt, range: BigInt): BigInt = {
    val PlatformGrainBytes = (1 << PlatformGrain)
    if ((base % PlatformGrainBytes) != 0) {
      println(s"base: 0x${base.toString(16)}")
    }
    if ((range % PlatformGrainBytes) != 0) {
      println(s"range: 0x${range.toString(16)}")
    }
    require((base % PlatformGrainBytes) == 0)
    require((range % PlatformGrainBytes) == 0)

    ((base + (range/2 - 1)) >> PMPOffBits)
  }
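
  /* Worked example (illustrative values, assuming PMPOffBits = 2 and PlatformGrain = 12,
   * i.e. a 4 KiB grain): a 64 KiB NAPOT region with base = 0x3900_0000 and range = 0x1_0000
   * encodes as
   *   get_napot(base, range) = (0x3900_0000 + 0x8000 - 1) >> 2 = 0x0E40_1FFF,
   * i.e. base >> 2 with the low bits set to encode the size. pma_init's genMask then
   * recovers the byte-address match mask
   *   genMask(0x0E40_1FFF, 3) = 0xFFFF,
   * whose sixteen low set bits cover exactly the 64 KiB region.
   */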

  def match_mask(paddr: UInt, cfg: PMPConfig) = {
    val match_mask_addr: UInt = Cat(paddr, cfg.a(0)).asUInt | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_addr & ~(match_mask_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  def shift_addr(addr: BigInt) = {
    addr >> 2
  }
}

trait PMACheckMethod extends PMPConst {
  def pma_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd) && cfg.atomic) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    // TODO: a PMA should report an access as mmio only when the address also has the corresponding PMA permission.
    resp.mmio := !cfg.c &&
      (TlbCmd.isRead(cmd) && cfg.r ||
        (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd) && cfg.atomic) && cfg.w ||
        TlbCmd.isExec(cmd) && cfg.x)
    resp.atomic := cfg.atomic
    resp
  }

  def pma_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmaEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmaEntries.size
    require(num == NumPMA)
    // The PMA must always be checked and can never be ignored: attributes such as
    // atomic and cacheable are properties of the address rather than protections,
    // so the entries must carry a proper initialization.
    require(pmaEntries.nonEmpty)

    val pmaDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    pmaEntries.zip(pmaDefault +: pmaEntries.take(num-1)).zipWithIndex.foreach{ case ((pma, last_pma), i) =>
      val is_match = pma.is_match(addr, size, lgMaxSize, last_pma)
      val aligned = pma.aligned(addr, size, lgMaxSize, last_pma)

      val cur = WireInit(pma)
      cur.cfg.r := aligned && pma.cfg.r
      cur.cfg.w := aligned && pma.cfg.w
      cur.cfg.x := aligned && pma.cfg.x
      cur.cfg.atomic := aligned && pma.cfg.atomic
      cur.cfg.c := aligned && pma.cfg.c

      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    match_vec(num) := true.B
    cfg_vec(num) := pmaDefault
    if (leaveHitMux) {
      ParallelPriorityMux(match_vec.map(RegEnable(_, false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}
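
/* Check-path sketch: pma_match_res priority-selects the attribute entry that matches a
 * physical address, and pma_check turns that entry's config into a PMPRespBundle. The
 * request signals (req_addr, req_size, req_cmd) as well as the mode and lgMaxSize values
 * below are placeholders standing in for the caller's own wiring, e.g. a TLB check stage:
 *
 *   val matched  = pma_match_res(leaveHitMux = false, valid = true.B)(
 *     req_addr, req_size, pma, mode = 0.U, lgMaxSize = 3)
 *   val pma_resp = pma_check(req_cmd, matched.cfg)
 */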