// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Page table management.

use crate::read_sysreg;
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{
    Attributes, Constraints, Descriptor, MemoryRegion, TranslationRegime,
};
use aarch64_paging::MapError;
use core::result;

/// Software bit used to indicate a device that should be lazily mapped.
pub(super) const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;

/// We assume that MAIR_EL1.Attr0 = "Device-nGnRE memory" (0b0000_0100)
const DEVICE_NGNRE: Attributes = Attributes::ATTRIBUTE_INDEX_0;

/// We assume that MAIR_EL1.Attr1 = "Normal memory, Outer & Inner WB Non-transient, R/W-Allocate"
/// (0b1111_1111)
const NORMAL: Attributes = Attributes::ATTRIBUTE_INDEX_1.union(Attributes::INNER_SHAREABLE);

const MEMORY: Attributes =
    Attributes::VALID.union(NORMAL).union(Attributes::NON_GLOBAL).union(Attributes::ACCESSED);
const DEVICE_LAZY: Attributes =
    MMIO_LAZY_MAP_FLAG.union(DEVICE_NGNRE).union(Attributes::UXN).union(Attributes::ACCESSED);
const DEVICE: Attributes = DEVICE_LAZY.union(Attributes::VALID);
const CODE: Attributes = MEMORY.union(Attributes::READ_ONLY);
const DATA: Attributes = MEMORY.union(Attributes::UXN);
const RODATA: Attributes = DATA.union(Attributes::READ_ONLY);
const DATA_DBM: Attributes = RODATA.union(Attributes::DBM);
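
// A sketch (illustrative, not compiled into this file) of how the MAIR_EL1 assumptions above
// could be double-checked at runtime. Attr<n> occupies bits [8n+7:8n] of MAIR_EL1, so Attr0
// and Attr1 are the low two bytes:
//
//     let mair_el1 = read_sysreg!("mair_el1");
//     assert_eq!(mair_el1 & 0xff, 0b0000_0100); // Attr0: Device-nGnRE memory
//     assert_eq!((mair_el1 >> 8) & 0xff, 0b1111_1111); // Attr1: Normal WB R/W-Allocate memory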

type Result<T> = result::Result<T, MapError>;

/// High-level API for managing MMU mappings.
pub struct PageTable {
    idmap: IdMap,
}

impl From<IdMap> for PageTable {
    fn from(idmap: IdMap) -> Self {
        Self { idmap }
    }
}

impl Default for PageTable {
    fn default() -> Self {
        const TCR_EL1_TG0_MASK: usize = 0x3;
        const TCR_EL1_TG0_SHIFT: u32 = 14;
        const TCR_EL1_TG0_SIZE_4KB: usize = 0b00;

        const TCR_EL1_T0SZ_MASK: usize = 0x3f;
        const TCR_EL1_T0SZ_SHIFT: u32 = 0;
        const TCR_EL1_T0SZ_39_VA_BITS: usize = 64 - 39;

        // Ensure that entry.S wasn't changed without updating the assumptions about TCR_EL1 here.
        let tcr_el1 = read_sysreg!("tcr_el1");
        assert_eq!((tcr_el1 >> TCR_EL1_TG0_SHIFT) & TCR_EL1_TG0_MASK, TCR_EL1_TG0_SIZE_4KB);
        assert_eq!((tcr_el1 >> TCR_EL1_T0SZ_SHIFT) & TCR_EL1_T0SZ_MASK, TCR_EL1_T0SZ_39_VA_BITS);

        IdMap::new(Self::ASID, Self::ROOT_LEVEL, TranslationRegime::El1And0).into()
    }
}

impl PageTable {
    /// ASID used for the underlying page table.
    pub const ASID: usize = 1;

    /// Level of the underlying page table's root page.
    const ROOT_LEVEL: usize = 1;

    /// Activates the page table.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the PageTable instance has valid identity mappings for the
    /// code currently being executed. Otherwise, the Rust execution model (on which the borrow
    /// checker relies) would be violated.
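    ///
    /// # Example
    ///
    /// A minimal usage sketch; the symbol ranges and the exact safety argument are hypothetical
    /// and depend on the image layout:
    ///
    /// ```ignore
    /// let mut page_table = PageTable::default();
    /// page_table.map_code(&MemoryRegion::new(text_begin, text_end))?;
    /// page_table.map_rodata(&MemoryRegion::new(rodata_begin, rodata_end))?;
    /// page_table.map_data(&MemoryRegion::new(data_begin, data_end))?;
    /// // SAFETY: The ranges above identity-map all of the code being executed (assumed here).
    /// unsafe { page_table.activate() };
    /// ```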
    pub unsafe fn activate(&mut self) {
        // SAFETY: The caller of this unsafe function asserts that switching to a different
        // translation is safe.
        unsafe { self.idmap.activate() }
    }

    /// Maps the given range of virtual addresses to the physical addresses as lazily mapped
    /// nGnRE device memory.
    ///
    /// The mapping is created without `Attributes::VALID`, so the first access to the range
    /// faults; a fault handler can then validate the mapping on demand.
    pub fn map_device_lazy(&mut self, range: &MemoryRegion) -> Result<()> {
        self.idmap.map_range(range, DEVICE_LAZY)
    }
    /// Maps the given range of virtual addresses to the physical addresses as valid nGnRE
    /// device memory.
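    ///
    /// A usage sketch; the MMIO base address and size are hypothetical:
    ///
    /// ```ignore
    /// // Identity-map a single 4 KiB page of UART registers.
    /// page_table.map_device(&MemoryRegion::new(0x0900_0000, 0x0900_1000))?;
    /// ```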
    pub fn map_device(&mut self, range: &MemoryRegion) -> Result<()> {
        self.idmap.map_range(range, DEVICE)
    }

    /// Maps the given range of virtual addresses to the physical addresses as non-executable
    /// and writable normal memory.
    pub fn map_data(&mut self, range: &MemoryRegion) -> Result<()> {
        self.idmap.map_range(range, DATA)
    }

    /// Maps the given range of virtual addresses to the physical addresses as non-executable,
    /// read-only and writable-clean normal memory.
    pub fn map_data_dbm(&mut self, range: &MemoryRegion) -> Result<()> {
        // Map the region down to pages to minimize the size of the regions that will be marked
        // dirty once a store hits them, but also to ensure that we can clear the read-only
        // attribute while the mapping is live without causing break-before-make (BBM) violations.
        // The latter implies that we must avoid the use of the contiguous hint as well. The live
        // attribute change itself is sketched in the modify_range() example below.
        self.idmap.map_range_with_constraints(
            range,
            DATA_DBM,
            Constraints::NO_BLOCK_MAPPINGS | Constraints::NO_CONTIGUOUS_HINT,
        )
    }

    /// Maps the given range of virtual addresses to the physical addresses as executable and
    /// read-only normal memory.
    pub fn map_code(&mut self, range: &MemoryRegion) -> Result<()> {
        self.idmap.map_range(range, CODE)
    }

    /// Maps the given range of virtual addresses to the physical addresses as non-executable
    /// and read-only normal memory.
    pub fn map_rodata(&mut self, range: &MemoryRegion) -> Result<()> {
        self.idmap.map_range(range, RODATA)
    }

    /// Applies the provided updater function to a number of PTEs corresponding to a given memory
    /// range.
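    ///
    /// A sketch of a possible updater, marking writable-clean (DBM) pages dirty by clearing
    /// their read-only attribute in place. The helper name is hypothetical; it relies on
    /// `Descriptor::flags` and `Descriptor::modify_flags` from `aarch64_paging`:
    ///
    /// ```ignore
    /// fn mark_dirty(_: &MemoryRegion, desc: &mut Descriptor, _: usize) -> result::Result<(), ()> {
    ///     let flags = desc.flags().ok_or(())?;
    ///     if flags.contains(Attributes::DBM) {
    ///         // Live attribute change; map_data_dbm() used page mappings without the
    ///         // contiguous hint precisely so that this doesn't violate break-before-make.
    ///         desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
    ///         Ok(())
    ///     } else {
    ///         Err(())
    ///     }
    /// }
    /// ```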
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<()>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> result::Result<(), ()>,
    {
        self.idmap.modify_range(range, f)
    }

    /// Applies the provided callback function to a number of PTEs corresponding to a given memory
    /// range.
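    ///
    /// A sketch of a possible read-only callback, checking that every PTE in the range carries
    /// the lazy-MMIO software flag (the specific check is illustrative):
    ///
    /// ```ignore
    /// page_table.walk_range(&range, &|_chunk, desc: &Descriptor, _level| {
    ///     let flags = desc.flags().ok_or(())?;
    ///     if flags.contains(MMIO_LAZY_MAP_FLAG) { Ok(()) } else { Err(()) }
    /// })?;
    /// ```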
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &F) -> Result<()>
    where
        F: Fn(&MemoryRegion, &Descriptor, usize) -> result::Result<(), ()>,
    {
        // idmap.walk_range() takes its callback by mutable reference, so wrap the shared Fn in
        // a closure that can be borrowed mutably.
        let mut callback = |mr: &MemoryRegion, d: &Descriptor, l: usize| f(mr, d, l);
        self.idmap.walk_range(range, &mut callback)
    }
}