1 // Copyright 2022 The aarch64-paging Authors.
2 // This project is dual-licensed under Apache 2.0 and MIT terms.
3 // See LICENSE-APACHE and LICENSE-MIT for details.
4 
5 //! A library to manipulate AArch64 VMSA page tables.
6 //!
7 //! Currently it only supports:
8 //!   - stage 1 page tables
9 //!   - 4 KiB pages
10 //!   - EL3, NS-EL2, NS-EL2&0 and NS-EL1&0 translation regimes
11 //!
12 //! Full support is provided for identity mapping ([`IdMap`](idmap::IdMap)) and linear mapping
13 //! ([`LinearMap`](linearmap::LinearMap)). If you want to use a different mapping scheme, you must
14 //! provide an implementation of the [`Translation`] trait and then use [`Mapping`] directly.
15 //!
16 //! # Example
17 //!
18 //! ```no_run
19 //! # #[cfg(feature = "alloc")] {
20 //! use aarch64_paging::{
21 //!     idmap::IdMap,
22 //!     paging::{Attributes, MemoryRegion, TranslationRegime},
23 //! };
24 //!
25 //! const ASID: usize = 1;
26 //! const ROOT_LEVEL: usize = 1;
27 //! const NORMAL_CACHEABLE: Attributes = Attributes::ATTRIBUTE_INDEX_1.union(Attributes::INNER_SHAREABLE);
28 //!
29 //! // Create a new EL1 page table with identity mapping.
30 //! let mut idmap = IdMap::new(ASID, ROOT_LEVEL, TranslationRegime::El1And0);
31 //! // Map a 2 MiB region of memory as read-write.
32 //! idmap.map_range(
33 //!     &MemoryRegion::new(0x80200000, 0x80400000),
34 //!     NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::VALID | Attributes::ACCESSED,
35 //! ).unwrap();
36 //! // SAFETY: Everything the program uses is within the 2 MiB region mapped above.
37 //! unsafe {
38 //!     // Set `TTBR0_EL1` to activate the page table.
39 //!     idmap.activate();
40 //! }
41 //! # }
42 //! ```
43 
44 #![no_std]
45 #![deny(clippy::undocumented_unsafe_blocks)]
46 
47 #[cfg(feature = "alloc")]
48 pub mod idmap;
49 #[cfg(feature = "alloc")]
50 pub mod linearmap;
51 pub mod paging;
52 
53 #[cfg(feature = "alloc")]
54 extern crate alloc;
55 
56 #[cfg(target_arch = "aarch64")]
57 use core::arch::asm;
58 use core::fmt::{self, Display, Formatter};
59 use paging::{
60     Attributes, Constraints, Descriptor, MemoryRegion, PhysicalAddress, RootTable, Translation,
61     TranslationRegime, VaRange, VirtualAddress,
62 };
63 
/// An error attempting to map some range in the page table.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum MapError {
    /// The address requested to be mapped was out of the range supported by the page table
    /// configuration.
    AddressRange(VirtualAddress),
    /// The address requested to be mapped was not valid for the mapping in use.
    InvalidVirtualAddress(VirtualAddress),
    /// The end of the memory region is before the start.
    RegionBackwards(MemoryRegion),
    /// There was an error while updating a page table entry.
    PteUpdateFault(Descriptor),
    /// The requested flags are not supported for this mapping.
    InvalidFlags(Attributes),
    /// Updating the range would violate break-before-make rules while the mapping is live.
    BreakBeforeMakeViolation(MemoryRegion),
}
81 
82 impl Display for MapError {
fmt(&self, f: &mut Formatter) -> fmt::Result83     fn fmt(&self, f: &mut Formatter) -> fmt::Result {
84         match self {
85             Self::AddressRange(va) => write!(f, "Virtual address {} out of range", va),
86             Self::InvalidVirtualAddress(va) => {
87                 write!(f, "Invalid virtual address {} for mapping", va)
88             }
89             Self::RegionBackwards(region) => {
90                 write!(f, "End of memory region {} is before start.", region)
91             }
92             Self::PteUpdateFault(desc) => {
93                 write!(f, "Error updating page table entry {:?}", desc)
94             }
95             Self::InvalidFlags(flags) => {
96                 write!(f, "Flags {flags:?} unsupported for mapping.")
97             }
98             Self::BreakBeforeMakeViolation(region) => {
99                 write!(f, "Cannot remap region {region} while translation is live.")
100             }
101         }
102     }
103 }
104 
/// Manages a level 1 page table and associated state.
///
/// Mappings should be added with [`map_range`](Self::map_range) before calling
/// [`activate`](Self::activate) to start using the new page table. To make changes which may
/// require break-before-make semantics you must first call [`deactivate`](Self::deactivate) to
/// switch back to a previous static page table, and then `activate` again after making the desired
/// changes.
#[derive(Debug)]
pub struct Mapping<T: Translation> {
    // The root page table, through which all mapping and walking operations go.
    root: RootTable<T>,
    // Address space ID; ORed into bits 48+ of TTBRn_ELx on activation and used for TLB
    // invalidation on deactivation. Only read by aarch64-specific code, hence `allow(unused)`.
    #[allow(unused)]
    asid: usize,
    // The TTBRn_ELx value saved by `activate`; `Some` exactly while this mapping is active.
    // Only written via `mark_active`/`mark_inactive` on non-aarch64 builds, hence `allow(unused)`.
    #[allow(unused)]
    previous_ttbr: Option<usize>,
}
120 
121 impl<T: Translation> Mapping<T> {
122     /// Creates a new page table with the given ASID, root level and translation mapping.
new( translation: T, asid: usize, rootlevel: usize, translation_regime: TranslationRegime, va_range: VaRange, ) -> Self123     pub fn new(
124         translation: T,
125         asid: usize,
126         rootlevel: usize,
127         translation_regime: TranslationRegime,
128         va_range: VaRange,
129     ) -> Self {
130         if !translation_regime.supports_asid() && asid != 0 {
131             panic!("{:?} doesn't support ASID, must be 0.", translation_regime);
132         }
133         Self {
134             root: RootTable::new(translation, rootlevel, translation_regime, va_range),
135             asid,
136             previous_ttbr: None,
137         }
138     }
139 
140     /// Returns whether this mapping is currently active.
active(&self) -> bool141     pub fn active(&self) -> bool {
142         self.previous_ttbr.is_some()
143     }
144 
145     /// Activates the page table by setting `TTBRn_ELx` to point to it, and saves the previous value
146     /// of `TTBRn_ELx` so that it may later be restored by [`deactivate`](Self::deactivate).
147     ///
148     /// Panics if a previous value of `TTBRn_ELx` is already saved and not yet used by a call to
149     /// `deactivate`.
150     ///
151     /// In test builds or builds that do not target aarch64, the `TTBRn_ELx` access is omitted.
152     ///
153     /// # Safety
154     ///
155     /// The caller must ensure that the page table doesn't unmap any memory which the program is
156     /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
157     /// dropped as long as its mappings are required, as it will automatically be deactivated when
158     /// it is dropped.
activate(&mut self)159     pub unsafe fn activate(&mut self) {
160         assert!(!self.active());
161 
162         #[allow(unused)]
163         let mut previous_ttbr = usize::MAX;
164 
165         #[cfg(all(not(test), target_arch = "aarch64"))]
166         // SAFETY: Safe because we trust that self.root_address() returns a valid physical address
167         // of a page table, and the `Drop` implementation will reset `TTBRn_ELx` before it becomes
168         // invalid.
169         unsafe {
170             match (self.root.translation_regime(), self.root.va_range()) {
171                 (TranslationRegime::El1And0, VaRange::Lower) => asm!(
172                     "mrs   {previous_ttbr}, ttbr0_el1",
173                     "msr   ttbr0_el1, {ttbrval}",
174                     "isb",
175                     ttbrval = in(reg) self.root_address().0 | (self.asid << 48),
176                     previous_ttbr = out(reg) previous_ttbr,
177                     options(preserves_flags),
178                 ),
179                 (TranslationRegime::El1And0, VaRange::Upper) => asm!(
180                     "mrs   {previous_ttbr}, ttbr1_el1",
181                     "msr   ttbr1_el1, {ttbrval}",
182                     "isb",
183                     ttbrval = in(reg) self.root_address().0 | (self.asid << 48),
184                     previous_ttbr = out(reg) previous_ttbr,
185                     options(preserves_flags),
186                 ),
187                 (TranslationRegime::El2And0, VaRange::Lower) => asm!(
188                     "mrs   {previous_ttbr}, ttbr0_el2",
189                     "msr   ttbr0_el2, {ttbrval}",
190                     "isb",
191                     ttbrval = in(reg) self.root_address().0 | (self.asid << 48),
192                     previous_ttbr = out(reg) previous_ttbr,
193                     options(preserves_flags),
194                 ),
195                 (TranslationRegime::El2And0, VaRange::Upper) => asm!(
196                     "mrs   {previous_ttbr}, s3_4_c2_c0_1", // ttbr1_el2
197                     "msr   s3_4_c2_c0_1, {ttbrval}",
198                     "isb",
199                     ttbrval = in(reg) self.root_address().0 | (self.asid << 48),
200                     previous_ttbr = out(reg) previous_ttbr,
201                     options(preserves_flags),
202                 ),
203                 (TranslationRegime::El2, VaRange::Lower) => asm!(
204                     "mrs   {previous_ttbr}, ttbr0_el2",
205                     "msr   ttbr0_el2, {ttbrval}",
206                     "isb",
207                     ttbrval = in(reg) self.root_address().0,
208                     previous_ttbr = out(reg) previous_ttbr,
209                     options(preserves_flags),
210                 ),
211                 (TranslationRegime::El3, VaRange::Lower) => asm!(
212                     "mrs   {previous_ttbr}, ttbr0_el3",
213                     "msr   ttbr0_el3, {ttbrval}",
214                     "isb",
215                     ttbrval = in(reg) self.root_address().0,
216                     previous_ttbr = out(reg) previous_ttbr,
217                     options(preserves_flags),
218                 ),
219                 _ => {
220                     panic!("Invalid combination of exception level and VA range.");
221                 }
222             }
223         }
224         self.mark_active(previous_ttbr);
225     }
226 
227     /// Deactivates the page table, by setting `TTBRn_ELx` back to the value it had before
228     /// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
229     /// configured ASID.
230     ///
231     /// Panics if there is no saved `TTBRn_ELx` value because `activate` has not previously been
232     /// called.
233     ///
234     /// In test builds or builds that do not target aarch64, the `TTBRn_ELx` access is omitted.
235     ///
236     /// # Safety
237     ///
238     /// The caller must ensure that the previous page table which this is switching back to doesn't
239     /// unmap any memory which the program is using.
deactivate(&mut self)240     pub unsafe fn deactivate(&mut self) {
241         assert!(self.active());
242 
243         #[cfg(all(not(test), target_arch = "aarch64"))]
244         // SAFETY: Safe because this just restores the previously saved value of `TTBRn_ELx`, which
245         // must have been valid.
246         unsafe {
247             match (self.root.translation_regime(), self.root.va_range()) {
248                 (TranslationRegime::El1And0, VaRange::Lower) => asm!(
249                     "msr   ttbr0_el1, {ttbrval}",
250                     "isb",
251                     "tlbi  aside1, {asid}",
252                     "dsb   nsh",
253                     "isb",
254                     asid = in(reg) self.asid << 48,
255                     ttbrval = in(reg) self.previous_ttbr.unwrap(),
256                     options(preserves_flags),
257                 ),
258                 (TranslationRegime::El1And0, VaRange::Upper) => asm!(
259                     "msr   ttbr1_el1, {ttbrval}",
260                     "isb",
261                     "tlbi  aside1, {asid}",
262                     "dsb   nsh",
263                     "isb",
264                     asid = in(reg) self.asid << 48,
265                     ttbrval = in(reg) self.previous_ttbr.unwrap(),
266                     options(preserves_flags),
267                 ),
268                 (TranslationRegime::El2And0, VaRange::Lower) => asm!(
269                     "msr   ttbr0_el2, {ttbrval}",
270                     "isb",
271                     "tlbi  aside1, {asid}",
272                     "dsb   nsh",
273                     "isb",
274                     asid = in(reg) self.asid << 48,
275                     ttbrval = in(reg) self.previous_ttbr.unwrap(),
276                     options(preserves_flags),
277                 ),
278                 (TranslationRegime::El2And0, VaRange::Upper) => asm!(
279                     "msr   s3_4_c2_c0_1, {ttbrval}", // ttbr1_el2
280                     "isb",
281                     "tlbi  aside1, {asid}",
282                     "dsb   nsh",
283                     "isb",
284                     asid = in(reg) self.asid << 48,
285                     ttbrval = in(reg) self.previous_ttbr.unwrap(),
286                     options(preserves_flags),
287                 ),
288                 (TranslationRegime::El2, VaRange::Lower) => {
289                     panic!("EL2 page table can't safety be deactivated.");
290                 }
291                 (TranslationRegime::El3, VaRange::Lower) => {
292                     panic!("EL3 page table can't safety be deactivated.");
293                 }
294                 _ => {
295                     panic!("Invalid combination of exception level and VA range.");
296                 }
297             }
298         }
299         self.mark_inactive();
300     }
301 
302     /// Checks whether the given range can be mapped or updated while the translation is live,
303     /// without violating architectural break-before-make (BBM) requirements.
check_range_bbm<F>(&self, range: &MemoryRegion, updater: &F) -> Result<(), MapError> where F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,304     fn check_range_bbm<F>(&self, range: &MemoryRegion, updater: &F) -> Result<(), MapError>
305     where
306         F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
307     {
308         self.root.visit_range(
309             range,
310             &mut |mr: &MemoryRegion, d: &Descriptor, level: usize| {
311                 if d.is_valid() {
312                     let err = MapError::BreakBeforeMakeViolation(mr.clone());
313 
314                     if !mr.is_block(level) {
315                         // Cannot split a live block mapping
316                         return Err(err);
317                     }
318 
319                     // Get the new flags and output address for this descriptor by applying
320                     // the updater function to a copy
321                     let (flags, oa) = {
322                         let mut dd = *d;
323                         updater(mr, &mut dd, level).or(Err(err.clone()))?;
324                         (dd.flags().ok_or(err.clone())?, dd.output_address())
325                     };
326 
327                     if !flags.contains(Attributes::VALID) {
328                         // Removing the valid bit is always ok
329                         return Ok(());
330                     }
331 
332                     if oa != d.output_address() {
333                         // Cannot change output address on a live mapping
334                         return Err(err);
335                     }
336 
337                     let desc_flags = d.flags().unwrap();
338 
339                     if (desc_flags ^ flags).intersects(
340                         Attributes::ATTRIBUTE_INDEX_MASK | Attributes::SHAREABILITY_MASK,
341                     ) {
342                         // Cannot change memory type
343                         return Err(err);
344                     }
345 
346                     if (desc_flags - flags).contains(Attributes::NON_GLOBAL) {
347                         // Cannot convert from non-global to global
348                         return Err(err);
349                     }
350                 }
351                 Ok(())
352             },
353         )
354     }
355 
356     /// Maps the given range of virtual addresses to the corresponding range of physical addresses
357     /// starting at `pa`, with the given flags, taking the given constraints into account.
358     ///
359     /// This should generally only be called while the page table is not active. In particular, any
360     /// change that may require break-before-make per the architecture must be made while the page
361     /// table is inactive. Mapping a previously unmapped memory range may be done while the page
362     /// table is active. This function writes block and page entries, but only maps them if `flags`
363     /// contains `Attributes::VALID`, otherwise the entries remain invalid.
364     ///
365     /// # Errors
366     ///
367     /// Returns [`MapError::RegionBackwards`] if the range is backwards.
368     ///
369     /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
370     /// largest virtual address covered by the page table given its root level.
371     ///
372     /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
373     ///
374     /// Returns [`MapError::BreakBeforeMakeViolation'] if the range intersects with live mappings,
375     /// and modifying those would violate architectural break-before-make (BBM) requirements.
map_range( &mut self, range: &MemoryRegion, pa: PhysicalAddress, flags: Attributes, constraints: Constraints, ) -> Result<(), MapError>376     pub fn map_range(
377         &mut self,
378         range: &MemoryRegion,
379         pa: PhysicalAddress,
380         flags: Attributes,
381         constraints: Constraints,
382     ) -> Result<(), MapError> {
383         if self.active() {
384             let c = |mr: &MemoryRegion, d: &mut Descriptor, lvl: usize| {
385                 let mask = !(paging::granularity_at_level(lvl) - 1);
386                 let pa = (mr.start() - range.start() + pa.0) & mask;
387                 d.set(PhysicalAddress(pa), flags);
388                 Ok(())
389             };
390             self.check_range_bbm(range, &c)?;
391         }
392         self.root.map_range(range, pa, flags, constraints)?;
393         #[cfg(target_arch = "aarch64")]
394         // SAFETY: Safe because this is just a memory barrier.
395         unsafe {
396             asm!("dsb ishst");
397         }
398         Ok(())
399     }
400 
401     /// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
402     ///
403     /// This may involve splitting block entries if the provided range is not currently mapped
404     /// down to its precise boundaries. For visiting all the descriptors covering a memory range
405     /// without potential splitting (and no descriptor updates), use
406     /// [`walk_range`](Self::walk_range) instead.
407     ///
408     /// This should generally only be called while the page table is not active. In particular, any
409     /// change that may require break-before-make per the architecture must be made while the page
410     /// table is inactive. Mapping a previously unmapped memory range may be done while the page
411     /// table is active.
412     ///
413     /// # Errors
414     ///
415     /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
416     ///
417     /// Returns [`MapError::RegionBackwards`] if the range is backwards.
418     ///
419     /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
420     /// largest virtual address covered by the page table given its root level.
421     ///
422     /// Returns [`MapError::BreakBeforeMakeViolation'] if the range intersects with live mappings,
423     /// and modifying those would violate architectural break-before-make (BBM) requirements.
modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError> where F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,424     pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
425     where
426         F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
427     {
428         if self.active() {
429             self.check_range_bbm(range, f)?;
430         }
431         self.root.modify_range(range, f)?;
432         #[cfg(target_arch = "aarch64")]
433         // SAFETY: Safe because this is just a memory barrier.
434         unsafe {
435             asm!("dsb ishst");
436         }
437         Ok(())
438     }
439 
440     /// Applies the provided function to a number of PTEs corresponding to a given memory range.
441     ///
442     /// The virtual address range passed to the callback function may be expanded compared to the
443     /// `range` parameter, due to alignment to block boundaries.
444     ///
445     /// # Errors
446     ///
447     /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
448     ///
449     /// Returns [`MapError::RegionBackwards`] if the range is backwards.
450     ///
451     /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
452     /// largest virtual address covered by the page table given its root level.
walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError> where F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,453     pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
454     where
455         F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
456     {
457         self.root.walk_range(range, f)
458     }
459 
460     /// Returns the physical address of the root table.
461     ///
462     /// This may be used to activate the page table by setting the appropriate TTBRn_ELx if you wish
463     /// to do so yourself rather than by calling [`activate`](Self::activate). Make sure to call
464     /// [`mark_active`](Self::mark_active) after doing so.
root_address(&self) -> PhysicalAddress465     pub fn root_address(&self) -> PhysicalAddress {
466         self.root.to_physical()
467     }
468 
469     /// Marks the page table as active.
470     ///
471     /// This should be called if the page table is manually activated by calling
472     /// [`root_address`](Self::root_address) and setting some TTBR with it. This will cause
473     /// [`map_range`](Self::map_range) and [`modify_range`](Self::modify_range) to perform extra
474     /// checks to avoid violating break-before-make requirements.
475     ///
476     /// It is called automatically by [`activate`](Self::activate).
mark_active(&mut self, previous_ttbr: usize)477     pub fn mark_active(&mut self, previous_ttbr: usize) {
478         self.previous_ttbr = Some(previous_ttbr);
479     }
480 
481     /// Marks the page table as inactive.
482     ///
483     /// This may be called after manually disabling the use of the page table, such as by setting
484     /// the relevant TTBR to a different address.
485     ///
486     /// It is called automatically by [`deactivate`](Self::deactivate).
mark_inactive(&mut self)487     pub fn mark_inactive(&mut self) {
488         self.previous_ttbr = None;
489     }
490 }
491 
492 impl<T: Translation> Drop for Mapping<T> {
drop(&mut self)493     fn drop(&mut self) {
494         if self.previous_ttbr.is_some() {
495             #[cfg(target_arch = "aarch64")]
496             // SAFETY: When activate was called the caller promised that they wouldn't drop the page
497             // table until its mappings were no longer needed.
498             unsafe {
499                 self.deactivate();
500             }
501         }
502     }
503 }
504 
#[cfg(test)]
mod tests {
    #[cfg(feature = "alloc")]
    use super::idmap::IdTranslation;
    #[cfg(feature = "alloc")]
    use super::*;

    /// Constructing an EL2 mapping with a non-zero ASID should panic, since `Mapping::new` rejects
    /// a non-zero ASID for regimes without ASID support.
    #[cfg(feature = "alloc")]
    #[test]
    #[should_panic]
    fn no_el2_asid() {
        Mapping::new(IdTranslation, 1, 1, TranslationRegime::El2, VaRange::Lower);
    }

    /// Constructing an EL3 mapping with a non-zero ASID should panic, since `Mapping::new` rejects
    /// a non-zero ASID for regimes without ASID support.
    #[cfg(feature = "alloc")]
    #[test]
    #[should_panic]
    fn no_el3_asid() {
        Mapping::new(IdTranslation, 1, 1, TranslationRegime::El3, VaRange::Lower);
    }
}
526