// Copyright 2022 The aarch64-paging Authors.
// This project is dual-licensed under Apache 2.0 and MIT terms.
// See LICENSE-APACHE and LICENSE-MIT for details.

//! Functionality for managing page tables with identity mapping.
//!
//! See [`IdMap`] for details on how to use it.

use crate::{
    paging::{
        deallocate, Attributes, Constraints, Descriptor, MemoryRegion, PageTable, PhysicalAddress,
        Translation, TranslationRegime, VaRange, VirtualAddress,
    },
    MapError, Mapping,
};
use core::ptr::NonNull;

/// Identity mapping, where every virtual address is either unmapped or mapped to the identical IPA.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct IdTranslation;

impl IdTranslation {
    fn virtual_to_physical(va: VirtualAddress) -> PhysicalAddress {
        PhysicalAddress(va.0)
    }
}

impl Translation for IdTranslation {
    fn allocate_table(&mut self) -> (NonNull<PageTable>, PhysicalAddress) {
        let table = PageTable::new();

        // Physical address is the same as the virtual address because we are using identity mapping
        // everywhere.
        (table, PhysicalAddress(table.as_ptr() as usize))
    }

    unsafe fn deallocate_table(&mut self, page_table: NonNull<PageTable>) {
        deallocate(page_table);
    }

    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable> {
        NonNull::new(pa.0 as *mut PageTable).expect("Got physical address 0 for pagetable")
    }
}

/// Manages a level 1 page table using identity mapping, where every virtual address is either
/// unmapped or mapped to the identical IPA.
///
/// This assumes that identity mapping is used both for the page table being managed, and for code
/// that is managing it.
///
/// Mappings should be added with [`map_range`](Self::map_range) before calling
/// [`activate`](Self::activate) to start using the new page table. To make changes which may
/// require break-before-make semantics you must first call [`deactivate`](Self::deactivate) to
/// switch back to a previous static page table, and then `activate` again after making the desired
/// changes.
///
/// # Example
///
/// ```no_run
/// use aarch64_paging::{
///     idmap::IdMap,
///     paging::{Attributes, MemoryRegion, TranslationRegime},
/// };
///
/// const ASID: usize = 1;
/// const ROOT_LEVEL: usize = 1;
/// const NORMAL_CACHEABLE: Attributes = Attributes::ATTRIBUTE_INDEX_1.union(Attributes::INNER_SHAREABLE);
///
/// // Create a new EL1 page table with identity mapping.
/// let mut idmap = IdMap::new(ASID, ROOT_LEVEL, TranslationRegime::El1And0);
/// // Map a 2 MiB region of memory as read-write.
/// idmap.map_range(
///     &MemoryRegion::new(0x80200000, 0x80400000),
///     NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::VALID | Attributes::ACCESSED,
/// ).unwrap();
/// // SAFETY: Everything the program uses is within the 2 MiB region mapped above.
/// unsafe {
///     // Set `TTBR0_EL1` to activate the page table.
///     idmap.activate();
/// }
///
/// // Write something to the memory...
///
/// // SAFETY: The program will only use memory within the initially mapped region until `idmap` is
/// // reactivated below.
/// unsafe {
///     // Restore `TTBR0_EL1` to its earlier value while we modify the page table.
///     idmap.deactivate();
/// }
/// // Now change the mapping to read-only and executable.
/// idmap.map_range(
///     &MemoryRegion::new(0x80200000, 0x80400000),
///     NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::READ_ONLY | Attributes::VALID
///     | Attributes::ACCESSED,
/// ).unwrap();
/// // SAFETY: Everything the program will use is mapped in by this page table.
/// unsafe {
///     idmap.activate();
/// }
/// ```
#[derive(Debug)]
pub struct IdMap {
    mapping: Mapping<IdTranslation>,
}

impl IdMap {
    /// Creates a new identity-mapping page table with the given ASID and root level.
    pub fn new(asid: usize, rootlevel: usize, translation_regime: TranslationRegime) -> Self {
        Self {
            mapping: Mapping::new(
                IdTranslation,
                asid,
                rootlevel,
                translation_regime,
                VaRange::Lower,
            ),
        }
    }

    /// Activates the page table by setting `TTBR0_EL1` to point to it, and saves the previous value
    /// of `TTBR0_EL1` so that it may later be restored by [`deactivate`](Self::deactivate).
    ///
    /// Panics if a previous value of `TTBR0_EL1` is already saved and not yet used by a call to
    /// `deactivate`.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBR0_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the page table doesn't unmap any memory which the program is
    /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
    /// dropped as long as its mappings are required, as it will automatically be deactivated when
    /// it is dropped.
    pub unsafe fn activate(&mut self) {
        self.mapping.activate()
    }

    /// Deactivates the page table, by setting `TTBR0_EL1` back to the value it had before
    /// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
    /// configured ASID.
    ///
    /// Panics if there is no saved `TTBR0_EL1` value because `activate` has not previously been
    /// called.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBR0_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the previous page table which this is switching back to doesn't
    /// unmap any memory which the program is using.
    pub unsafe fn deactivate(&mut self) {
        self.mapping.deactivate()
    }

    /// Maps the given range of virtual addresses to the identical physical addresses with the given
    /// flags.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains `Attributes::VALID`, otherwise the entries remain invalid.
    ///
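    /// # Example
    ///
    /// A minimal sketch: mapping a single 4 KiB page as device memory. The address is
    /// illustrative, and attribute index 0 is assumed to be configured as Device-nGnRE in
    /// `MAIR_EL1`, as in this crate's tests.
    ///
    /// ```no_run
    /// use aarch64_paging::{
    ///     idmap::IdMap,
    ///     paging::{Attributes, MemoryRegion, TranslationRegime},
    /// };
    ///
    /// let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
    /// idmap.map_range(
    ///     &MemoryRegion::new(0x900_0000, 0x900_1000),
    ///     Attributes::ATTRIBUTE_INDEX_0 | Attributes::VALID | Attributes::ACCESSED,
    /// ).unwrap();
    /// ```
    ///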
    /// # Errors
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
    pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
        self.map_range_with_constraints(range, flags, Constraints::empty())
    }

    /// Maps the given range of virtual addresses to the identical physical addresses with the
    /// given flags, taking the given constraints into account.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains `Attributes::VALID`, otherwise the entries remain invalid.
    ///
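    /// # Example
    ///
    /// A sketch of forcing page-granular mappings: with [`Constraints::NO_BLOCK_MAPPINGS`] the
    /// region is mapped as individual pages, so parts of it can later be remapped without
    /// splitting a live block entry (addresses illustrative).
    ///
    /// ```no_run
    /// use aarch64_paging::{
    ///     idmap::IdMap,
    ///     paging::{Attributes, Constraints, MemoryRegion, TranslationRegime},
    /// };
    ///
    /// let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
    /// idmap.map_range_with_constraints(
    ///     &MemoryRegion::new(0x80200000, 0x80400000),
    ///     Attributes::ATTRIBUTE_INDEX_1
    ///         | Attributes::INNER_SHAREABLE
    ///         | Attributes::VALID
    ///         | Attributes::ACCESSED,
    ///     Constraints::NO_BLOCK_MAPPINGS,
    /// ).unwrap();
    /// ```
    ///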
    /// # Errors
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
    pub fn map_range_with_constraints(
        &mut self,
        range: &MemoryRegion,
        flags: Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        let pa = IdTranslation::virtual_to_physical(range.start());
        self.mapping.map_range(range, pa, flags, constraints)
    }

    /// Applies the provided updater function to the page table descriptors covering a given
    /// memory range.
    ///
    /// This may involve splitting block entries if the provided range is not currently mapped
    /// down to its precise boundaries. For visiting all the descriptors covering a memory range
    /// without potential splitting (and no descriptor updates), use
    /// [`walk_range`](Self::walk_range) instead.
    ///
    /// The updater function receives the following arguments:
    ///
    /// - The virtual address range mapped by each page table descriptor. A new descriptor will
    ///   have been allocated before the invocation of the updater function if a page table split
    ///   was needed.
    /// - A mutable reference to the page table descriptor that permits modifications.
    /// - The level of the translation table the descriptor belongs to.
    ///
    /// The updater function should return:
    ///
    /// - `Ok` to continue updating the remaining entries.
    /// - `Err` to signal an error and stop updating the remaining entries.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active.
    ///
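    /// # Example
    ///
    /// A sketch (adapted from this module's tests) that sets a software-defined flag on every
    /// page descriptor covering a previously mapped range:
    ///
    /// ```no_run
    /// use aarch64_paging::{
    ///     idmap::IdMap,
    ///     paging::{Attributes, MemoryRegion, PAGE_SIZE, TranslationRegime},
    /// };
    ///
    /// let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
    /// // ... map the range first ...
    /// idmap.modify_range(&MemoryRegion::new(0, PAGE_SIZE), &|_range, entry, level| {
    ///     // Level 3 descriptors are always pages, never tables or blocks.
    ///     if level == 3 {
    ///         entry.modify_flags(Attributes::SWFLAG_0, Attributes::empty());
    ///     }
    ///     Ok(())
    /// }).unwrap();
    /// ```
    ///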
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
    {
        self.mapping.modify_range(range, f)
    }

    /// Applies the provided callback function to the page table descriptors covering a given
    /// memory range.
    ///
    /// The callback function receives the following arguments:
    ///
    /// - The range covered by the current step in the walk. This is always a subrange of `range`
    ///   even when the descriptor covers a region that exceeds it.
    /// - The page table descriptor itself.
    /// - The level of the translation table the descriptor belongs to.
    ///
    /// The callback function should return:
    ///
    /// - `Ok` to continue visiting the remaining entries.
    /// - `Err` to signal an error and stop visiting the remaining entries.
    ///
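    /// # Example
    ///
    /// A sketch that prints each descriptor covering a range (assuming an environment where
    /// `println!` is available; the walk itself never modifies the table):
    ///
    /// ```no_run
    /// use aarch64_paging::{
    ///     idmap::IdMap,
    ///     paging::{MemoryRegion, PAGE_SIZE, TranslationRegime},
    /// };
    ///
    /// let idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
    /// idmap.walk_range(&MemoryRegion::new(0, 4 * PAGE_SIZE), &mut |range, descriptor, level| {
    ///     println!("{:?} at level {}: flags {:?}", range, level, descriptor.flags());
    ///     Ok(())
    /// }).unwrap();
    /// ```
    ///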
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
    {
        self.mapping.walk_range(range, f)
    }

    /// Returns the physical address of the root table.
    ///
    /// This may be used to activate the page table by setting the appropriate TTBRn_ELx if you wish
    /// to do so yourself rather than by calling [`activate`](Self::activate). Make sure to call
    /// [`mark_active`](Self::mark_active) after doing so.
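    ///
    /// For example (a sketch; the TTBR write itself is platform-specific code that this crate
    /// would otherwise do for you in [`activate`](Self::activate)):
    ///
    /// ```no_run
    /// use aarch64_paging::{idmap::IdMap, paging::TranslationRegime};
    ///
    /// let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
    /// let root = idmap.root_address();
    /// // ... save the current TTBR0_EL1 value, then write `root.0` (typically with the ASID in
    /// // the upper bits) to TTBR0_EL1 using your own platform code ...
    /// let previous_ttbr = 0; // illustrative: the TTBR0_EL1 value saved above
    /// idmap.mark_active(previous_ttbr);
    /// ```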
    pub fn root_address(&self) -> PhysicalAddress {
        self.mapping.root_address()
    }

    /// Marks the page table as active.
    ///
    /// This should be called if the page table is manually activated by calling
    /// [`root_address`](Self::root_address) and setting some TTBR with it. This will cause
    /// [`map_range`](Self::map_range) and [`modify_range`](Self::modify_range) to perform extra
    /// checks to avoid violating break-before-make requirements.
    ///
    /// It is called automatically by [`activate`](Self::activate).
    pub fn mark_active(&mut self, previous_ttbr: usize) {
        self.mapping.mark_active(previous_ttbr);
    }

    /// Marks the page table as inactive.
    ///
    /// This may be called after manually disabling the use of the page table, such as by setting
    /// the relevant TTBR to a different address.
    ///
    /// It is called automatically by [`deactivate`](Self::deactivate).
    pub fn mark_inactive(&mut self) {
        self.mapping.mark_inactive();
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        paging::{Attributes, MemoryRegion, BITS_PER_LEVEL, PAGE_SIZE},
        MapError, VirtualAddress,
    };

    const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39;
    const DEVICE_NGNRE: Attributes = Attributes::ATTRIBUTE_INDEX_0;
    const NORMAL_CACHEABLE: Attributes =
        Attributes::ATTRIBUTE_INDEX_1.union(Attributes::INNER_SHAREABLE);

    #[test]
    fn map_valid() {
        // A single byte at the start of the address space.
        let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, 1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // Two pages at the start of the address space.
        let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, PAGE_SIZE * 2),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // A single byte at the end of the address space.
        let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
                ),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // Two pages, on the boundary between two subtables.
        let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(PAGE_SIZE * 1023, PAGE_SIZE * 1025),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // The entire valid address space.
        let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );
    }

    #[test]
    fn map_break_before_make() {
        const BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
        let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
        idmap
            .map_range_with_constraints(
                &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
                Constraints::NO_BLOCK_MAPPINGS,
            )
            .unwrap();
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }

        // Splitting a range is permitted if it was mapped down to pages.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            ),
            Ok(())
        );

        let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
        idmap
            .map_range(
                &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            )
            .ok();
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }

        // Extending a range is fine even if there are block mappings in the middle.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(BLOCK_SIZE - PAGE_SIZE, 2 * BLOCK_SIZE + PAGE_SIZE),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            ),
            Ok(())
        );

        // Splitting a range is not permitted.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            ),
            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
                BLOCK_SIZE,
                BLOCK_SIZE + PAGE_SIZE
            )))
        );

        // Remapping a partially live range read-only is only permitted if it does not require
        // splitting.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, BLOCK_SIZE + PAGE_SIZE),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED | Attributes::READ_ONLY,
            ),
            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
                BLOCK_SIZE,
                BLOCK_SIZE + PAGE_SIZE
            )))
        );
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, BLOCK_SIZE),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED | Attributes::READ_ONLY,
            ),
            Ok(())
        );

        // Changing the memory type is not permitted.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, BLOCK_SIZE),
                DEVICE_NGNRE | Attributes::VALID | Attributes::ACCESSED | Attributes::NON_GLOBAL,
            ),
            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
                0, PAGE_SIZE
            )))
        );

        // Making a range invalid is only permitted if it does not require splitting.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE + PAGE_SIZE),
                NORMAL_CACHEABLE,
            ),
            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
                BLOCK_SIZE,
                BLOCK_SIZE + PAGE_SIZE
            )))
        );
        assert_eq!(
            idmap.map_range(&MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE), NORMAL_CACHEABLE),
            Ok(())
        );

        // Creating a new valid entry is always permitted.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, 2 * PAGE_SIZE),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            ),
            Ok(())
        );

        // Setting the non-global attribute is permitted.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, PAGE_SIZE),
                NORMAL_CACHEABLE
                    | Attributes::VALID
                    | Attributes::ACCESSED
                    | Attributes::NON_GLOBAL,
            ),
            Ok(())
        );

        // Removing the non-global attribute from a live mapping is not permitted.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, PAGE_SIZE),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            ),
            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
                0, PAGE_SIZE
            )))
        );

        // SAFETY: This doesn't actually deactivate the page table in tests, it just treats it as
        // inactive for the sake of BBM rules.
        unsafe {
            idmap.deactivate();
        }
        // Removing the non-global attribute from an inactive mapping is permitted.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, PAGE_SIZE),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            ),
            Ok(())
        );
    }

    #[test]
    fn map_out_of_range() {
        let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);

        // One byte, just past the edge of the valid range.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
                ),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );

        // From 0 to just past the valid range.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );
    }

    fn make_map() -> IdMap {
        let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
        idmap
            .map_range(
                &MemoryRegion::new(0, PAGE_SIZE * 2),
                NORMAL_CACHEABLE
                    | Attributes::NON_GLOBAL
                    | Attributes::READ_ONLY
                    | Attributes::VALID
                    | Attributes::ACCESSED,
            )
            .unwrap();
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        idmap
    }

    #[test]
    fn update_backwards_range() {
        let mut idmap = make_map();
        assert!(idmap
            .modify_range(
                &MemoryRegion::new(PAGE_SIZE * 2, 1),
                &|_range, entry, _level| {
                    entry
                        .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
                    Ok(())
                },
            )
            .is_err());
    }

    #[test]
    fn update_range() {
        let mut idmap = make_map();
        assert!(idmap
            .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
                if level == 3 || !entry.is_table_or_page() {
                    entry.modify_flags(Attributes::SWFLAG_0, Attributes::NON_GLOBAL);
                }
                Ok(())
            })
            .is_err());
        idmap
            .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
                if level == 3 || !entry.is_table_or_page() {
                    entry
                        .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
                }
                Ok(())
            })
            .unwrap();
        idmap
            .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry, level| {
                if level == 3 || !entry.is_table_or_page() {
                    assert!(entry.flags().unwrap().contains(Attributes::SWFLAG_0));
                    assert_eq!(range.end() - range.start(), PAGE_SIZE);
                }
                Ok(())
            })
            .unwrap();
    }

    #[test]
    fn breakup_invalid_block() {
        const BLOCK_RANGE: usize = 0x200000;
        let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        idmap
            .map_range(
                &MemoryRegion::new(0, BLOCK_RANGE),
                NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::SWFLAG_0,
            )
            .unwrap();
        idmap
            .map_range(
                &MemoryRegion::new(0, PAGE_SIZE),
                NORMAL_CACHEABLE
                    | Attributes::NON_GLOBAL
                    | Attributes::VALID
                    | Attributes::ACCESSED,
            )
            .unwrap();
        idmap
            .modify_range(
                &MemoryRegion::new(0, BLOCK_RANGE),
                &|range, entry, level| {
                    if level == 3 {
                        let has_swflag = entry.flags().unwrap().contains(Attributes::SWFLAG_0);
                        let is_first_page = range.start().0 == 0usize;
                        assert!(has_swflag != is_first_page);
                    }
                    Ok(())
                },
            )
            .unwrap();
    }

    /// When an unmapped entry is split into a table, all entries should be zero.
    #[test]
    fn split_table_zero() {
        let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0);

        idmap
            .map_range(
                &MemoryRegion::new(0, PAGE_SIZE),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            )
            .unwrap();
        idmap
            .walk_range(
                &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE * 20),
                &mut |_, descriptor, _| {
                    assert!(!descriptor.is_valid());
                    assert_eq!(descriptor.flags(), Some(Attributes::empty()));
                    assert_eq!(descriptor.output_address(), PhysicalAddress(0));
                    Ok(())
                },
            )
            .unwrap();
    }
}