// Copyright 2022 The aarch64-paging Authors.
// This project is dual-licensed under Apache 2.0 and MIT terms.
// See LICENSE-APACHE and LICENSE-MIT for details.

//! Functionality for managing page tables with linear mapping.
//!
//! See [`LinearMap`] for details on how to use it.

use crate::{
    paging::{
        deallocate, is_aligned, Attributes, Constraints, Descriptor, MemoryRegion, PageTable,
        PhysicalAddress, Translation, TranslationRegime, VaRange, VirtualAddress, PAGE_SIZE,
    },
    MapError, Mapping,
};
use core::ptr::NonNull;

/// Linear mapping, where every virtual address is either unmapped or mapped to an IPA with a fixed
/// offset.
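///
/// # Example
///
/// A minimal sketch (not part of the original docs), assuming this type is exported from the
/// crate's `linearmap` module: with an offset of one page, each virtual address corresponds to
/// the IPA one page above it.
///
/// ```
/// use aarch64_paging::linearmap::LinearTranslation;
///
/// // Under this translation, VA 0x8000_0000 corresponds to IPA 0x8000_1000.
/// let translation = LinearTranslation::new(0x1000);
/// ```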
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct LinearTranslation {
    /// The offset from a virtual address to the corresponding (intermediate) physical address.
    offset: isize,
}

impl LinearTranslation {
    /// Constructs a new linear translation, which will map a virtual address `va` to the
    /// (intermediate) physical address `va + offset`.
    ///
    /// The `offset` must be a multiple of [`PAGE_SIZE`]; if not this will panic.
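    ///
    /// # Example
    ///
    /// A small sketch (not from the original docs): the offset may be negative, as long as it is
    /// page-aligned.
    ///
    /// ```
    /// use aarch64_paging::linearmap::LinearTranslation;
    /// use aarch64_paging::paging::PAGE_SIZE;
    ///
    /// // Maps each VA to the IPA one page below it. An unaligned offset such as 42 would panic.
    /// let translation = LinearTranslation::new(-(PAGE_SIZE as isize));
    /// ```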
    pub fn new(offset: isize) -> Self {
        if !is_aligned(offset.unsigned_abs(), PAGE_SIZE) {
            panic!(
                "Invalid offset {}, must be a multiple of page size {}.",
                offset, PAGE_SIZE,
            );
        }
        Self { offset }
    }

    fn virtual_to_physical(&self, va: VirtualAddress) -> Result<PhysicalAddress, MapError> {
        if let Some(pa) = checked_add_to_unsigned(va.0 as isize, self.offset) {
            Ok(PhysicalAddress(pa))
        } else {
            Err(MapError::InvalidVirtualAddress(va))
        }
    }
}

impl Translation for LinearTranslation {
    fn allocate_table(&mut self) -> (NonNull<PageTable>, PhysicalAddress) {
        let table = PageTable::new();
        // Assume that the same linear mapping is used everywhere.
        let va = VirtualAddress(table.as_ptr() as usize);

        let pa = self.virtual_to_physical(va).expect(
            "Allocated subtable with virtual address which doesn't correspond to any physical address."
        );
        (table, pa)
    }

    unsafe fn deallocate_table(&mut self, page_table: NonNull<PageTable>) {
        deallocate(page_table);
    }

    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable> {
        let signed_pa = pa.0 as isize;
        if signed_pa < 0 {
            panic!("Invalid physical address {} for pagetable", pa);
        }
        if let Some(va) = signed_pa.checked_sub(self.offset) {
            if let Some(ptr) = NonNull::new(va as *mut PageTable) {
                ptr
            } else {
                panic!(
                    "Invalid physical address {} for pagetable (translated to virtual address 0)",
                    pa
                )
            }
        } else {
            panic!("Invalid physical address {} for pagetable", pa);
        }
    }
}

/// Adds two signed values, returning the sum as an unsigned value, or `None` if the addition
/// overflows or the sum is negative.
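///
/// For illustration (not in the original source; marked `ignore` because this helper is private):
///
/// ```ignore
/// assert_eq!(checked_add_to_unsigned(1024, 4096), Some(5120));
/// assert_eq!(checked_add_to_unsigned(1024, -4096), None); // the sum is negative
/// assert_eq!(checked_add_to_unsigned(isize::MAX, 1), None); // the addition overflows
/// ```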
fn checked_add_to_unsigned(a: isize, b: isize) -> Option<usize> {
    a.checked_add(b)?.try_into().ok()
}

/// Manages a level 1 page table using linear mapping, where every virtual address is either
/// unmapped or mapped to an IPA with a fixed offset.
///
/// This assumes that the same linear mapping is used both for the page table being managed, and for
/// code that is managing it.
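///
/// # Example
///
/// A minimal usage sketch (not part of the original docs), assuming the type is exported from the
/// crate's `linearmap` module: build a map with a one-page offset and map two pages at the bottom
/// of the address space.
///
/// ```
/// use aarch64_paging::linearmap::LinearMap;
/// use aarch64_paging::paging::{Attributes, MemoryRegion, TranslationRegime, VaRange, PAGE_SIZE};
///
/// let mut map = LinearMap::new(1, 1, PAGE_SIZE as isize, TranslationRegime::El1And0, VaRange::Lower);
/// // VA range 0x0..0x2000 is mapped to IPA range 0x1000..0x3000.
/// map.map_range(
///     &MemoryRegion::new(0, 2 * PAGE_SIZE),
///     Attributes::ATTRIBUTE_INDEX_1
///         | Attributes::INNER_SHAREABLE
///         | Attributes::VALID
///         | Attributes::ACCESSED,
/// )
/// .unwrap();
/// ```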
#[derive(Debug)]
pub struct LinearMap {
    mapping: Mapping<LinearTranslation>,
}

impl LinearMap {
    /// Creates a new linear-mapping page table with the given ASID, root level and offset, for
    /// use in the given TTBR.
    ///
    /// This will map any virtual address `va` which is added to the table to the physical address
    /// `va + offset`.
    ///
    /// The `offset` must be a multiple of [`PAGE_SIZE`]; if not this will panic.
    pub fn new(
        asid: usize,
        rootlevel: usize,
        offset: isize,
        translation_regime: TranslationRegime,
        va_range: VaRange,
    ) -> Self {
        Self {
            mapping: Mapping::new(
                LinearTranslation::new(offset),
                asid,
                rootlevel,
                translation_regime,
                va_range,
            ),
        }
    }

    /// Activates the page table by setting `TTBRn_EL1` to point to it, and saves the previous value
    /// of `TTBRn_EL1` so that it may later be restored by [`deactivate`](Self::deactivate).
    ///
    /// Panics if a previous value of `TTBRn_EL1` is already saved and not yet used by a call to
    /// `deactivate`.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the page table doesn't unmap any memory which the program is
    /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
    /// dropped as long as its mappings are required, as it will automatically be deactivated when
    /// it is dropped.
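    ///
    /// # Example
    ///
    /// A sketch of the activate/deactivate round trip (not from the original docs; marked
    /// `no_run` because switching page tables affects the whole system and is only meaningful on
    /// the target):
    ///
    /// ```no_run
    /// # use aarch64_paging::linearmap::LinearMap;
    /// # use aarch64_paging::paging::{TranslationRegime, VaRange};
    /// let mut map = LinearMap::new(1, 1, 0x1000, TranslationRegime::El1And0, VaRange::Lower);
    /// // SAFETY: for this sketch we assume the map covers all memory the program is using.
    /// unsafe {
    ///     map.activate();
    ///     // ... run with the new mappings ...
    ///     map.deactivate();
    /// }
    /// ```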
    pub unsafe fn activate(&mut self) {
        self.mapping.activate()
    }

    /// Deactivates the page table, by setting `TTBRn_EL1` back to the value it had before
    /// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
    /// configured ASID.
    ///
    /// Panics if there is no saved `TTBRn_EL1` value because `activate` has not previously been
    /// called.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the previous page table which this is switching back to doesn't
    /// unmap any memory which the program is using.
    pub unsafe fn deactivate(&mut self) {
        self.mapping.deactivate()
    }

    /// Maps the given range of virtual addresses to the corresponding physical addresses with the
    /// given flags.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains `Attributes::VALID`, otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
    /// address within the `range` would result in overflow.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
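    ///
    /// # Example
    ///
    /// A sketch (not from the original docs) of preparing entries without making them valid yet:
    ///
    /// ```
    /// # use aarch64_paging::linearmap::LinearMap;
    /// # use aarch64_paging::paging::{Attributes, MemoryRegion, TranslationRegime, VaRange};
    /// let mut map = LinearMap::new(1, 1, 0x1000, TranslationRegime::El1And0, VaRange::Lower);
    /// // Without `Attributes::VALID` the entries are written but remain invalid, so accesses to
    /// // this range would still fault once the table is activated.
    /// map.map_range(
    ///     &MemoryRegion::new(0, 0x2000),
    ///     Attributes::ATTRIBUTE_INDEX_1 | Attributes::INNER_SHAREABLE,
    /// )
    /// .unwrap();
    /// ```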
    pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
        self.map_range_with_constraints(range, flags, Constraints::empty())
    }

    /// Maps the given range of virtual addresses to the corresponding physical addresses with the
    /// given flags, taking the given constraints into account.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains `Attributes::VALID`, otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
    /// address within the `range` would result in overflow.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
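    ///
    /// # Example
    ///
    /// A sketch (not from the original docs) of forcing page-granule mappings. The
    /// `Constraints::NO_BLOCK_MAPPINGS` flag name is assumed here, so the example is not compiled:
    ///
    /// ```ignore
    /// # use aarch64_paging::linearmap::LinearMap;
    /// # use aarch64_paging::paging::{Attributes, Constraints, MemoryRegion, TranslationRegime, VaRange};
    /// let mut map = LinearMap::new(1, 1, 0x1000, TranslationRegime::El1And0, VaRange::Lower);
    /// map.map_range_with_constraints(
    ///     &MemoryRegion::new(0, 2 << 20),
    ///     Attributes::ATTRIBUTE_INDEX_1 | Attributes::INNER_SHAREABLE | Attributes::VALID
    ///         | Attributes::ACCESSED,
    ///     Constraints::NO_BLOCK_MAPPINGS, // assumed constraint: map only at page granularity
    /// )
    /// .unwrap();
    /// ```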
    pub fn map_range_with_constraints(
        &mut self,
        range: &MemoryRegion,
        flags: Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        let pa = self
            .mapping
            .root
            .translation()
            .virtual_to_physical(range.start())?;
        self.mapping.map_range(range, pa, flags, constraints)
    }

    /// Applies the provided updater function to the page table descriptors covering a given
    /// memory range.
    ///
    /// This may involve splitting block entries if the provided range is not currently mapped
    /// down to its precise boundaries. For visiting all the descriptors covering a memory range
    /// without potential splitting (and no descriptor updates), use
    /// [`walk_range`](Self::walk_range) instead.
    ///
    /// The updater function receives the following arguments:
    ///
    /// - The virtual address range mapped by each page table descriptor. A new descriptor will
    ///   have been allocated before the invocation of the updater function if a page table split
    ///   was needed.
    /// - A mutable reference to the page table descriptor that permits modifications.
    /// - The level of a translation table the descriptor belongs to.
    ///
    /// The updater function should return:
    ///
    /// - `Ok` to continue updating the remaining entries.
    /// - `Err` to signal an error and stop updating the remaining entries.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
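    ///
    /// # Example
    ///
    /// A sketch (not from the original docs) of an updater that sets a software flag on every
    /// leaf entry in a range:
    ///
    /// ```
    /// # use aarch64_paging::linearmap::LinearMap;
    /// # use aarch64_paging::paging::{Attributes, MemoryRegion, TranslationRegime, VaRange};
    /// let mut map = LinearMap::new(1, 1, 0x1000, TranslationRegime::El1And0, VaRange::Lower);
    /// map.map_range(
    ///     &MemoryRegion::new(0, 0x1000),
    ///     Attributes::ATTRIBUTE_INDEX_1 | Attributes::INNER_SHAREABLE,
    /// )
    /// .unwrap();
    /// map.modify_range(&MemoryRegion::new(0, 0x1000), &|_range, entry, level| {
    ///     if level == 3 || !entry.is_table_or_page() {
    ///         entry.modify_flags(Attributes::SWFLAG_0, Attributes::empty());
    ///     }
    ///     Ok(())
    /// })
    /// .unwrap();
    /// ```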
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
    {
        self.mapping.modify_range(range, f)
    }

    /// Applies the provided callback function to the page table descriptors covering a given
    /// memory range.
    ///
    /// The callback function receives the following arguments:
    ///
    /// - The range covered by the current step in the walk. This is always a subrange of `range`
    ///   even when the descriptor covers a region that exceeds it.
    /// - The page table descriptor itself.
    /// - The level of a translation table the descriptor belongs to.
    ///
    /// The callback function should return:
    ///
    /// - `Ok` to continue visiting the remaining entries.
    /// - `Err` to signal an error and stop visiting the remaining entries.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
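    ///
    /// # Example
    ///
    /// A sketch (not from the original docs) of a read-only walk that counts the level 3 page
    /// descriptors covering a range:
    ///
    /// ```
    /// # use aarch64_paging::linearmap::LinearMap;
    /// # use aarch64_paging::paging::{Attributes, MemoryRegion, TranslationRegime, VaRange};
    /// let mut map = LinearMap::new(1, 1, 0x1000, TranslationRegime::El1And0, VaRange::Lower);
    /// map.map_range(
    ///     &MemoryRegion::new(0, 0x1000),
    ///     Attributes::ATTRIBUTE_INDEX_1 | Attributes::INNER_SHAREABLE | Attributes::VALID
    ///         | Attributes::ACCESSED,
    /// )
    /// .unwrap();
    /// let mut pages = 0;
    /// map.walk_range(&MemoryRegion::new(0, 0x1000), &mut |_range, entry, level| {
    ///     if level == 3 && entry.is_table_or_page() {
    ///         pages += 1;
    ///     }
    ///     Ok(())
    /// })
    /// .unwrap();
    /// assert_eq!(pages, 1);
    /// ```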
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
    {
        self.mapping.walk_range(range, f)
    }

    /// Returns the physical address of the root table.
    ///
    /// This may be used to activate the page table by setting the appropriate `TTBRn_ELx` if you
    /// wish to do so yourself rather than by calling [`activate`](Self::activate). Make sure to
    /// call [`mark_active`](Self::mark_active) after doing so.
    pub fn root_address(&self) -> PhysicalAddress {
        self.mapping.root_address()
    }

    /// Marks the page table as active.
    ///
    /// This should be called if the page table is manually activated by calling
    /// [`root_address`](Self::root_address) and setting some TTBR with it. This will cause
    /// [`map_range`](Self::map_range) and [`modify_range`](Self::modify_range) to perform extra
    /// checks to avoid violating break-before-make requirements.
    ///
    /// It is called automatically by [`activate`](Self::activate).
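    ///
    /// # Example
    ///
    /// A sketch of the manual-activation flow (not from the original docs; marked `no_run`
    /// because the TTBR write itself is platform-specific code outside this crate):
    ///
    /// ```no_run
    /// # use aarch64_paging::linearmap::LinearMap;
    /// # use aarch64_paging::paging::{TranslationRegime, VaRange};
    /// let mut map = LinearMap::new(1, 1, 0x1000, TranslationRegime::El1And0, VaRange::Lower);
    /// let root = map.root_address();
    /// // ... save the current TTBR value, then write `root.0` to the TTBR (platform-specific) ...
    /// let previous_ttbr = 0; // whatever value was read back before overwriting the TTBR
    /// map.mark_active(previous_ttbr);
    /// ```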
    pub fn mark_active(&mut self, previous_ttbr: usize) {
        self.mapping.mark_active(previous_ttbr);
    }

    /// Marks the page table as inactive.
    ///
    /// This may be called after manually disabling the use of the page table, such as by setting
    /// the relevant TTBR to a different address.
    ///
    /// It is called automatically by [`deactivate`](Self::deactivate).
    pub fn mark_inactive(&mut self) {
        self.mapping.mark_inactive();
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        paging::{Attributes, MemoryRegion, BITS_PER_LEVEL, PAGE_SIZE},
        MapError,
    };

    const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39;
    const GIB_512_S: isize = 512 * 1024 * 1024 * 1024;
    const GIB_512: usize = 512 * 1024 * 1024 * 1024;
    const NORMAL_CACHEABLE: Attributes =
        Attributes::ATTRIBUTE_INDEX_1.union(Attributes::INNER_SHAREABLE);

    #[test]
    fn map_valid() {
        // A single byte at the start of the address space.
        let mut pagetable = LinearMap::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, 1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // Two pages at the start of the address space.
        let mut pagetable = LinearMap::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, PAGE_SIZE * 2),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // A single byte at the end of the address space.
        let mut pagetable = LinearMap::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
                ),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // The entire valid address space. Use an offset that is a multiple of the level 2 block
        // size to avoid mapping everything as pages as that is really slow.
        const LEVEL_2_BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
        let mut pagetable = LinearMap::new(
            1,
            1,
            LEVEL_2_BLOCK_SIZE as isize,
            TranslationRegime::El1And0,
            VaRange::Lower,
        );
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );
    }

    #[test]
    fn map_valid_negative_offset() {
        // A single byte which maps to IPA 0.
        let mut pagetable = LinearMap::new(
            1,
            1,
            -(PAGE_SIZE as isize),
            TranslationRegime::El1And0,
            VaRange::Lower,
        );
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE + 1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // Two pages at the start of the address space.
        let mut pagetable = LinearMap::new(
            1,
            1,
            -(PAGE_SIZE as isize),
            TranslationRegime::El1And0,
            VaRange::Lower,
        );
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE * 3),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // A single byte at the end of the address space.
        let mut pagetable = LinearMap::new(
            1,
            1,
            -(PAGE_SIZE as isize),
            TranslationRegime::El1And0,
            VaRange::Lower,
        );
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
                ),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );

        // The entire valid address space. Use an offset that is a multiple of the level 2 block
        // size to avoid mapping everything as pages as that is really slow.
        const LEVEL_2_BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
        let mut pagetable = LinearMap::new(
            1,
            1,
            -(LEVEL_2_BLOCK_SIZE as isize),
            TranslationRegime::El1And0,
            VaRange::Lower,
        );
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(LEVEL_2_BLOCK_SIZE, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Ok(())
        );
    }

    #[test]
    fn map_out_of_range() {
        let mut pagetable = LinearMap::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower);

        // One byte, just past the edge of the valid range.
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
                ),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );

        // From 0 to just past the valid range.
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );
    }

    #[test]
    fn map_invalid_offset() {
        let mut pagetable = LinearMap::new(1, 1, -4096, TranslationRegime::El1And0, VaRange::Lower);

        // One byte, with an offset which would map it to a negative IPA.
        assert_eq!(
            pagetable.map_range(&MemoryRegion::new(0, 1), NORMAL_CACHEABLE),
            Err(MapError::InvalidVirtualAddress(VirtualAddress(0)))
        );
    }

    #[test]
    fn physical_address_in_range_ttbr0() {
        let translation = LinearTranslation::new(4096);
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(8192)),
            NonNull::new(4096 as *mut PageTable).unwrap(),
        );
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(GIB_512 + 4096)),
            NonNull::new(GIB_512 as *mut PageTable).unwrap(),
        );
    }

    #[test]
    #[should_panic]
    fn physical_address_to_zero_ttbr0() {
        let translation = LinearTranslation::new(4096);
        translation.physical_to_virtual(PhysicalAddress(4096));
    }

    #[test]
    #[should_panic]
    fn physical_address_out_of_range_ttbr0() {
        let translation = LinearTranslation::new(4096);
        translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize));
    }

    #[test]
    fn physical_address_in_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to one page above the bottom
        // of physical address space.
        let translation = LinearTranslation::new(GIB_512_S + 4096);
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(8192)),
            NonNull::new((4096 - GIB_512_S) as *mut PageTable).unwrap(),
        );
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(GIB_512)),
            NonNull::new(-4096_isize as *mut PageTable).unwrap(),
        );
    }

    #[test]
    #[should_panic]
    fn physical_address_to_zero_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation = LinearTranslation::new(GIB_512_S);
        translation.physical_to_virtual(PhysicalAddress(GIB_512));
    }

    #[test]
    #[should_panic]
    fn physical_address_out_of_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation = LinearTranslation::new(GIB_512_S);
        translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize));
    }

    #[test]
    fn virtual_address_out_of_range() {
        let translation = LinearTranslation::new(-4096);
        let va = VirtualAddress(1024);
        assert_eq!(
            translation.virtual_to_physical(va),
            Err(MapError::InvalidVirtualAddress(va))
        )
    }

    #[test]
    fn virtual_address_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation = LinearTranslation::new(GIB_512_S);

        // The first page in the region covered by TTBR1.
        assert_eq!(
            translation.virtual_to_physical(VirtualAddress(0xffff_ff80_0000_0000)),
            Ok(PhysicalAddress(0))
        );
        // The last page in the region covered by TTBR1.
        assert_eq!(
            translation.virtual_to_physical(VirtualAddress(0xffff_ffff_ffff_f000)),
            Ok(PhysicalAddress(0x7f_ffff_f000))
        );
    }

    #[test]
    fn block_mapping() {
        // Test that block mapping is used when the PA is appropriately aligned...
        let mut pagetable =
            LinearMap::new(1, 1, 1 << 30, TranslationRegime::El1And0, VaRange::Lower);
        pagetable
            .map_range(
                &MemoryRegion::new(0, 1 << 30),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            )
            .unwrap();
        assert_eq!(
            pagetable.mapping.root.mapping_level(VirtualAddress(0)),
            Some(1)
        );

        // ...but not when it is not.
        let mut pagetable =
            LinearMap::new(1, 1, 1 << 29, TranslationRegime::El1And0, VaRange::Lower);
        pagetable
            .map_range(
                &MemoryRegion::new(0, 1 << 30),
                NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED,
            )
            .unwrap();
        assert_eq!(
            pagetable.mapping.root.mapping_level(VirtualAddress(0)),
            Some(2)
        );
    }

    fn make_map() -> LinearMap {
        let mut lmap = LinearMap::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower);
        // Mapping VA range 0x0 - 0x2000 to PA range 0x1000 - 0x3000
        lmap.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), NORMAL_CACHEABLE)
            .unwrap();
        lmap
    }

    #[test]
    fn update_backwards_range() {
        let mut lmap = make_map();
        assert!(lmap
            .modify_range(
                &MemoryRegion::new(PAGE_SIZE * 2, 1),
                &|_range, entry, _level| {
                    entry
                        .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
                    Ok(())
                },
            )
            .is_err());
    }

    #[test]
    fn update_range() {
        let mut lmap = make_map();
        lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
            if level == 3 || !entry.is_table_or_page() {
                entry.modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
            }
            Ok(())
        })
        .unwrap();
        lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry, level| {
            if level == 3 || !entry.is_table_or_page() {
                assert!(entry.flags().unwrap().contains(Attributes::SWFLAG_0));
                assert_eq!(range.end() - range.start(), PAGE_SIZE);
            }
            Ok(())
        })
        .unwrap();
    }

    #[test]
    fn breakup_invalid_block() {
        const BLOCK_RANGE: usize = 0x200000;

        let mut lmap = LinearMap::new(1, 1, 0x1000, TranslationRegime::El1And0, VaRange::Lower);
        lmap.map_range(
            &MemoryRegion::new(0, BLOCK_RANGE),
            NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::SWFLAG_0,
        )
        .unwrap();
        lmap.map_range(
            &MemoryRegion::new(0, PAGE_SIZE),
            NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::VALID | Attributes::ACCESSED,
        )
        .unwrap();
        lmap.modify_range(
            &MemoryRegion::new(0, BLOCK_RANGE),
            &|range, entry, level| {
                if level == 3 {
                    let has_swflag = entry.flags().unwrap().contains(Attributes::SWFLAG_0);
                    let is_first_page = range.start().0 == 0usize;
                    assert!(has_swflag != is_first_page);
                }
                Ok(())
            },
        )
        .unwrap();
    }
}