// Copyright 2024, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Memory management.

use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
use super::error::MemoryTrackerError;
use super::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
use super::shared::{SHARED_MEMORY, SHARED_POOL};
use crate::dsb;
use crate::layout;
use crate::memory::shared::{MemoryRange, MemorySharer, MmioSharer};
use crate::util::RangeExt as _;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
use alloc::boxed::Box;
use buddy_system_allocator::LockedFrameAllocator;
use core::mem::size_of;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::result;
use hypervisor_backends::{get_mem_sharer, get_mmio_guard};
use log::{debug, error, info};
use spin::mutex::{SpinMutex, SpinMutexGuard};
use tinyvec::ArrayVec;

/// A global static variable representing the system memory tracker, protected by a spin mutex.
pub(crate) static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);

fn get_va_range(range: &MemoryRange) -> VaRange {
    VaRange::new(range.start, range.end)
}

type Result<T> = result::Result<T, MemoryTrackerError>;

/// Attempts to lock `MEMORY`, returning an error if it has already been deactivated.
fn try_lock_memory_tracker() -> Result<SpinMutexGuard<'static, Option<MemoryTracker>>> {
    // Being single-threaded, we only spin if `deactivate_dynamic_page_tables()` leaked the lock.
    MEMORY.try_lock().ok_or(MemoryTrackerError::Unavailable)
}

/// Switches the MMU to dynamic page tables managed by a new `MemoryTracker`.
///
/// Panics if called more than once.
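///
/// # Example
///
/// A minimal sketch of the intended call sequence; no arguments or addresses are involved, so
/// this only illustrates ordering:
///
/// ```ignore
/// // Early during boot, while the static identity map is still active:
/// switch_to_dynamic_page_tables();
/// // ... map regions and run the payload-specific logic ...
/// // At the end, release the dynamic page tables and their resources:
/// deactivate_dynamic_page_tables();
/// ```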
pub(crate) fn switch_to_dynamic_page_tables() {
    let mut locked_tracker = try_lock_memory_tracker().unwrap();
    if locked_tracker.is_some() {
        panic!("switch_to_dynamic_page_tables() called more than once.");
    }

    locked_tracker.replace(MemoryTracker::new(
        layout::crosvm::MEM_START..layout::MAX_VIRT_ADDR,
        layout::crosvm::MMIO_RANGE,
    ));
}

/// Switch the MMU back to the static page tables (see `idmap` C symbol).
///
/// Panics if called before `switch_to_dynamic_page_tables()` or more than once.
pub fn deactivate_dynamic_page_tables() {
    let locked_tracker = try_lock_memory_tracker().unwrap();
    // Force future calls to try_lock_memory_tracker() to fail by leaking this lock guard.
    let leaked_tracker = SpinMutexGuard::leak(locked_tracker);
    // Force deallocation/unsharing of all the resources used by the MemoryTracker.
    drop(leaked_tracker.take())
}

/// Redefines the actual mappable range of memory.
///
/// Fails if a region has already been mapped beyond the new upper limit.
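///
/// # Example
///
/// A minimal sketch, assuming `dt_memory_size` was parsed from the platform device tree
/// (the helper and variable names are illustrative, not actual vmbase APIs):
///
/// ```ignore
/// let dt_memory_size = read_memory_size_from_dt(&fdt)?; // hypothetical helper
/// let new_range = layout::crosvm::MEM_START..(layout::crosvm::MEM_START + dt_memory_size);
/// resize_available_memory(&new_range)?;
/// ```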
pub fn resize_available_memory(memory_range: &Range<usize>) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    tracker.shrink(memory_range)
}

/// Initialize the memory pool for page sharing with the host.
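///
/// # Example
///
/// A minimal sketch: the dynamic pool is used when the hypervisor supports MEM_SHARE, otherwise
/// a static swiotlb region from the DT (if any) is used; `swiotlb_range` and how it is obtained
/// are illustrative, not actual vmbase APIs:
///
/// ```ignore
/// let swiotlb_range: Option<Range<usize>> = read_swiotlb_range_from_dt(&fdt); // hypothetical helper
/// init_shared_pool(swiotlb_range)?;
/// ```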
pub fn init_shared_pool(static_range: Option<Range<usize>>) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    if let Some(mem_sharer) = get_mem_sharer() {
        let granule = mem_sharer.granule()?;
        tracker.init_dynamic_shared_pool(granule)
    } else if let Some(r) = static_range {
        tracker.init_static_shared_pool(r)
    } else {
        info!("Initialized shared pool from heap memory without MEM_SHARE");
        tracker.init_heap_shared_pool()
    }
}

/// Unshare all MMIO that was previously shared with the host, with the exception of the UART page.
pub fn unshare_all_mmio_except_uart() -> Result<()> {
    let Ok(mut locked_tracker) = try_lock_memory_tracker() else { return Ok(()) };
    let Some(tracker) = locked_tracker.as_mut() else { return Ok(()) };
    if cfg!(feature = "compat_android_13") {
        info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
    }
    tracker.unshare_all_mmio()
}

/// Unshare all memory that was previously shared with the host.
pub fn unshare_all_memory() {
    let Ok(mut locked_tracker) = try_lock_memory_tracker() else { return };
    let Some(tracker) = locked_tracker.as_mut() else { return };
    tracker.unshare_all_memory()
}

/// Unshare the UART page, previously shared with the host.
pub fn unshare_uart() -> Result<()> {
    let Some(mmio_guard) = get_mmio_guard() else { return Ok(()) };
    Ok(mmio_guard.unmap(layout::UART_PAGE_ADDR)?)
}
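
// A typical teardown sequence (an illustrative sketch, not prescriptive): unshare MMIO first
// while keeping the UART for logging, then unshare memory, and release the UART page last, e.g.:
//
//     unshare_all_mmio_except_uart()?;
//     unshare_all_memory();
//     unshare_uart()?;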

/// Map the provided range as normal memory, with R/W permissions.
///
/// This fails if the range has already been (partially) mapped.
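///
/// # Example
///
/// A minimal sketch; the address and size are purely illustrative:
///
/// ```ignore
/// let size = NonZeroUsize::new(0x1000).unwrap(); // one 4 KiB page
/// map_data(0x8000_0000, size)?;
/// ```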
pub fn map_data(addr: usize, size: NonZeroUsize) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let _ = tracker.alloc_mut(addr, size)?;
    Ok(())
}

/// Map the provided range as normal memory, with R/W permissions.
///
/// Unlike `map_data()`, `deactivate_dynamic_page_tables()` will not flush caches for the range.
///
/// This fails if the range has already been (partially) mapped.
pub fn map_data_noflush(addr: usize, size: NonZeroUsize) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let _ = tracker.alloc_mut_noflush(addr, size)?;
    Ok(())
}

/// Map the region potentially holding data appended to the image, with read-write permissions.
///
/// This fails if the footer has already been mapped.
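///
/// # Example
///
/// A minimal sketch of using the returned range:
///
/// ```ignore
/// let footer: Range<usize> = map_image_footer()?;
/// // Data appended to the image can now be accessed through this virtual address range.
/// ```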
pub fn map_image_footer() -> Result<Range<usize>> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let range = tracker.map_image_footer()?;
    Ok(range)
}

/// Map the provided range as normal memory, with read-only permissions.
///
/// This fails if the range has already been (partially) mapped.
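///
/// # Example
///
/// A minimal sketch, e.g. for a configuration blob received from the loader (the address and
/// size are illustrative):
///
/// ```ignore
/// map_rodata(0x8200_0000, NonZeroUsize::new(0x2000).unwrap())?;
/// ```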
pub fn map_rodata(addr: usize, size: NonZeroUsize) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let _ = tracker.alloc(addr, size)?;
    Ok(())
}

// TODO(ptosi): Merge this into map_rodata.
/// Map the provided range as normal memory, with read-only permissions.
///
/// # Safety
///
/// Callers of this method need to ensure that the `range` is valid for mapping as read-only data.
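///
/// # Example
///
/// A minimal sketch of the call pattern (the address and size are illustrative):
///
/// ```ignore
/// // SAFETY: The caller has verified this range is valid to map as read-only data.
/// unsafe { map_rodata_outside_main_memory(0x7fff_0000, NonZeroUsize::new(0x1000).unwrap()) }?;
/// ```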
pub unsafe fn map_rodata_outside_main_memory(addr: usize, size: NonZeroUsize) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let end = addr + usize::from(size);
    // SAFETY: Caller has checked that it is valid to map the range.
    let _ = unsafe { tracker.alloc_range_outside_main_memory(&(addr..end)) }?;
    Ok(())
}

/// Map the provided range as device memory.
///
/// This fails if the range has already been (partially) mapped.
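///
/// # Example
///
/// A minimal sketch mapping one page of a hypothetical MMIO peripheral (the address is
/// illustrative and must lie within the platform MMIO range):
///
/// ```ignore
/// map_device(0x1c0_0000, NonZeroUsize::new(0x1000).unwrap())?;
/// ```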
pub fn map_device(addr: usize, size: NonZeroUsize) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let range = addr..(addr + usize::from(size));
    tracker.map_mmio_range(range.clone())
}

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub(crate) struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    image_footer_mapped: bool,
    mmio_sharer: MmioSharer,
}

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance covering the given RAM and MMIO ranges, building and activating
    /// its own page table.
    fn new(total: MemoryRange, mmio_range: MemoryRange) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        let mut page_table = Self::initialize_dynamic_page_tables();
        // Activate dirty state management first, otherwise we may get permission faults immediately
        // after activating the new page table. This has no effect before the new page table is
        // activated because none of the entries in the initial idmap have the DBM flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
        // aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            image_footer_mapped: false,
            mmio_sharer: MmioSharer::new().unwrap(),
        }
    }

    /// Resize the total RAM size.
    ///
    /// This function fails if the tracker contains regions that are not included within the new
    /// size.
    fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocate the address range for a const slice; returns an error on failure.
    fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_allocatable(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice.
    ///
    /// # Safety
    ///
    /// Callers of this method need to ensure that the `range` is valid for mapping as read-only
    /// data.
    unsafe fn alloc_range_outside_main_memory(
        &mut self,
        range: &MemoryRange,
    ) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_no_overlap(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocate the address range for a mutable slice; returns an error on failure.
    fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check_allocatable(&region)?;
        self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    fn alloc_range_mut_noflush(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check_allocatable(&region)?;
        self.page_table.map_data(&get_va_range(range)).map_err(|e| {
            error!("Error during non-flushed mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Maps the image footer, with read-write permissions.
    fn map_image_footer(&mut self) -> Result<MemoryRange> {
        if self.image_footer_mapped {
            return Err(MemoryTrackerError::FooterAlreadyMapped);
        }
        let range = layout::image_footer_range();
        self.page_table.map_data_dbm(&range.clone().into()).map_err(|e| {
            error!("Error during image footer map: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.image_footer_mapped = true;
        Ok(range.start.0..range.end.0)
    }

    /// Allocate the address range for a const slice; returns an error on failure.
    fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocate the address range for a mutable slice; returns an error on failure.
    fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    fn alloc_mut_noflush(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut_noflush(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        if get_mmio_guard().is_some() {
            self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
                error!("Error during lazy MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        } else {
            self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
                error!("Error during MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        }

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the memory region meets the following criteria:
    /// - It is within the range of the `MemoryTracker`.
    /// - It does not overlap with any previously allocated regions.
    /// - The `regions` ArrayVec has sufficient capacity to add it.
    fn check_allocatable(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        self.check_no_overlap(region)
    }

    /// Checks that the given region doesn't overlap with any other previously allocated regions,
    /// and that the regions ArrayVec has capacity to add it.
    fn check_no_overlap(&self, region: &MemoryRegion) -> Result<()> {
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unshares any MMIO region previously shared with the MMIO guard.
    fn unshare_all_mmio(&mut self) -> Result<()> {
        self.mmio_sharer.unshare_all();

        Ok(())
    }

    /// Initialize the shared heap to dynamically share memory from the global allocator.
    fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
        const INIT_CAP: usize = 10;

        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest to share
    /// its memory with the host. Instead, they allow the host to designate part of guest memory
    /// as "shared" ahead of the guest starting its execution. The shared memory region is
    /// indicated in the swiotlb node. On such platforms, use a separate heap to allocate buffers
    /// that can be shared with the host.
    fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap to use heap memory directly.
    ///
    /// When running on "non-protected" hypervisors, which permit the host direct access to guest
    /// memory, there is no need to perform any memory sharing or to allocate buffers from a
    /// dedicated region, so this function instructs the shared pool to use the global allocator.
    fn init_heap_shared_pool(&mut self) -> Result<()> {
        // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
        // using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
        // without any actual "dynamic memory sharing" taking place and, as such, the granule may
        // be set to that of the global_allocator, i.e. a byte.
        self.init_dynamic_shared_pool(size_of::<u8>())
    }

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles a translation fault on a block flagged for lazy MMIO mapping by marking the page
    /// table entry valid and MMIO-guard-mapping the block. Breaks apart a block entry if required.
    pub(crate) fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        let shared_range = self.mmio_sharer.share(addr)?;
        self.map_lazy_mmio_as_valid(&shared_range)?;

        Ok(())
    }

    /// Modify the PTEs corresponding to a given range from (invalid) "lazy MMIO" to valid MMIO.
    ///
    /// Returns an error if any PTE in the range is not an invalid lazy MMIO mapping.
    fn map_lazy_mmio_as_valid(&mut self, page_range: &VaRange) -> Result<()> {
        // This must be safe and free from break-before-make (BBM) violations, given that the
        // initial lazy mapping has the valid bit cleared, and each valid descriptor newly created
        // inside the mapping has the same size and alignment.
        self.page_table
            .modify_range(page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
                let flags = desc.flags().expect("Unsupported PTE flags set");
                if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
                    desc.modify_flags(Attributes::VALID, Attributes::empty());
                    Ok(())
                } else {
                    Err(())
                }
            })
            .map_err(|_| MemoryTrackerError::InvalidPte)
    }

    /// Flush all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have been
        // observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions {
            self.page_table
                .walk_range(&get_va_range(range), &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        if self.image_footer_mapped {
            let range = layout::image_footer_range();
            self.page_table
                .walk_range(&range.into(), &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault on a read-only block by setting its writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
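    ///
    /// # Example
    ///
    /// A sketch of how a synchronous-exception handler might dispatch to this method; the
    /// handler name and fault decoding are illustrative, not the actual vmbase handler (the
    /// same pattern applies to `handle_mmio_fault()` for translation faults on lazy MMIO):
    ///
    /// ```ignore
    /// fn handle_permission_fault_from_exception(far: VirtualAddress) {
    ///     let mut guard = MEMORY.lock();
    ///     let tracker = guard.as_mut().expect("MEMORY is not initialized");
    ///     tracker.handle_permission_fault(far).expect("Failed to set the dirty state");
    /// }
    /// ```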
    pub(crate) fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }

    // TODO(ptosi): Move this and `PageTable` references to crate::arch::aarch64
    /// Produces a `PageTable` that can safely replace the static PTs.
    fn initialize_dynamic_page_tables() -> PageTable {
        let text = layout::text_range();
        let rodata = layout::rodata_range();
        let data_bss = layout::data_bss_range();
        let eh_stack = layout::eh_stack_range();
        let stack = layout::stack_range();
        let console_uart_page = layout::console_uart_page();

        let mut page_table = PageTable::default();

        page_table.map_device(&console_uart_page.into()).unwrap();
        page_table.map_code(&text.into()).unwrap();
        page_table.map_rodata(&rodata.into()).unwrap();
        page_table.map_data(&data_bss.into()).unwrap();
        page_table.map_data(&eh_stack.into()).unwrap();
        page_table.map_data(&stack.into()).unwrap();

        page_table
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}