xref: /aosp_15_r20/external/crosvm/base/src/sys/linux/mmap.rs (revision bb4ee6a4ae7042d18b07a98463b9c8b875e44b39)
1 // Copyright 2017 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! The mmap module provides a safe interface to mmap memory and ensures that munmap is called
6 //! when the mmap object goes out of scope.
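//!
//! # Example
//!
//! A minimal illustrative sketch (not part of the original source); it assumes the crate-level
//! `MemoryMappingBuilder` re-export is in scope.
//!
//! ```ignore
//! use base::MemoryMappingBuilder;
//!
//! // Map 4 KiB of anonymous, shared, read/write memory; it is munmap'd when dropped.
//! let mapping = MemoryMappingBuilder::new(4096).build().unwrap();
//! assert_eq!(mapping.size(), 4096);
//! ```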
7 
8 use std::ptr::null_mut;
9 
10 use libc::c_int;
11 use libc::PROT_READ;
12 use libc::PROT_WRITE;
13 use log::warn;
14 
15 use super::Error as ErrnoError;
16 use crate::pagesize;
17 use crate::AsRawDescriptor;
18 use crate::Descriptor;
19 use crate::MappedRegion;
20 use crate::MemoryMapping as CrateMemoryMapping;
21 use crate::MemoryMappingBuilder;
22 use crate::MmapError as Error;
23 use crate::MmapResult as Result;
24 use crate::Protection;
25 use crate::RawDescriptor;
26 use crate::SafeDescriptor;
27 
28 impl From<Protection> for c_int {
29     #[inline(always)]
30     fn from(p: Protection) -> Self {
31         let mut value = 0;
32         if p.read {
33             value |= PROT_READ
34         }
35         if p.write {
36             value |= PROT_WRITE;
37         }
38         value
39     }
40 }
41 
42 /// Validates that `offset`..`offset+range_size` lies within the bounds of a memory mapping of
43 /// `mmap_size` bytes.  Also checks for any overflow.
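///
/// # Example
///
/// A minimal illustrative sketch (not part of the original source):
///
/// ```ignore
/// // A 10-byte range ending exactly at the end of a 100-byte mapping is accepted...
/// assert!(validate_includes_range(100, 90, 10).is_ok());
/// // ...while a range that runs past the end, or overflows, is rejected.
/// assert!(validate_includes_range(100, 91, 10).is_err());
/// assert!(validate_includes_range(100, usize::MAX, 1).is_err());
/// ```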
44 fn validate_includes_range(mmap_size: usize, offset: usize, range_size: usize) -> Result<()> {
45     // Ensure offset + size doesn't overflow
46     let end_offset = offset
47         .checked_add(range_size)
48         .ok_or(Error::InvalidAddress)?;
49     // Ensure offset + size is within the mapping bounds
50     if end_offset <= mmap_size {
51         Ok(())
52     } else {
53         Err(Error::InvalidAddress)
54     }
55 }
56 
57 impl dyn MappedRegion {
58     /// Calls msync with MS_SYNC on a mapping of `size` bytes starting at `offset` from the start of
59     /// the region.  `offset`..`offset+size` must be contained within the `MappedRegion`.
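    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source), mirroring the
    /// `arena_msync` test below:
    ///
    /// ```ignore
    /// let m = MemoryMappingArena::new(0x40000)?;
    /// // Synchronously flush the first page of the region.
    /// <dyn MappedRegion>::msync(&m, 0, pagesize())?;
    /// ```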
60     pub fn msync(&self, offset: usize, size: usize) -> Result<()> {
61         validate_includes_range(self.size(), offset, size)?;
62 
63         // SAFETY:
64         // Safe because the MemoryMapping/MemoryMappingArena interface ensures our pointer and size
65         // are correct, and we've validated that `offset`..`offset+size` is in the range owned by
66         // this `MappedRegion`.
67         let ret = unsafe {
68             libc::msync(
69                 (self.as_ptr() as usize + offset) as *mut libc::c_void,
70                 size,
71                 libc::MS_SYNC,
72             )
73         };
74         if ret != -1 {
75             Ok(())
76         } else {
77             Err(Error::SystemCallFailed(ErrnoError::last()))
78         }
79     }
80 
81     /// Calls madvise on a mapping of `size` bytes starting at `offset` from the start of
82     /// the region.  `offset`..`offset+size` must be contained within the `MappedRegion`.
83     pub fn madvise(&self, offset: usize, size: usize, advice: libc::c_int) -> Result<()> {
84         validate_includes_range(self.size(), offset, size)?;
85 
86         // SAFETY:
87         // Safe because the MemoryMapping/MemoryMappingArena interface ensures our pointer and size
88         // are correct, and we've validated that `offset`..`offset+size` is in the range owned by
89         // this `MappedRegion`.
90         let ret = unsafe {
91             libc::madvise(
92                 (self.as_ptr() as usize + offset) as *mut libc::c_void,
93                 size,
94                 advice,
95             )
96         };
97         if ret != -1 {
98             Ok(())
99         } else {
100             Err(Error::SystemCallFailed(ErrnoError::last()))
101         }
102     }
103 }
104 
105 /// Wraps an anonymous shared memory mapping in the current process. Provides
106 /// RAII semantics including munmap when no longer needed.
107 #[derive(Debug)]
108 pub struct MemoryMapping {
109     addr: *mut u8,
110     size: usize,
111 }
112 
113 // SAFETY:
114 // Send and Sync aren't automatically inherited for the raw address pointer.
115 // Accessing that pointer is only done through the stateless interface which
116 // allows the object to be shared by multiple threads without a decrease in
117 // safety.
118 unsafe impl Send for MemoryMapping {}
119 // SAFETY: See safety comments for impl Send
120 unsafe impl Sync for MemoryMapping {}
121 
122 impl MemoryMapping {
123     /// Creates an anonymous shared, read/write mapping of `size` bytes.
124     ///
125     /// # Arguments
126     /// * `size` - Size of memory region in bytes.
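    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source):
    ///
    /// ```ignore
    /// // One page of anonymous, shared, read/write memory, unmapped when dropped.
    /// let mapping = MemoryMapping::new(pagesize())?;
    /// assert_eq!(mapping.size(), pagesize());
    /// ```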
127     pub fn new(size: usize) -> Result<MemoryMapping> {
128         MemoryMapping::new_protection(size, None, Protection::read_write())
129     }
130 
131     /// Creates an anonymous shared mapping of `size` bytes with `prot` protection.
132     ///
133     /// # Arguments
134     /// * `size` - Size of memory region in bytes.
135     /// * `align` - Optional alignment for MemoryMapping::addr.
136     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
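    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source):
    ///
    /// ```ignore
    /// // A read-only anonymous mapping with no extra alignment requirement.
    /// let mapping = MemoryMapping::new_protection(pagesize(), None, Protection::read())?;
    /// ```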
137     pub fn new_protection(
138         size: usize,
139         align: Option<u64>,
140         prot: Protection,
141     ) -> Result<MemoryMapping> {
142         // SAFETY:
143         // This is safe because we are creating an anonymous mapping in a place not already used by
144         // any other area in this process.
145         unsafe { MemoryMapping::try_mmap(None, size, align, prot.into(), None) }
146     }
147 
148     /// Maps the first `size` bytes of the given `fd` as read/write.
149     ///
150     /// # Arguments
151     /// * `fd` - File descriptor to mmap from.
152     /// * `size` - Size of memory region in bytes.
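    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source); `file` is a hypothetical
    /// descriptor that implements `AsRawDescriptor` and is at least one page long.
    ///
    /// ```ignore
    /// let mapping = MemoryMapping::from_fd(&file, pagesize())?;
    /// assert_eq!(mapping.size(), pagesize());
    /// ```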
153     pub fn from_fd(fd: &dyn AsRawDescriptor, size: usize) -> Result<MemoryMapping> {
154         MemoryMapping::from_fd_offset(fd, size, 0)
155     }
156 
157     pub fn from_fd_offset(
158         fd: &dyn AsRawDescriptor,
159         size: usize,
160         offset: u64,
161     ) -> Result<MemoryMapping> {
162         MemoryMapping::from_fd_offset_protection(fd, size, offset, Protection::read_write())
163     }
164 
165     /// Maps the `size` bytes starting at `offset` bytes of the given `fd` as read/write.
166     ///
167     /// # Arguments
168     /// * `fd` - File descriptor to mmap from.
169     /// * `size` - Size of memory region in bytes.
170     /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
171     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
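    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source); `file` is a hypothetical
    /// descriptor at least two pages long.
    ///
    /// ```ignore
    /// // Map the file's second page as read-only.
    /// let mapping = MemoryMapping::from_fd_offset_protection(
    ///     &file,
    ///     pagesize(),
    ///     pagesize() as u64,  // offset into `file`
    ///     Protection::read(),
    /// )?;
    /// ```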
172     pub fn from_fd_offset_protection(
173         fd: &dyn AsRawDescriptor,
174         size: usize,
175         offset: u64,
176         prot: Protection,
177     ) -> Result<MemoryMapping> {
178         MemoryMapping::from_fd_offset_protection_populate(fd, size, offset, 0, prot, false)
179     }
180 
181     /// Maps `size` bytes starting at `offset` from the given `fd` as read/write, and requests
182     /// that the pages are pre-populated.
183     /// # Arguments
184     /// * `fd` - File descriptor to mmap from.
185     /// * `size` - Size of memory region in bytes.
186     /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
187     /// * `align` - Alignment for MemoryMapping::addr.
188     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
189     /// * `populate` - Populate (prefault) page tables for a mapping.
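    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source); `file` is a hypothetical
    /// descriptor backing the mapping.
    ///
    /// ```ignore
    /// let mapping = MemoryMapping::from_fd_offset_protection_populate(
    ///     &file,
    ///     pagesize(),
    ///     0,                        // offset into `file`
    ///     2 * 1024 * 1024,          // requested alignment of the mapping address
    ///     Protection::read_write(),
    ///     true,                     // pre-populate (prefault) the page tables
    /// )?;
    /// ```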
190     pub fn from_fd_offset_protection_populate(
191         fd: &dyn AsRawDescriptor,
192         size: usize,
193         offset: u64,
194         align: u64,
195         prot: Protection,
196         populate: bool,
197     ) -> Result<MemoryMapping> {
198         // SAFETY:
199         // This is safe because we are creating an anonymous mapping in a place not already used
200         // by any other area in this process.
201         unsafe {
202             MemoryMapping::try_mmap_populate(
203                 None,
204                 size,
205                 Some(align),
206                 prot.into(),
207                 Some((fd, offset)),
208                 populate,
209             )
210         }
211     }
212 
213     /// Creates an anonymous shared mapping of `size` bytes with `prot` protection.
214     ///
215     /// # Arguments
216     ///
217     /// * `addr` - Memory address to mmap at.
218     /// * `size` - Size of memory region in bytes.
219     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
220     ///
221     /// # Safety
222     ///
223     /// This function should not be called before the caller unmaps any mmap'd regions already
224     /// present at `(addr..addr+size)`.
225     pub unsafe fn new_protection_fixed(
226         addr: *mut u8,
227         size: usize,
228         prot: Protection,
229     ) -> Result<MemoryMapping> {
230         MemoryMapping::try_mmap(Some(addr), size, None, prot.into(), None)
231     }
232 
233     /// Maps the `size` bytes starting at `offset` bytes of the given `fd` with
234     /// `prot` protections.
235     ///
236     /// # Arguments
237     ///
238     /// * `addr` - Memory address to mmap at.
239     /// * `fd` - File descriptor to mmap from.
240     /// * `size` - Size of memory region in bytes.
241     /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
242     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
243     ///
244     /// # Safety
245     ///
246     /// This function should not be called before the caller unmaps any mmap'd regions already
247     /// present at `(addr..addr+size)`.
248     pub unsafe fn from_descriptor_offset_protection_fixed(
249         addr: *mut u8,
250         fd: &dyn AsRawDescriptor,
251         size: usize,
252         offset: u64,
253         prot: Protection,
254     ) -> Result<MemoryMapping> {
255         MemoryMapping::try_mmap(Some(addr), size, None, prot.into(), Some((fd, offset)))
256     }
257 
258     /// Helper that wraps `try_mmap_populate` without requesting MAP_POPULATE.
259     unsafe fn try_mmap(
260         addr: Option<*mut u8>,
261         size: usize,
262         align: Option<u64>,
263         prot: c_int,
264         fd: Option<(&dyn AsRawDescriptor, u64)>,
265     ) -> Result<MemoryMapping> {
266         MemoryMapping::try_mmap_populate(addr, size, align, prot, fd, false)
267     }
268 
269     /// Helper wrapper around libc::mmap that does some basic validation and calls
270     /// madvise with MADV_DONTDUMP on the created mapping.
271     unsafe fn try_mmap_populate(
272         addr: Option<*mut u8>,
273         size: usize,
274         align: Option<u64>,
275         prot: c_int,
276         fd: Option<(&dyn AsRawDescriptor, u64)>,
277         populate: bool,
278     ) -> Result<MemoryMapping> {
279         let mut flags = libc::MAP_SHARED;
280         if populate {
281             flags |= libc::MAP_POPULATE;
282         }
283         // If addr is provided, validate its alignment and set the (FIXED | NORESERVE) flags.
284         let addr = match addr {
285             Some(addr) => {
286                 if (addr as usize) % pagesize() != 0 {
287                     return Err(Error::NotPageAligned);
288                 }
289                 flags |= libc::MAP_FIXED | libc::MAP_NORESERVE;
290                 addr as *mut libc::c_void
291             }
292             None => null_mut(),
293         };
294 
295         // mmap already aligns the returned address to the page size.
296         let align = if align.unwrap_or(0) == pagesize() as u64 {
297             Some(0)
298         } else {
299             align
300         };
301 
302         // If an alignment is requested, reserve an over-sized region and pick an aligned address in it.
303         let (addr, orig_addr, orig_size) = match align {
304             None | Some(0) => (addr, None, None),
305             Some(align) => {
306                 if !addr.is_null() || !align.is_power_of_two() {
307                     return Err(Error::InvalidAlignment);
308                 }
309                 let orig_size = size + align as usize;
310                 let orig_addr = libc::mmap64(
311                     null_mut(),
312                     orig_size,
313                     prot,
314                     libc::MAP_PRIVATE | libc::MAP_NORESERVE | libc::MAP_ANONYMOUS,
315                     -1,
316                     0,
317                 );
318                 if orig_addr == libc::MAP_FAILED {
319                     return Err(Error::SystemCallFailed(ErrnoError::last()));
320                 }
321 
322                 flags |= libc::MAP_FIXED;
323 
324                 let mask = align - 1;
325                 (
326                     (orig_addr.wrapping_add(mask as usize) as u64 & !mask) as *mut libc::c_void,
327                     Some(orig_addr),
328                     Some(orig_size),
329                 )
330             }
331         };
332 
333         // If fd is provided, validate that the fd offset is within bounds. Otherwise this is an
334         // anonymous mapping, so set the (ANONYMOUS | NORESERVE) flags.
335         let (fd, offset) = match fd {
336             Some((fd, offset)) => {
337                 if offset > libc::off64_t::MAX as u64 {
338                     return Err(Error::InvalidOffset);
339                 }
340                 // Map private when the fd is sealed read-only. See the link below for the upstream relaxation.
341                 // - https://lore.kernel.org/bpf/20231013103208.kdffpyerufr4ygnw@quack3/T/
342                 // SAFETY:
343                 // Safe because no third parameter is expected and we check the return result.
344                 let seals = unsafe { libc::fcntl(fd.as_raw_descriptor(), libc::F_GET_SEALS) };
345                 if (seals >= 0) && (seals & libc::F_SEAL_WRITE != 0) {
346                     flags &= !libc::MAP_SHARED;
347                     flags |= libc::MAP_PRIVATE;
348                 }
349                 (fd.as_raw_descriptor(), offset as libc::off64_t)
350             }
351             None => {
352                 flags |= libc::MAP_ANONYMOUS | libc::MAP_NORESERVE;
353                 (-1, 0)
354             }
355         };
356         let addr = libc::mmap64(addr, size, prot, flags, fd, offset);
357         if addr == libc::MAP_FAILED {
358             return Err(Error::SystemCallFailed(ErrnoError::last()));
359         }
360 
361         // If an original mmap exists, we can now remove the unused regions
362         if let Some(orig_addr) = orig_addr {
363             let mut unmap_start = orig_addr as usize;
364             let mut unmap_end = addr as usize;
365             let mut unmap_size = unmap_end - unmap_start;
366 
367             if unmap_size > 0 {
368                 libc::munmap(orig_addr, unmap_size);
369             }
370 
371             unmap_start = addr as usize + size;
372             unmap_end = orig_addr as usize + orig_size.unwrap();
373             unmap_size = unmap_end - unmap_start;
374 
375             if unmap_size > 0 {
376                 libc::munmap(unmap_start as *mut libc::c_void, unmap_size);
377             }
378         }
379 
380         // This is safe because we call madvise with a valid address and size.
381         let _ = libc::madvise(addr, size, libc::MADV_DONTDUMP);
382 
383         // This is safe because KSM's only userspace visible effects are timing
384         // and memory consumption; it doesn't affect rust safety semantics.
385         // KSM is also disabled by default, and this flag is only a hint.
386         let _ = libc::madvise(addr, size, libc::MADV_MERGEABLE);
387 
388         Ok(MemoryMapping {
389             addr: addr as *mut u8,
390             size,
391         })
392     }
393 
394     /// Madvise the kernel to unmap on fork.
395     pub fn use_dontfork(&self) -> Result<()> {
396         // SAFETY:
397         // This is safe because we call madvise with a valid address and size, and we check the
398         // return value.
399         let ret = unsafe {
400             libc::madvise(
401                 self.as_ptr() as *mut libc::c_void,
402                 self.size(),
403                 libc::MADV_DONTFORK,
404             )
405         };
406         if ret == -1 {
407             Err(Error::SystemCallFailed(ErrnoError::last()))
408         } else {
409             Ok(())
410         }
411     }
412 
413     /// Madvise the kernel to use Huge Pages for this mapping.
414     pub fn use_hugepages(&self) -> Result<()> {
415         const SZ_2M: usize = 2 * 1024 * 1024;
416 
417         // THP uses 2M pages, so use THP only on mappings that are at least
418         // 2M in size.
419         if self.size() < SZ_2M {
420             return Ok(());
421         }
422 
423         // SAFETY:
424         // This is safe because we call madvise with a valid address and size, and we check the
425         // return value.
426         let ret = unsafe {
427             libc::madvise(
428                 self.as_ptr() as *mut libc::c_void,
429                 self.size(),
430                 libc::MADV_HUGEPAGE,
431             )
432         };
433         if ret == -1 {
434             Err(Error::SystemCallFailed(ErrnoError::last()))
435         } else {
436             Ok(())
437         }
438     }
439 
440     /// Calls msync with MS_SYNC on the mapping.
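    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source); `file` is a hypothetical
    /// writable descriptor.
    ///
    /// ```ignore
    /// let mapping = MemoryMapping::from_fd(&file, pagesize())?;
    /// // ... modify the mapped bytes ...
    /// mapping.msync()?; // blocks until the dirty pages reach the backing file
    /// ```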
441     pub fn msync(&self) -> Result<()> {
442         // SAFETY:
443         // This is safe since we use the exact address and length of a known
444         // good memory mapping.
445         let ret = unsafe {
446             libc::msync(
447                 self.as_ptr() as *mut libc::c_void,
448                 self.size(),
449                 libc::MS_SYNC,
450             )
451         };
452         if ret == -1 {
453             return Err(Error::SystemCallFailed(ErrnoError::last()));
454         }
455         Ok(())
456     }
457 
458     /// Uses madvise to tell the kernel to remove the specified range.  Subsequent reads
459     /// to the pages in the range will return zero bytes.
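    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source):
    ///
    /// ```ignore
    /// let mapping = MemoryMapping::new(4 * pagesize())?;
    /// // Discard the second page; reading it afterwards returns zeroes.
    /// mapping.remove_range(pagesize(), pagesize())?;
    /// ```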
460     pub fn remove_range(&self, mem_offset: usize, count: usize) -> Result<()> {
461         self.range_end(mem_offset, count)
462             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
463         // SAFETY: Safe because all the args to madvise are valid and the return
464         // value is checked.
465         let ret = unsafe {
466             // madvising away the region is the same as the guest changing it.
467             // Next time it is read, it may return zero pages.
468             libc::madvise(
469                 (self.addr as usize + mem_offset) as *mut _,
470                 count,
471                 libc::MADV_REMOVE,
472             )
473         };
474         if ret < 0 {
475             Err(Error::SystemCallFailed(super::Error::last()))
476         } else {
477             Ok(())
478         }
479     }
480 
481     /// Tell the kernel to readahead the range.
482     ///
483     /// This does not block the thread waiting for I/O on the backing file, and it does not
484     /// guarantee that the pages stay resident unless they are mlock(2)ed by
485     /// `lock_on_fault_unchecked()`.
486     ///
487     /// The `mem_offset` and `count` must be validated by caller.
488     ///
489     /// # Arguments
490     ///
491     /// * `mem_offset` - The offset of the head of the range.
492     /// * `count` - The size in bytes of the range.
493     pub fn async_prefetch(&self, mem_offset: usize, count: usize) -> Result<()> {
494         // Validation
495         self.range_end(mem_offset, count)
496             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
497         // SAFETY:
498         // Safe because populating the pages from the backed file does not affect the Rust memory
499         // safety.
500         let ret = unsafe {
501             libc::madvise(
502                 (self.addr as usize + mem_offset) as *mut _,
503                 count,
504                 libc::MADV_WILLNEED,
505             )
506         };
507         if ret < 0 {
508             Err(Error::SystemCallFailed(super::Error::last()))
509         } else {
510             Ok(())
511         }
512     }
513 
514     /// Tell the kernel to drop the page cache.
515     ///
516     /// This cannot be applied to locked pages.
517     ///
518     /// The `mem_offset` and `count` must be validated by caller.
519     ///
520     /// NOTE: This function has destructive semantics. It throws away data in the page cache without
521     /// writing it to the backing file. If the data is important, the caller should ensure it is
522     /// written to disk before calling this function or should use MADV_PAGEOUT instead.
523     ///
524     /// # Arguments
525     ///
526     /// * `mem_offset` - The offset of the head of the range.
527     /// * `count` - The size in bytes of the range.
528     pub fn drop_page_cache(&self, mem_offset: usize, count: usize) -> Result<()> {
529         // Validation
530         self.range_end(mem_offset, count)
531             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
532         // SAFETY:
533         // Safe because dropping the page cache does not affect the Rust memory safety.
534         let ret = unsafe {
535             libc::madvise(
536                 (self.addr as usize + mem_offset) as *mut _,
537                 count,
538                 libc::MADV_DONTNEED,
539             )
540         };
541         if ret < 0 {
542             Err(Error::SystemCallFailed(super::Error::last()))
543         } else {
544             Ok(())
545         }
546     }
547 
548     /// Lock the resident pages in the range not to be swapped out.
549     ///
550     /// The remaining nonresident pages are locked when they are populated.
551     ///
552     /// The `mem_offset` and `count` must be validated by caller.
553     ///
554     /// # Arguments
555     ///
556     /// * `mem_offset` - The offset of the head of the range.
557     /// * `count` - The size in bytes of the range.
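    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source):
    ///
    /// ```ignore
    /// let mapping = MemoryMapping::new(4 * pagesize())?;
    /// // Keep the first two pages resident once they have been faulted in...
    /// mapping.lock_on_fault(0, 2 * pagesize())?;
    /// // ...and release the lock again later.
    /// mapping.unlock(0, 2 * pagesize())?;
    /// ```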
558     pub fn lock_on_fault(&self, mem_offset: usize, count: usize) -> Result<()> {
559         // Validation
560         self.range_end(mem_offset, count)
561             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
562         let addr = self.addr as usize + mem_offset;
563         // SAFETY:
564         // Safe because MLOCK_ONFAULT only affects the swap behavior of the kernel, so it has no
565         // impact on rust semantics.
566         // let ret = unsafe { libc::mlock2(addr as *mut _, count, libc::MLOCK_ONFAULT) };
567         // ANDROID(b/274805769): Android's libc doesn't provide mlock2(), so we need to make the syscall directly.
568         let ret = unsafe {
569             libc::syscall(
570                 libc::SYS_mlock2,
571                 addr as *mut libc::c_void,
572                 count,
573                 libc::MLOCK_ONFAULT,
574             )
575         };
576         if ret < 0 {
577             let errno = super::Error::last();
578             warn!(
579                 "failed to mlock at {:#x} with length {}: {}",
580                 addr as u64,
581                 count,
582                 errno,
583             );
584             Err(Error::SystemCallFailed(errno))
585         } else {
586             Ok(())
587         }
588     }
589 
590     /// Unlock the range of pages.
591     ///
592     /// Unlocking non-locked pages does not fail.
593     ///
594     /// The `mem_offset` and `count` must be validated by caller.
595     ///
596     /// # Arguments
597     ///
598     /// * `mem_offset` - The offset of the head of the range.
599     /// * `count` - The size in bytes of the range.
600     pub fn unlock(&self, mem_offset: usize, count: usize) -> Result<()> {
601         // Validation
602         self.range_end(mem_offset, count)
603             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
604         // SAFETY:
605         // Safe because munlock(2) does not affect the Rust memory safety.
606         let ret = unsafe { libc::munlock((self.addr as usize + mem_offset) as *mut _, count) };
607         if ret < 0 {
608             Err(Error::SystemCallFailed(super::Error::last()))
609         } else {
610             Ok(())
611         }
612     }
613 
614     // Check that offset+count is valid and return the sum.
615     pub(crate) fn range_end(&self, offset: usize, count: usize) -> Result<usize> {
616         let mem_end = offset.checked_add(count).ok_or(Error::InvalidAddress)?;
617         if mem_end > self.size() {
618             return Err(Error::InvalidAddress);
619         }
620         Ok(mem_end)
621     }
622 }
623 
624 // SAFETY:
625 // Safe because the pointer and size point to a memory range owned by this MemoryMapping that won't
626 // be unmapped until it's Dropped.
627 unsafe impl MappedRegion for MemoryMapping {
628     fn as_ptr(&self) -> *mut u8 {
629         self.addr
630     }
631 
632     fn size(&self) -> usize {
633         self.size
634     }
635 }
636 
637 impl Drop for MemoryMapping {
638     fn drop(&mut self) {
639         // SAFETY:
640         // This is safe because we mmap the area at addr ourselves, and nobody
641         // else is holding a reference to it.
642         unsafe {
643             libc::munmap(self.addr as *mut libc::c_void, self.size);
644         }
645     }
646 }
647 
648 /// Tracks fixed memory mappings within an anonymous, fixed-size memory-mapped arena
649 /// in the current process.
650 pub struct MemoryMappingArena {
651     addr: *mut u8,
652     size: usize,
653 }
654 
655 // SAFETY:
656 // Send and Sync aren't automatically inherited for the raw address pointer.
657 // Accessing that pointer is only done through the stateless interface which
658 // allows the object to be shared by multiple threads without a decrease in
659 // safety.
660 unsafe impl Send for MemoryMappingArena {}
661 // SAFETY: See safety comments for impl Send
662 unsafe impl Sync for MemoryMappingArena {}
663 
664 impl MemoryMappingArena {
665     /// Creates an mmap arena of `size` bytes.
666     ///
667     /// # Arguments
668     /// * `size` - Size of memory region in bytes.
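    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source):
    ///
    /// ```ignore
    /// // Reserve a 16-page arena, then back its first four pages with anonymous memory.
    /// let mut arena = MemoryMappingArena::new(16 * pagesize())?;
    /// arena.add_anon(0, 4 * pagesize())?;
    /// ```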
669     pub fn new(size: usize) -> Result<MemoryMappingArena> {
670         // Reserve the arena's memory using an anonymous read-only mmap.
671         MemoryMapping::new_protection(size, None, Protection::read()).map(From::from)
672     }
673 
674     /// Anonymously maps `size` bytes at `offset` bytes from the start of the arena
675     /// with `prot` protections. `offset` must be page aligned.
676     ///
677     /// # Arguments
678     /// * `offset` - Page aligned offset into the arena in bytes.
679     /// * `size` - Size of memory region in bytes.
680     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
681     pub fn add_anon_protection(
682         &mut self,
683         offset: usize,
684         size: usize,
685         prot: Protection,
686     ) -> Result<()> {
687         self.try_add(offset, size, prot, None)
688     }
689 
690     /// Anonymously maps `size` bytes at `offset` bytes from the start of the arena.
691     /// `offset` must be page aligned.
692     ///
693     /// # Arguments
694     /// * `offset` - Page aligned offset into the arena in bytes.
695     /// * `size` - Size of memory region in bytes.
696     pub fn add_anon(&mut self, offset: usize, size: usize) -> Result<()> {
697         self.add_anon_protection(offset, size, Protection::read_write())
698     }
699 
700     /// Maps `size` bytes from the start of the given `fd` at `offset` bytes from
701     /// the start of the arena. `offset` must be page aligned.
702     ///
703     /// # Arguments
704     /// * `offset` - Page aligned offset into the arena in bytes.
705     /// * `size` - Size of memory region in bytes.
706     /// * `fd` - File descriptor to mmap from.
707     pub fn add_fd(&mut self, offset: usize, size: usize, fd: &dyn AsRawDescriptor) -> Result<()> {
708         self.add_fd_offset(offset, size, fd, 0)
709     }
710 
711     /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
712     /// at `offset` bytes from the start of the arena. `offset` must be page aligned.
713     ///
714     /// # Arguments
715     /// * `offset` - Page aligned offset into the arena in bytes.
716     /// * `size` - Size of memory region in bytes.
717     /// * `fd` - File descriptor to mmap from.
718     /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
719     pub fn add_fd_offset(
720         &mut self,
721         offset: usize,
722         size: usize,
723         fd: &dyn AsRawDescriptor,
724         fd_offset: u64,
725     ) -> Result<()> {
726         self.add_fd_offset_protection(offset, size, fd, fd_offset, Protection::read_write())
727     }
728 
729     /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
730     /// at `offset` bytes from the start of the arena with `prot` protections.
731     /// `offset` must be page aligned.
732     ///
733     /// # Arguments
734     /// * `offset` - Page aligned offset into the arena in bytes.
735     /// * `size` - Size of memory region in bytes.
736     /// * `fd` - File descriptor to mmap from.
737     /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
738     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
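    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source); `arena` and `file` are
    /// hypothetical.
    ///
    /// ```ignore
    /// // Map the file's second page, read-only, into the arena's second page.
    /// arena.add_fd_offset_protection(
    ///     pagesize(),          // offset into the arena
    ///     pagesize(),          // size
    ///     &file,
    ///     pagesize() as u64,   // offset into `file`
    ///     Protection::read(),
    /// )?;
    /// ```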
739     pub fn add_fd_offset_protection(
740         &mut self,
741         offset: usize,
742         size: usize,
743         fd: &dyn AsRawDescriptor,
744         fd_offset: u64,
745         prot: Protection,
746     ) -> Result<()> {
747         self.try_add(offset, size, prot, Some((fd, fd_offset)))
748     }
749 
750     /// Helper method that calls the appropriate MemoryMapping constructor and adds
751     /// the resulting map into the arena.
752     fn try_add(
753         &mut self,
754         offset: usize,
755         size: usize,
756         prot: Protection,
757         fd: Option<(&dyn AsRawDescriptor, u64)>,
758     ) -> Result<()> {
759         // Ensure offset is page-aligned
760         if offset % pagesize() != 0 {
761             return Err(Error::NotPageAligned);
762         }
763         validate_includes_range(self.size(), offset, size)?;
764 
765         // SAFETY:
766         // This is safe since the range has been validated.
767         let mmap = unsafe {
768             match fd {
769                 Some((fd, fd_offset)) => MemoryMapping::from_descriptor_offset_protection_fixed(
770                     self.addr.add(offset),
771                     fd,
772                     size,
773                     fd_offset,
774                     prot,
775                 )?,
776                 None => MemoryMapping::new_protection_fixed(self.addr.add(offset), size, prot)?,
777             }
778         };
779 
780         // This mapping will get automatically removed when we drop the whole arena.
781         std::mem::forget(mmap);
782         Ok(())
783     }
784 
785     /// Removes `size` bytes at `offset` bytes from the start of the arena. `offset` must be page
786     /// aligned.
787     ///
788     /// # Arguments
789     /// * `offset` - Page aligned offset into the arena in bytes.
790     /// * `size` - Size of memory region in bytes.
791     pub fn remove(&mut self, offset: usize, size: usize) -> Result<()> {
792         self.try_add(offset, size, Protection::read(), None)
793     }
794 }
795 
796 // SAFETY:
797 // Safe because the pointer and size point to a memory range owned by this MemoryMappingArena that
798 // won't be unmapped until it's Dropped.
799 unsafe impl MappedRegion for MemoryMappingArena {
800     fn as_ptr(&self) -> *mut u8 {
801         self.addr
802     }
803 
804     fn size(&self) -> usize {
805         self.size
806     }
807 
808     fn add_fd_mapping(
809         &mut self,
810         offset: usize,
811         size: usize,
812         fd: &dyn AsRawDescriptor,
813         fd_offset: u64,
814         prot: Protection,
815     ) -> Result<()> {
816         self.add_fd_offset_protection(offset, size, fd, fd_offset, prot)
817     }
818 
819     fn remove_mapping(&mut self, offset: usize, size: usize) -> Result<()> {
820         self.remove(offset, size)
821     }
822 }
823 
824 impl From<MemoryMapping> for MemoryMappingArena {
825     fn from(mmap: MemoryMapping) -> Self {
826         let addr = mmap.as_ptr();
827         let size = mmap.size();
828 
829         // Forget the original mapping because the `MemoryMappingArena` will take care of calling
830         // `munmap` when it is dropped.
831         std::mem::forget(mmap);
832         MemoryMappingArena { addr, size }
833     }
834 }
835 
836 impl From<CrateMemoryMapping> for MemoryMappingArena {
837     fn from(mmap: CrateMemoryMapping) -> Self {
838         MemoryMappingArena::from(mmap.mapping)
839     }
840 }
841 
842 impl Drop for MemoryMappingArena {
843     fn drop(&mut self) {
844         // SAFETY:
845         // This is safe because we own this memory range, and nobody else is holding a reference to
846         // it.
847         unsafe {
848             libc::munmap(self.addr as *mut libc::c_void, self.size);
849         }
850     }
851 }
852 
853 impl CrateMemoryMapping {
854     pub fn use_dontfork(&self) -> Result<()> {
855         self.mapping.use_dontfork()
856     }
857 
858     pub fn use_hugepages(&self) -> Result<()> {
859         self.mapping.use_hugepages()
860     }
861 
862     pub fn from_raw_ptr(addr: RawDescriptor, size: usize) -> Result<CrateMemoryMapping> {
863         MemoryMapping::from_fd_offset(&Descriptor(addr), size, 0).map(|mapping| {
864             CrateMemoryMapping {
865                 mapping,
866                 _file_descriptor: None,
867             }
868         })
869     }
870 }
871 
872 pub trait MemoryMappingUnix {
873     /// Remove the specified range from the mapping.
874     fn remove_range(&self, mem_offset: usize, count: usize) -> Result<()>;
875     /// Tell the kernel to readahead the range.
876     fn async_prefetch(&self, mem_offset: usize, count: usize) -> Result<()>;
877     /// Tell the kernel to drop the page cache.
878     fn drop_page_cache(&self, mem_offset: usize, count: usize) -> Result<()>;
879     /// Lock the resident pages in the range not to be swapped out.
880     fn lock_on_fault(&self, mem_offset: usize, count: usize) -> Result<()>;
881     /// Unlock the range of pages.
882     fn unlock(&self, mem_offset: usize, count: usize) -> Result<()>;
883     /// Disable host swap for this mapping.
884     fn lock_all(&self) -> Result<()>;
885 }
886 
887 impl MemoryMappingUnix for CrateMemoryMapping {
888     fn remove_range(&self, mem_offset: usize, count: usize) -> Result<()> {
889         self.mapping.remove_range(mem_offset, count)
890     }
891     fn async_prefetch(&self, mem_offset: usize, count: usize) -> Result<()> {
892         self.mapping.async_prefetch(mem_offset, count)
893     }
894     fn drop_page_cache(&self, mem_offset: usize, count: usize) -> Result<()> {
895         self.mapping.drop_page_cache(mem_offset, count)
896     }
897     fn lock_on_fault(&self, mem_offset: usize, count: usize) -> Result<()> {
898         self.mapping.lock_on_fault(mem_offset, count)
899     }
900     fn unlock(&self, mem_offset: usize, count: usize) -> Result<()> {
901         self.mapping.unlock(mem_offset, count)
902     }
903     fn lock_all(&self) -> Result<()> {
904         self.mapping.lock_on_fault(0, self.mapping.size())
905     }
906 }
907 
908 pub trait MemoryMappingBuilderUnix<'a> {
909     #[allow(clippy::wrong_self_convention)]
910     fn from_descriptor(self, descriptor: &'a dyn AsRawDescriptor) -> MemoryMappingBuilder;
911 }
912 
913 impl<'a> MemoryMappingBuilderUnix<'a> for MemoryMappingBuilder<'a> {
914     /// Build the memory mapping backed by the specified descriptor.
915     ///
916     /// Default: Create a new memory mapping.
917     #[allow(clippy::wrong_self_convention)]
918     fn from_descriptor(mut self, descriptor: &'a dyn AsRawDescriptor) -> MemoryMappingBuilder {
919         self.descriptor = Some(descriptor);
920         self
921     }
922 }
923 
924 impl<'a> MemoryMappingBuilder<'a> {
925     /// Request that the mapped pages be pre-populated (prefaulted).
926     ///
927     /// Default: Do not populate
928     pub fn populate(mut self) -> MemoryMappingBuilder<'a> {
929         self.populate = true;
930         self
931     }
932 
933     /// Build a MemoryMapping from the provided options.
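    ///
    /// # Example
    ///
    /// A minimal illustrative sketch (not part of the original source); `file` is a hypothetical
    /// descriptor to map.
    ///
    /// ```ignore
    /// let mapping = MemoryMappingBuilder::new(pagesize())
    ///     .from_descriptor(&file)
    ///     .populate()
    ///     .build()?;
    /// ```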
934     pub fn build(self) -> Result<CrateMemoryMapping> {
935         match self.descriptor {
936             None => {
937                 if self.populate {
938                     // Population not supported for new mmaps
939                     return Err(Error::InvalidArgument);
940                 }
941                 MemoryMappingBuilder::wrap(
942                     MemoryMapping::new_protection(
943                         self.size,
944                         self.align,
945                         self.protection.unwrap_or_else(Protection::read_write),
946                     )?,
947                     None,
948                 )
949             }
950             Some(descriptor) => MemoryMappingBuilder::wrap(
951                 MemoryMapping::from_fd_offset_protection_populate(
952                     descriptor,
953                     self.size,
954                     self.offset.unwrap_or(0),
955                     self.align.unwrap_or(0),
956                     self.protection.unwrap_or_else(Protection::read_write),
957                     self.populate,
958                 )?,
959                 None,
960             ),
961         }
962     }
963 
964     pub(crate) fn wrap(
965         mapping: MemoryMapping,
966         file_descriptor: Option<&'a dyn AsRawDescriptor>,
967     ) -> Result<CrateMemoryMapping> {
968         let file_descriptor = match file_descriptor {
969             Some(descriptor) => Some(
970                 SafeDescriptor::try_from(descriptor)
971                     .map_err(|_| Error::SystemCallFailed(ErrnoError::last()))?,
972             ),
973             None => None,
974         };
975         Ok(CrateMemoryMapping {
976             mapping,
977             _file_descriptor: file_descriptor,
978         })
979     }
980 }
981 
982 #[cfg(test)]
983 mod tests {
984     use tempfile::tempfile;
985 
986     use super::*;
987     use crate::descriptor::Descriptor;
988     use crate::VolatileMemory;
989     use crate::VolatileMemoryError;
990 
991     #[test]
992     fn basic_map() {
993         let m = MemoryMappingBuilder::new(1024).build().unwrap();
994         assert_eq!(1024, m.size());
995     }
996 
997     #[test]
998     fn map_invalid_size() {
999         let res = MemoryMappingBuilder::new(0).build().unwrap_err();
1000         if let Error::SystemCallFailed(e) = res {
1001             assert_eq!(e.errno(), libc::EINVAL);
1002         } else {
1003             panic!("unexpected error: {}", res);
1004         }
1005     }
1006 
1007     #[test]
1008     fn map_invalid_fd() {
1009         let fd = Descriptor(-1);
1010         let res = MemoryMapping::from_fd(&fd, 1024).unwrap_err();
1011         if let Error::SystemCallFailed(e) = res {
1012             assert_eq!(e.errno(), libc::EBADF);
1013         } else {
1014             panic!("unexpected error: {}", res);
1015         }
1016     }
1017 
1018     #[test]
1019     fn test_write_past_end() {
1020         let m = MemoryMappingBuilder::new(5).build().unwrap();
1021         let res = m.write_slice(&[1, 2, 3, 4, 5, 6], 0);
1022         assert!(res.is_ok());
1023         assert_eq!(res.unwrap(), 5);
1024     }
1025 
1026     #[test]
1027     fn slice_size() {
1028         let m = MemoryMappingBuilder::new(5).build().unwrap();
1029         let s = m.get_slice(2, 3).unwrap();
1030         assert_eq!(s.size(), 3);
1031     }
1032 
1033     #[test]
1034     fn slice_addr() {
1035         let m = MemoryMappingBuilder::new(5).build().unwrap();
1036         let s = m.get_slice(2, 3).unwrap();
1037         // SAFETY: all addresses are known to exist.
1038         assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) });
1039     }
1040 
1041     #[test]
1042     fn slice_overflow_error() {
1043         let m = MemoryMappingBuilder::new(5).build().unwrap();
1044         let res = m.get_slice(usize::MAX, 3).unwrap_err();
1045         assert_eq!(
1046             res,
1047             VolatileMemoryError::Overflow {
1048                 base: usize::MAX,
1049                 offset: 3,
1050             }
1051         );
1052     }
1053     #[test]
1054     fn slice_oob_error() {
1055         let m = MemoryMappingBuilder::new(5).build().unwrap();
1056         let res = m.get_slice(3, 3).unwrap_err();
1057         assert_eq!(res, VolatileMemoryError::OutOfBounds { addr: 6 });
1058     }
1059 
1060     #[test]
1061     fn from_fd_offset_invalid() {
1062         let fd = tempfile().unwrap();
1063         let res =
1064             MemoryMapping::from_fd_offset(&fd, 4096, (libc::off64_t::MAX as u64) + 1).unwrap_err();
1065         match res {
1066             Error::InvalidOffset => {}
1067             e => panic!("unexpected error: {}", e),
1068         }
1069     }
1070 
1071     #[test]
1072     fn arena_new() {
1073         let m = MemoryMappingArena::new(0x40000).unwrap();
1074         assert_eq!(m.size(), 0x40000);
1075     }
1076 
1077     #[test]
1078     fn arena_add() {
1079         let mut m = MemoryMappingArena::new(0x40000).unwrap();
1080         assert!(m.add_anon(0, pagesize() * 4).is_ok());
1081     }
1082 
1083     #[test]
1084     fn arena_remove() {
1085         let mut m = MemoryMappingArena::new(0x40000).unwrap();
1086         assert!(m.add_anon(0, pagesize() * 4).is_ok());
1087         assert!(m.remove(0, pagesize()).is_ok());
1088         assert!(m.remove(0, pagesize() * 2).is_ok());
1089     }
1090 
1091     #[test]
1092     fn arena_add_alignment_error() {
1093         let mut m = MemoryMappingArena::new(pagesize() * 2).unwrap();
1094         assert!(m.add_anon(0, 0x100).is_ok());
1095         let res = m.add_anon(pagesize() + 1, 0x100).unwrap_err();
1096         match res {
1097             Error::NotPageAligned => {}
1098             e => panic!("unexpected error: {}", e),
1099         }
1100     }
1101 
1102     #[test]
1103     fn arena_add_oob_error() {
1104         let mut m = MemoryMappingArena::new(pagesize()).unwrap();
1105         let res = m.add_anon(0, pagesize() + 1).unwrap_err();
1106         match res {
1107             Error::InvalidAddress => {}
1108             e => panic!("unexpected error: {}", e),
1109         }
1110     }
1111 
1112     #[test]
1113     fn arena_add_overlapping() {
1114         let ps = pagesize();
1115         let mut m =
1116             MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
1117         m.add_anon(ps * 4, ps * 4)
1118             .expect("failed to add sub-mapping");
1119 
1120         // Overlap in the front.
1121         m.add_anon(ps * 2, ps * 3)
1122             .expect("failed to add front overlapping sub-mapping");
1123 
1124         // Overlap in the back.
1125         m.add_anon(ps * 7, ps * 3)
1126             .expect("failed to add back overlapping sub-mapping");
1127 
1128         // Overlap the back of the first mapping, all of the middle mapping, and the front of the
1129         // last mapping.
1130         m.add_anon(ps * 3, ps * 6)
1131             .expect("failed to add mapping that overlaps several mappings");
1132     }
1133 
1134     #[test]
1135     fn arena_remove_overlapping() {
1136         let ps = pagesize();
1137         let mut m =
1138             MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
1139         m.add_anon(ps * 4, ps * 4)
1140             .expect("failed to add sub-mapping");
1141         m.add_anon(ps * 2, ps * 2)
1142             .expect("failed to add front overlapping sub-mapping");
1143         m.add_anon(ps * 8, ps * 2)
1144             .expect("failed to add back overlapping sub-mapping");
1145 
1146         // Remove the back of the first mapping and the front of the second.
1147         m.remove(ps * 3, ps * 2)
1148             .expect("failed to remove front overlapping mapping");
1149 
1150         // Remove the back of the second mapping and the front of the third.
1151         m.remove(ps * 7, ps * 2)
1152             .expect("failed to remove back overlapping mapping");
1153 
1154         // Remove a mapping that completely overlaps the middle mapping.
1155         m.remove(ps * 5, ps * 2)
1156             .expect("failed to remove fully overlapping mapping");
1157     }
1158 
1159     #[test]
1160     fn arena_remove_unaligned() {
1161         let ps = pagesize();
1162         let mut m =
1163             MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
1164 
1165         m.add_anon(0, ps).expect("failed to add mapping");
1166         m.remove(0, ps - 1)
1167             .expect("failed to remove unaligned mapping");
1168     }
1169 
1170     #[test]
1171     fn arena_msync() {
1172         let size = 0x40000;
1173         let m = MemoryMappingArena::new(size).unwrap();
1174         let ps = pagesize();
1175         <dyn MappedRegion>::msync(&m, 0, ps).unwrap();
1176         <dyn MappedRegion>::msync(&m, 0, size).unwrap();
1177         <dyn MappedRegion>::msync(&m, ps, size - ps).unwrap();
1178         let res = <dyn MappedRegion>::msync(&m, ps, size).unwrap_err();
1179         match res {
1180             Error::InvalidAddress => {}
1181             e => panic!("unexpected error: {}", e),
1182         }
1183     }
1184 
1185     #[test]
1186     fn arena_madvise() {
1187         let size = 0x40000;
1188         let mut m = MemoryMappingArena::new(size).unwrap();
1189         m.add_anon_protection(0, size, Protection::read_write())
1190             .expect("failed to add writable protection for madvise MADV_REMOVE");
1191         let ps = pagesize();
1192         <dyn MappedRegion>::madvise(&m, 0, ps, libc::MADV_PAGEOUT).unwrap();
1193         <dyn MappedRegion>::madvise(&m, 0, size, libc::MADV_PAGEOUT).unwrap();
1194         <dyn MappedRegion>::madvise(&m, ps, size - ps, libc::MADV_REMOVE).unwrap();
1195         let res = <dyn MappedRegion>::madvise(&m, ps, size, libc::MADV_PAGEOUT).unwrap_err();
1196         match res {
1197             Error::InvalidAddress => {}
1198             e => panic!("unexpected error: {}", e),
1199         }
1200     }
1201 }
1202