// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Track memory regions that are mapped to the guest VM.

use std::convert::AsRef;
use std::convert::TryFrom;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use std::marker::Send;
use std::marker::Sync;
use std::result;
use std::sync::Arc;

use anyhow::bail;
use anyhow::Context;
use base::pagesize;
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::Error as SysError;
use base::MappedRegion;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MmapError;
use base::RawDescriptor;
use base::SharedMemory;
use base::VolatileMemory;
use base::VolatileMemoryError;
use base::VolatileSlice;
use cros_async::mem;
use cros_async::BackingMemory;
use remain::sorted;
use thiserror::Error;
use zerocopy::AsBytes;
use zerocopy::FromBytes;

use crate::guest_address::GuestAddress;

mod sys;
pub use sys::MemoryPolicy;

#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("invalid guest address {0}")]
    InvalidGuestAddress(GuestAddress),
    #[error("invalid offset {0}")]
    InvalidOffset(u64),
    #[error("size {0} must not be zero")]
    InvalidSize(usize),
    #[error("invalid guest memory access at addr={0}: {1}")]
    MemoryAccess(GuestAddress, #[source] MmapError),
    #[error("failed to set seals on shm region: {0}")]
    MemoryAddSealsFailed(#[source] SysError),
    #[error("failed to create shm region: {0}")]
    MemoryCreationFailed(#[source] SysError),
    #[error("failed to map guest memory: {0}")]
    MemoryMappingFailed(#[source] MmapError),
    #[error("guest memory region {0}+{1:#x} is not page aligned")]
    MemoryNotAligned(GuestAddress, u64),
    #[error("memory regions overlap")]
    MemoryRegionOverlap,
    #[error("memory region size {0} is too large")]
    MemoryRegionTooLarge(u128),
    #[error("incomplete read of {completed} instead of {expected} bytes")]
    ShortRead { expected: usize, completed: usize },
    #[error("incomplete write of {completed} instead of {expected} bytes")]
    ShortWrite { expected: usize, completed: usize },
    #[error("DescriptorChain split is out of bounds: {0}")]
    SplitOutOfBounds(usize),
    #[error("{0}")]
    VolatileMemoryAccess(#[source] VolatileMemoryError),
}

pub type Result<T> = result::Result<T, Error>;

/// A file-like object backing `MemoryRegion`.
#[derive(Clone, Debug)]
pub enum BackingObject {
    Shm(Arc<SharedMemory>),
    File(Arc<File>),
}

impl AsRawDescriptor for BackingObject {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        match self {
            Self::Shm(shm) => shm.as_raw_descriptor(),
            Self::File(f) => f.as_raw_descriptor(),
        }
    }
}

impl AsRef<dyn AsRawDescriptor + Sync + Send> for BackingObject {
    fn as_ref(&self) -> &(dyn AsRawDescriptor + Sync + Send + 'static) {
        match self {
            BackingObject::Shm(shm) => shm.as_ref(),
            BackingObject::File(f) => f.as_ref(),
        }
    }
}

/// Information about a memory region, as yielded by `GuestMemory::regions()`.
pub struct MemoryRegionInformation<'a> {
    pub index: usize,
    pub guest_addr: GuestAddress,
    pub size: usize,
    pub host_addr: usize,
    pub shm: &'a BackingObject,
    pub shm_offset: u64,
    pub options: MemoryRegionOptions,
}

#[sorted]
#[derive(Clone, Copy, Debug, Default, PartialOrd, PartialEq, Eq, Ord)]
pub enum MemoryRegionPurpose {
    // General purpose guest memory
    #[default]
    GuestMemoryRegion,
    ProtectedFirmwareRegion,
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    StaticSwiotlbRegion,
}

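/// Per-region properties, built with the methods below.
///
/// A minimal sketch of the builder style; the alignment value is illustrative
/// (e.g. a 2 MiB huge-page block), not a crosvm default:
///
/// ```
/// use vm_memory::guest_memory::{MemoryRegionOptions, MemoryRegionPurpose};
///
/// let opts = MemoryRegionOptions::new()
///     .purpose(MemoryRegionPurpose::GuestMemoryRegion)
///     .align(0x20_0000);
/// assert_eq!(opts.align, 0x20_0000);
/// ```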
#[derive(Clone, Copy, Debug, Default, PartialOrd, PartialEq, Eq, Ord)]
pub struct MemoryRegionOptions {
    /// Some hypervisors (presently: Gunyah) need explicit knowledge about
    /// which memory region is used for protected firmware, static swiotlb,
    /// or general purpose guest memory.
    pub purpose: MemoryRegionPurpose,
    /// Alignment for the mapping of this region. This is intended for arm64
    /// KVM support, where a block alignment is required for transparent
    /// huge-pages support.
    pub align: u64,
}

impl MemoryRegionOptions {
    pub fn new() -> MemoryRegionOptions {
        Default::default()
    }

    pub fn purpose(mut self, purpose: MemoryRegionPurpose) -> Self {
        self.purpose = purpose;
        self
    }

    pub fn align(mut self, alignment: u64) -> Self {
        self.align = alignment;
        self
    }
}

/// A region of memory mapped into the guest.
/// Holds the memory mapping with its offset in guest memory.
/// Also holds the backing object for the mapping and the offset in that object of the mapping.
#[derive(Debug)]
pub struct MemoryRegion {
    mapping: MemoryMapping,
    guest_base: GuestAddress,

    shared_obj: BackingObject,
    obj_offset: u64,

    options: MemoryRegionOptions,
}

impl MemoryRegion {
    /// Creates a new MemoryRegion using the given SharedMemory object to later be attached to a VM
    /// at `guest_base` address in the guest.
    pub fn new_from_shm(
        size: u64,
        guest_base: GuestAddress,
        offset: u64,
        shm: Arc<SharedMemory>,
    ) -> Result<Self> {
        let mapping = MemoryMappingBuilder::new(size as usize)
            .from_shared_memory(shm.as_ref())
            .offset(offset)
            .build()
            .map_err(Error::MemoryMappingFailed)?;
        Ok(MemoryRegion {
            mapping,
            guest_base,
            shared_obj: BackingObject::Shm(shm),
            obj_offset: offset,
            options: Default::default(),
        })
    }

    /// Creates a new MemoryRegion using the given file, to be made available later at the
    /// `guest_base` address in the guest.
    pub fn new_from_file(
        size: u64,
        guest_base: GuestAddress,
        offset: u64,
        file: Arc<File>,
    ) -> Result<Self> {
        let mapping = MemoryMappingBuilder::new(size as usize)
            .from_file(&file)
            .offset(offset)
            .build()
            .map_err(Error::MemoryMappingFailed)?;
        Ok(MemoryRegion {
            mapping,
            guest_base,
            shared_obj: BackingObject::File(file),
            obj_offset: offset,
            options: Default::default(),
        })
    }

    fn start(&self) -> GuestAddress {
        self.guest_base
    }

    fn end(&self) -> GuestAddress {
        // unchecked_add is safe as the region bounds were checked when it was created.
        self.guest_base.unchecked_add(self.mapping.size() as u64)
    }

    fn contains(&self, addr: GuestAddress) -> bool {
        addr >= self.guest_base && addr < self.end()
    }
}

/// Tracks memory regions and where they are mapped in the guest, along with shm
/// descriptors of the underlying memory regions.
#[derive(Clone, Debug)]
pub struct GuestMemory {
    regions: Arc<[MemoryRegion]>,
}

impl AsRawDescriptors for GuestMemory {
    /// USE WITH CAUTION, the descriptors returned here are not necessarily
    /// files!
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
        self.regions
            .iter()
            .map(|r| r.shared_obj.as_raw_descriptor())
            .collect()
    }
}

impl GuestMemory {
    /// Creates backing shm for GuestMemory regions
    fn create_shm(ranges: &[(GuestAddress, u64, MemoryRegionOptions)]) -> Result<SharedMemory> {
        let mut aligned_size = 0;
        let pg_size = pagesize();
        for range in ranges {
            if range.1 % pg_size as u64 != 0 {
                return Err(Error::MemoryNotAligned(range.0, range.1));
            }

            aligned_size += range.1;
        }

        // NOTE: Some tests rely on the GuestMemory's name when capturing metrics.
        let name = "crosvm_guest";
        // Shm must be mut even though it is only updated on Unix systems.
        #[allow(unused_mut)]
        let mut shm = SharedMemory::new(name, aligned_size).map_err(Error::MemoryCreationFailed)?;

        sys::finalize_shm(&mut shm)?;

        Ok(shm)
    }

    /// Creates a container for guest memory regions.
    /// Valid memory regions are specified as a sequence of (Address, Size, MemoryRegionOptions)
    /// tuples sorted by Address.
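    ///
    /// # Examples
    ///
    /// A compile-only sketch in the style of the other doctests in this file;
    /// the sizes and alignment are illustrative:
    ///
    /// ```
    /// # use vm_memory::guest_memory::MemoryRegionOptions;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_new_with_options() -> Result<(), ()> {
    ///     let gm = GuestMemory::new_with_options(&[
    ///         (GuestAddress(0x0), 0x10000, MemoryRegionOptions::new()),
    ///         (GuestAddress(0x10000), 0x10000, MemoryRegionOptions::new().align(0x10000)),
    ///     ])
    ///     .map_err(|_| ())?;
    ///     assert_eq!(gm.memory_size(), 0x20000);
    ///     Ok(())
    /// # }
    /// ```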
    pub fn new_with_options(
        ranges: &[(GuestAddress, u64, MemoryRegionOptions)],
    ) -> Result<GuestMemory> {
        // Create shm
        let shm = Arc::new(GuestMemory::create_shm(ranges)?);

        // Create memory regions
        let mut regions = Vec::<MemoryRegion>::new();
        let mut offset = 0;

        for range in ranges {
            if let Some(last) = regions.last() {
                if last
                    .guest_base
                    .checked_add(last.mapping.size() as u64)
                    .map_or(true, |a| a > range.0)
                {
                    return Err(Error::MemoryRegionOverlap);
                }
            }

            let size = usize::try_from(range.1)
                .map_err(|_| Error::MemoryRegionTooLarge(range.1 as u128))?;
            let mapping = MemoryMappingBuilder::new(size)
                .from_shared_memory(shm.as_ref())
                .offset(offset)
                .align(range.2.align)
                .build()
                .map_err(Error::MemoryMappingFailed)?;

            regions.push(MemoryRegion {
                mapping,
                guest_base: range.0,
                shared_obj: BackingObject::Shm(shm.clone()),
                obj_offset: offset,
                options: range.2,
            });

            offset += size as u64;
        }

        Ok(GuestMemory {
            regions: Arc::from(regions),
        })
    }

    /// Creates a container for guest memory regions.
    /// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
    pub fn new(ranges: &[(GuestAddress, u64)]) -> Result<GuestMemory> {
        GuestMemory::new_with_options(
            ranges
                .iter()
                .map(|(addr, size)| (*addr, *size, Default::default()))
                .collect::<Vec<(GuestAddress, u64, MemoryRegionOptions)>>()
                .as_slice(),
        )
    }

    /// Creates a `GuestMemory` from a collection of MemoryRegions.
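    ///
    /// # Examples
    ///
    /// A compile-only sketch backing a single region with shared memory; the
    /// name and sizes are illustrative:
    ///
    /// ```
    /// # use std::sync::Arc;
    /// # use base::SharedMemory;
    /// # use vm_memory::guest_memory::MemoryRegion;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_from_regions() -> Result<(), ()> {
    ///     let shm = Arc::new(SharedMemory::new("example", 0x10000).map_err(|_| ())?);
    ///     let region =
    ///         MemoryRegion::new_from_shm(0x10000, GuestAddress(0x0), 0, shm).map_err(|_| ())?;
    ///     let _gm = GuestMemory::from_regions(vec![region]).map_err(|_| ())?;
    ///     Ok(())
    /// # }
    /// ```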
    pub fn from_regions(mut regions: Vec<MemoryRegion>) -> Result<Self> {
        // Sort the regions and ensure they don't overlap.
        regions.sort_by(|a, b| a.guest_base.cmp(&b.guest_base));

        if regions.len() > 1 {
            let mut prev_end = regions[0]
                .guest_base
                .checked_add(regions[0].mapping.size() as u64)
                .ok_or(Error::MemoryRegionOverlap)?;
            for region in &regions[1..] {
                if prev_end > region.guest_base {
                    return Err(Error::MemoryRegionOverlap);
                }
                prev_end = region
                    .guest_base
                    .checked_add(region.mapping.size() as u64)
                    .ok_or(Error::MemoryRegionTooLarge(
                        region.guest_base.0 as u128 + region.mapping.size() as u128,
                    ))?;
            }
        }

        Ok(GuestMemory {
            regions: Arc::from(regions),
        })
    }

    /// Returns the end address of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_end_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     assert_eq!(start_addr.checked_add(0x400), Some(gm.end_addr()));
    ///     Ok(())
    /// # }
    /// ```
    pub fn end_addr(&self) -> GuestAddress {
        self.regions
            .iter()
            .max_by_key(|region| region.start())
            .map_or(GuestAddress(0), MemoryRegion::end)
    }

    /// Returns the guest addresses and sizes of the memory regions.
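    ///
    /// # Examples
    ///
    /// A compile-only sketch listing each region's base and size:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_guest_memory_regions() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)]).map_err(|_| ())?;
    ///     for (base, size) in gm.guest_memory_regions() {
    ///         println!("{} {:#x}", base, size);
    ///     }
    ///     Ok(())
    /// # }
    /// ```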
    pub fn guest_memory_regions(&self) -> Vec<(GuestAddress, usize)> {
        self.regions
            .iter()
            .map(|region| (region.guest_base, region.mapping.size()))
            .collect()
    }

    /// Returns the total size of memory in bytes.
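    ///
    /// # Examples
    ///
    /// A compile-only sketch summing two regions:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_memory_size() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000), (GuestAddress(0x10000), 0x10000)])
    ///         .map_err(|_| ())?;
    ///     assert_eq!(gm.memory_size(), 0x20000);
    ///     Ok(())
    /// # }
    /// ```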
    pub fn memory_size(&self) -> u64 {
        self.regions
            .iter()
            .map(|region| region.mapping.size() as u64)
            .sum()
    }

    /// Returns true if the given address is within the memory range available to the guest.
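    ///
    /// # Examples
    ///
    /// A compile-only sketch probing an address inside and one past the region:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_address_in_range() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)]).map_err(|_| ())?;
    ///     assert!(gm.address_in_range(GuestAddress(0x8000)));
    ///     assert!(!gm.address_in_range(GuestAddress(0x10000)));
    ///     Ok(())
    /// # }
    /// ```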
    pub fn address_in_range(&self, addr: GuestAddress) -> bool {
        self.regions.iter().any(|region| region.contains(addr))
    }

    /// Returns true if the given range `[start, end)` overlaps with the memory range
    /// available to the guest.
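    ///
    /// # Examples
    ///
    /// A compile-only sketch; the second range lies entirely past the end of memory:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_range_overlap() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)]).map_err(|_| ())?;
    ///     assert!(gm.range_overlap(GuestAddress(0x8000), GuestAddress(0x18000)));
    ///     assert!(!gm.range_overlap(GuestAddress(0x10000), GuestAddress(0x20000)));
    ///     Ok(())
    /// # }
    /// ```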
    pub fn range_overlap(&self, start: GuestAddress, end: GuestAddress) -> bool {
        self.regions
            .iter()
            .any(|region| region.start() < end && start < region.end())
    }

    /// Returns an address `addr + offset` if it's in range.
    ///
    /// This function doesn't care whether a region `[addr, addr + offset)` is in range or not. To
    /// guarantee it's a valid range, use `is_valid_range()` instead.
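    ///
    /// # Examples
    ///
    /// A compile-only sketch; the second offset lands past the end of memory:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_checked_offset() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)]).map_err(|_| ())?;
    ///     assert_eq!(
    ///         gm.checked_offset(GuestAddress(0x1000), 0x1000),
    ///         Some(GuestAddress(0x2000))
    ///     );
    ///     assert_eq!(gm.checked_offset(GuestAddress(0x1000), 0x10000), None);
    ///     Ok(())
    /// # }
    /// ```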
    pub fn checked_offset(&self, addr: GuestAddress, offset: u64) -> Option<GuestAddress> {
        addr.checked_add(offset).and_then(|a| {
            if self.address_in_range(a) {
                Some(a)
            } else {
                None
            }
        })
    }

    /// Returns true if the given range `[start, start + length)` is a valid contiguous memory
    /// range available to the guest and it's backed by a single underlying memory region.
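    ///
    /// # Examples
    ///
    /// A compile-only sketch; a range spanning two underlying regions is rejected:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_is_valid_range() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000), (GuestAddress(0x10000), 0x10000)])
    ///         .map_err(|_| ())?;
    ///     assert!(gm.is_valid_range(GuestAddress(0x0), 0x10000));
    ///     assert!(!gm.is_valid_range(GuestAddress(0x8000), 0x10000));
    ///     Ok(())
    /// # }
    /// ```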
    pub fn is_valid_range(&self, start: GuestAddress, length: u64) -> bool {
        if length == 0 {
            return false;
        }

        let end = if let Some(end) = start.checked_add(length - 1) {
            end
        } else {
            return false;
        };

        self.regions
            .iter()
            .any(|region| region.start() <= start && end < region.end())
    }

    /// Returns the number of memory regions.
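    ///
    /// # Examples
    ///
    /// A compile-only sketch counting two regions:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_num_regions() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000), (GuestAddress(0x10000), 0x10000)])
    ///         .map_err(|_| ())?;
    ///     assert_eq!(gm.num_regions(), 2);
    ///     Ok(())
    /// # }
    /// ```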
    pub fn num_regions(&self) -> u64 {
        self.regions.len() as u64
    }

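    /// Returns an iterator over descriptions of each region: index, guest
    /// address, size, host mapping address, backing object, and options.
    ///
    /// # Examples
    ///
    /// A compile-only sketch printing each region's placement:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_regions_iter() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)]).map_err(|_| ())?;
    ///     for info in gm.regions() {
    ///         println!("region {} at {} is {:#x} bytes", info.index, info.guest_addr, info.size);
    ///     }
    ///     Ok(())
    /// # }
    /// ```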
    pub fn regions(&self) -> impl Iterator<Item = MemoryRegionInformation> {
        self.regions
            .iter()
            .enumerate()
            .map(|(index, region)| MemoryRegionInformation {
                index,
                guest_addr: region.start(),
                size: region.mapping.size(),
                host_addr: region.mapping.as_ptr() as usize,
                shm: &region.shared_obj,
                shm_offset: region.obj_offset,
                options: region.options,
            })
    }

    /// Writes a slice to guest memory at the specified guest address.
    /// Returns the number of bytes written.  The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Write a slice at guest address 0x200.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     let res = gm.write_at_addr(&[1,2,3,4,5], GuestAddress(0x200)).map_err(|_| ())?;
    ///     assert_eq!(5, res);
    ///     Ok(())
    /// # }
    /// ```
    pub fn write_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<usize> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_slice(buf, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Writes the entire contents of a slice to guest memory at the specified
    /// guest address.
    ///
    /// Returns an error if there isn't enough room in the memory region to
    /// complete the entire write. Part of the data may have been written
    /// nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
    ///
    /// fn test_write_all() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     gm.write_all_at_addr(b"zyxwvut", GuestAddress(0x1200))
    /// }
    /// ```
    pub fn write_all_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.write_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortWrite {
                expected,
                completed,
            })
        }
    }

    /// Reads into a slice from guest memory at the specified guest address.
    /// Returns the number of bytes read.  The number of bytes read can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Read a slice of length 16 at guest address 0x200.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_slice() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     let buf = &mut [0u8; 16];
    ///     let res = gm.read_at_addr(buf, GuestAddress(0x200)).map_err(|_| ())?;
    ///     assert_eq!(16, res);
    ///     Ok(())
    /// # }
    /// ```
    pub fn read_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<usize> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_slice(buf, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Reads from guest memory at the specified address to fill the entire
    /// buffer.
    ///
    /// Returns an error if there isn't enough room in the memory region to fill
    /// the entire buffer. Part of the buffer may have been filled nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
    ///
    /// fn test_read_exact() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     let mut buffer = [0u8; 0x200];
    ///     gm.read_exact_at_addr(&mut buffer, GuestAddress(0x1200))
    /// }
    /// ```
    pub fn read_exact_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.read_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortRead {
                expected,
                completed,
            })
        }
    }

    /// Reads an object from guest memory at the given guest address.
    ///
    /// # Examples
    /// * Read a u64 from two areas of guest memory backed by separate mappings.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_u64() -> Result<u64, ()> {
    /// #     let start_addr1 = GuestAddress(0x0);
    /// #     let start_addr2 = GuestAddress(0x400);
    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
    /// #         .map_err(|_| ())?;
    ///       let num1: u64 = gm.read_obj_from_addr(GuestAddress(32)).map_err(|_| ())?;
    ///       let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x400+32)).map_err(|_| ())?;
    /// #     Ok(num1 + num2)
    /// # }
    /// ```
    pub fn read_obj_from_addr<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_obj(offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Reads an object from guest memory at the given guest address.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read.  However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// The read operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When reading
    /// from regular memory, prefer [`GuestMemory::read_obj_from_addr`].
    ///
    /// # Examples
    /// * Read a u64 from two areas of guest memory backed by separate mappings.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_u64() -> Result<u64, ()> {
    /// #     let start_addr1 = GuestAddress(0x0);
    /// #     let start_addr2 = GuestAddress(0x400);
    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
    /// #         .map_err(|_| ())?;
    ///       let num1: u64 = gm.read_obj_from_addr_volatile(GuestAddress(32)).map_err(|_| ())?;
    ///       let num2: u64 = gm.read_obj_from_addr_volatile(GuestAddress(0x400+32)).map_err(|_| ())?;
    /// #     Ok(num1 + num2)
    /// # }
    /// ```
    pub fn read_obj_from_addr_volatile<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_obj_volatile(offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Writes an object to the memory region at the specified guest address.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// # Examples
    /// * Write a u64 at guest address 0x1100.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     gm.write_obj_at_addr(55u64, GuestAddress(0x1100))
    ///         .map_err(|_| ())
    /// # }
    /// ```
    pub fn write_obj_at_addr<T: AsBytes>(&self, val: T, guest_addr: GuestAddress) -> Result<()> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_obj(val, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Writes an object to the memory region at the specified guest address.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// The write operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When writing
    /// to regular memory, prefer [`GuestMemory::write_obj_at_addr`].
    ///
    /// # Examples
    /// * Write a u64 at guest address 0x1100.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     gm.write_obj_at_addr_volatile(55u64, GuestAddress(0x1100))
    ///         .map_err(|_| ())
    /// # }
    /// ```
    pub fn write_obj_at_addr_volatile<T: AsBytes>(
        &self,
        val: T,
        guest_addr: GuestAddress,
    ) -> Result<()> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_obj_volatile(val, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Returns a `VolatileSlice` of `len` bytes starting at `addr`. Returns an error if the slice
    /// is not a subset of this `GuestMemory`.
    ///
    /// # Examples
    /// * Write `99` to 30 bytes starting at guest address 0x1010.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};
    /// # fn test_volatile_slice() -> Result<(), GuestMemoryError> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)])?;
    ///     let vslice = gm.get_slice_at_addr(GuestAddress(0x1010), 30)?;
    ///     vslice.write_bytes(99);
    /// #   Ok(())
    /// # }
    /// ```
    pub fn get_slice_at_addr(&self, addr: GuestAddress, len: usize) -> Result<VolatileSlice> {
        self.regions
            .iter()
            .find(|region| region.contains(addr))
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|region| {
                // The cast to a usize is safe here because we know that `region.contains(addr)` and
                // it's not possible for a memory region to be larger than what fits in a usize.
                region
                    .mapping
                    .get_slice(addr.offset_from(region.start()) as usize, len)
                    .map_err(Error::VolatileMemoryAccess)
            })
    }

    /// Convert a GuestAddress into a pointer in the address space of this
    /// process. This should only be necessary for giving addresses to the
    /// kernel, as with vhost ioctls. Normal reads/writes to guest memory should
    /// be done through `write_obj_at_addr`, `read_obj_from_addr`, etc.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_host_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x500)]).map_err(|_| ())?;
    ///     let addr = gm.get_host_address(GuestAddress(0x1200)).unwrap();
    ///     println!("Host address is {:p}", addr);
    ///     Ok(())
    /// # }
    /// ```
    pub fn get_host_address(&self, guest_addr: GuestAddress) -> Result<*const u8> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        Ok(
            // SAFETY:
            // This is safe; `find_region` already checks that offset is in
            // bounds.
            unsafe { mapping.as_ptr().add(offset) } as *const u8,
        )
    }

    /// Convert a GuestAddress into a pointer in the address space of this
    /// process, and verify that the provided size defines a valid range within
    /// a single memory region. Similar to get_host_address(), this should only
    /// be used for giving addresses to the kernel.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    /// * `size` - Size of the address range to be converted.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_host_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x500)]).map_err(|_| ())?;
    ///     let addr = gm.get_host_address_range(GuestAddress(0x1200), 0x200).unwrap();
    ///     println!("Host address is {:p}", addr);
    ///     Ok(())
    /// # }
    /// ```
    pub fn get_host_address_range(
        &self,
        guest_addr: GuestAddress,
        size: usize,
    ) -> Result<*const u8> {
        if size == 0 {
            return Err(Error::InvalidSize(size));
        }

        // Assume no overlap among regions
        let (mapping, offset, _) = self.find_region(guest_addr)?;

        if mapping
            .size()
            .checked_sub(offset)
            .map_or(true, |v| v < size)
        {
            return Err(Error::InvalidGuestAddress(guest_addr));
        }

        Ok(
            // SAFETY:
            // This is safe; `find_region` already checks that offset is in
            // bounds.
            unsafe { mapping.as_ptr().add(offset) } as *const u8,
        )
    }

    /// Returns a reference to the backing object of the region that contains the given address.
    pub fn shm_region(
        &self,
        guest_addr: GuestAddress,
    ) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| region.shared_obj.as_ref())
    }

    /// Returns the region that contains the memory at `offset` from the base of guest memory.
    pub fn offset_region(&self, offset: u64) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
        self.shm_region(
            self.checked_offset(self.regions[0].guest_base, offset)
                .ok_or(Error::InvalidOffset(offset))?,
        )
    }

    /// Loops over all guest memory regions of `self`, and returns the
    /// target region that contains `guest_addr`. On success, this
    /// function returns a tuple with the following fields:
    ///
    /// (i) the memory mapping associated with the target region.
    /// (ii) the relative offset from the start of the target region to `guest_addr`.
    /// (iii) the offset of the target region within its backing object.
    ///
    /// If no target region is found, an error is returned.
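    ///
    /// # Examples
    ///
    /// A compile-only sketch resolving an address to its mapping and offsets:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_find_region() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x10000)]).map_err(|_| ())?;
    ///     let (_mapping, offset, obj_offset) =
    ///         gm.find_region(GuestAddress(0x1200)).map_err(|_| ())?;
    ///     assert_eq!(offset, 0x200);
    ///     assert_eq!(obj_offset, 0);
    ///     Ok(())
    /// # }
    /// ```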
    pub fn find_region(&self, guest_addr: GuestAddress) -> Result<(&MemoryMapping, usize, u64)> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| {
                (
                    &region.mapping,
                    guest_addr.offset_from(region.start()) as usize,
                    region.obj_offset,
                )
            })
    }

    /// Convert a GuestAddress into an offset within the associated shm region.
    ///
    /// Due to potential gaps within GuestMemory, it is helpful to know the
    /// offset within the shm where a given address is found. This offset
    /// can then be passed to another process mapping the shm to read data
    /// starting at that address.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let addr_a = GuestAddress(0x10000);
    /// let addr_b = GuestAddress(0x80000);
    /// let mut gm = GuestMemory::new(&vec![
    ///     (addr_a, 0x20000),
    ///     (addr_b, 0x30000)]).expect("failed to create GuestMemory");
    /// let offset = gm.offset_from_base(GuestAddress(0x95000))
    ///                .expect("failed to get offset");
    /// assert_eq!(offset, 0x35000);
    /// ```
    pub fn offset_from_base(&self, guest_addr: GuestAddress) -> Result<u64> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| region.obj_offset + guest_addr.offset_from(region.start()))
    }

    /// Copy all guest memory into `w`.
    ///
    /// # Safety
    /// Must have exclusive access to the guest memory for the duration of the
    /// call (e.g. all vCPUs and devices must be stopped).
    ///
    /// Returns a JSON object that contains metadata about the underlying memory regions to allow
    /// validation checks at restore time.
    #[deny(unsafe_op_in_unsafe_fn)]
    pub unsafe fn snapshot<T: Write>(
        &self,
        w: &mut T,
        compress: bool,
    ) -> anyhow::Result<serde_json::Value> {
        fn go(
            this: &GuestMemory,
            w: &mut impl Write,
        ) -> anyhow::Result<Vec<MemoryRegionSnapshotMetadata>> {
            let mut regions = Vec::new();
            for region in this.regions.iter() {
                let data_ranges = region
                    .find_data_ranges()
                    .context("find_data_ranges failed")?;
                for range in &data_ranges {
                    let region_vslice = region
                        .mapping
                        .get_slice(range.start, range.end - range.start)?;
                    // SAFETY:
                    // 1. The data is guaranteed to be present & of expected length by the
                    //    `VolatileSlice`.
                    // 2. Aliasing the `VolatileSlice`'s memory is safe because a. The only mutable
                    //    reference to it is held by the guest, and the guest's VCPUs are stopped
                    //    (guaranteed by caller), so that mutable reference can be ignored (aliasing
                    //    is only an issue if temporal overlap occurs, and it does not here). b.
                    //    Some host code does manipulate guest memory through raw pointers. This
                    //    aliases the underlying memory of the slice, so we must ensure that host
                    //    code is not running (the caller guarantees this).
                    w.write_all(unsafe {
                        std::slice::from_raw_parts(region_vslice.as_ptr(), region_vslice.size())
                    })?;
                }
                regions.push(MemoryRegionSnapshotMetadata {
                    guest_base: region.guest_base.0,
                    size: region.mapping.size(),
                    data_ranges,
                });
            }
            Ok(regions)
        }

        let regions = if compress {
            let mut w = lz4_flex::frame::FrameEncoder::new(w);
            let regions = go(self, &mut w)?;
            w.finish()?;
            regions
        } else {
            go(self, w)?
        };

        Ok(serde_json::to_value(MemorySnapshotMetadata {
            regions,
            compressed: compress,
        })?)
    }

    /// Restore the guest memory using the bytes from `r`.
    ///
    /// # Safety
    /// Must have exclusive access to the guest memory for the duration of the
    /// call (e.g. all vCPUs and devices must be stopped).
    ///
    /// Returns an error if `metadata` doesn't match the configuration of the `GuestMemory` or if
    /// `r` doesn't produce exactly as many bytes as needed.
    #[deny(unsafe_op_in_unsafe_fn)]
    pub unsafe fn restore<T: Read>(
        &self,
        metadata: serde_json::Value,
        r: &mut T,
    ) -> anyhow::Result<()> {
        let metadata: MemorySnapshotMetadata = serde_json::from_value(metadata)?;

        let mut r: Box<dyn Read> = if metadata.compressed {
            Box::new(lz4_flex::frame::FrameDecoder::new(r))
        } else {
            Box::new(r)
        };

        if self.regions.len() != metadata.regions.len() {
            bail!(
                "snapshot expected {} memory regions but VM has {}",
                metadata.regions.len(),
                self.regions.len()
            );
        }
        for (region, metadata) in self.regions.iter().zip(metadata.regions.iter()) {
            let MemoryRegionSnapshotMetadata {
                guest_base,
                size,
                data_ranges,
            } = metadata;
            if region.guest_base.0 != *guest_base || region.mapping.size() != *size {
                bail!("snapshot memory regions don't match VM memory regions");
            }

            let mut prev_end = 0;
            for range in data_ranges {
                let hole_size = range
                    .start
                    .checked_sub(prev_end)
                    .context("invalid data range")?;
                if hole_size > 0 {
                    region.zero_range(prev_end, hole_size)?;
                }
                let region_vslice = region
                    .mapping
                    .get_slice(range.start, range.end - range.start)?;

                // SAFETY:
                // See `Self::snapshot` for the detailed safety statement, and
                // note that both mutable and non-mutable aliasing is safe.
                r.read_exact(unsafe {
                    std::slice::from_raw_parts_mut(region_vslice.as_mut_ptr(), region_vslice.size())
                })?;

                prev_end = range.end;
            }
            let hole_size = region
                .mapping
                .size()
                .checked_sub(prev_end)
                .context("invalid data range")?;
            if hole_size > 0 {
                region.zero_range(prev_end, hole_size)?;
            }
        }

        // Should always be at EOF at this point.
        let mut buf = [0];
        if r.read(&mut buf)? != 0 {
            bail!("too many bytes");
        }

        Ok(())
    }
}

#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
struct MemorySnapshotMetadata {
    regions: Vec<MemoryRegionSnapshotMetadata>,
    compressed: bool,
}

#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
struct MemoryRegionSnapshotMetadata {
    guest_base: u64,
    size: usize,
    // Ranges of the mmap that are stored in the snapshot file. All other ranges of the region are
    // zeros.
    data_ranges: Vec<std::ops::Range<usize>>,
}

// SAFETY:
// It is safe to implement BackingMemory because GuestMemory can be mutated any time already.
unsafe impl BackingMemory for GuestMemory {
    fn get_volatile_slice(
        &self,
        mem_range: cros_async::MemRegion,
    ) -> mem::Result<VolatileSlice<'_>> {
        self.get_slice_at_addr(GuestAddress(mem_range.offset), mem_range.len)
            .map_err(|_| mem::Error::InvalidOffset(mem_range.offset, mem_range.len))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_alignment() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);

        assert!(GuestMemory::new(&[(start_addr1, 0x100), (start_addr2, 0x400)]).is_err());
        assert!(GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).is_ok());
    }

    #[test]
    fn two_regions() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        // The memory regions are `[0x0, 0x10000)`, `[0x10000, 0x20000)`.
        let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();

        // Although each address in `[0x0, 0x20000)` is valid, `is_valid_range()` returns false for
        // a range that is across multiple underlying regions.
        assert!(gm.is_valid_range(GuestAddress(0x5000), 0x5000));
        assert!(gm.is_valid_range(GuestAddress(0x10000), 0x5000));
        assert!(!gm.is_valid_range(GuestAddress(0x5000), 0x10000));
    }

    #[test]
    fn overlap_memory() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        assert!(GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).is_err());
    }

    #[test]
    fn region_hole() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x40000);
        // The memory regions are `[0x0, 0x20000)`, `[0x40000, 0x60000)`.
        let gm = GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).unwrap();

        assert!(gm.address_in_range(GuestAddress(0x10000)));
        assert!(!gm.address_in_range(GuestAddress(0x30000)));
        assert!(gm.address_in_range(GuestAddress(0x50000)));
        assert!(!gm.address_in_range(GuestAddress(0x60000)));
        assert!(gm.range_overlap(GuestAddress(0x10000), GuestAddress(0x30000)));
        assert!(!gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x40000)));
        assert!(gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x70000)));
        assert_eq!(gm.checked_offset(GuestAddress(0x10000), 0x10000), None);
        assert_eq!(
            gm.checked_offset(GuestAddress(0x50000), 0x8000),
            Some(GuestAddress(0x58000))
        );
        assert_eq!(gm.checked_offset(GuestAddress(0x50000), 0x10000), None);
        assert!(gm.is_valid_range(GuestAddress(0x0), 0x10000));
        assert!(gm.is_valid_range(GuestAddress(0x0), 0x20000));
        assert!(!gm.is_valid_range(GuestAddress(0x0), 0x20000 + 1));

        // While `checked_offset(GuestAddress(0x10000), 0x40000)` succeeds because 0x50000 is a
        // valid address, `is_valid_range(GuestAddress(0x10000), 0x40000)` returns `false`
        // because there is a hole inside of [0x10000, 0x50000).
        assert_eq!(
            gm.checked_offset(GuestAddress(0x10000), 0x40000),
            Some(GuestAddress(0x50000))
        );
        assert!(!gm.is_valid_range(GuestAddress(0x10000), 0x40000));
    }

    #[test]
    fn test_read_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
        gm.write_obj_at_addr(val2, GuestAddress(0x10000 + 32))
            .unwrap();
        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x10000 + 32)).unwrap();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_memory_size() {
        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x10000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x20000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        let mem_size = gm.memory_size();
        assert_eq!(mem_size, size_region1 + size_region2);
    }

    // Get the base address of the mapping for a GuestAddress.
    fn get_mapping(mem: &GuestMemory, addr: GuestAddress) -> Result<*const u8> {
        Ok(mem.find_region(addr)?.0.as_ptr() as *const u8)
    }

    #[test]
    fn guest_to_host() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();

        // Verify the host addresses match what we expect from the mappings.
        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address(start_addr1).unwrap();
        let host_addr2 = mem.get_host_address(start_addr2).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        // Check that a bad address returns an error.
        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address(bad_addr).is_err());
    }

    #[test]
    fn guest_to_host_range() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();

        // Verify the host addresses match what we expect from the mappings.
        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address_range(start_addr1, 0x10000).unwrap();
        let host_addr2 = mem.get_host_address_range(start_addr2, 0x10000).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        let host_addr3 = mem.get_host_address_range(start_addr2, 0x20000).unwrap();
        assert_eq!(host_addr3, addr2_base);

        // Check that a valid guest address with an invalid size returns an error.
        assert!(mem.get_host_address_range(start_addr1, 0x20000).is_err());

        // Check that a bad address returns an error.
        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address_range(bad_addr, 0x10000).is_err());
    }

    #[test]
    fn shm_offset() {
        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x10000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x20000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        gm.write_obj_at_addr(0x1337u16, GuestAddress(0x0)).unwrap();
        gm.write_obj_at_addr(0x0420u16, GuestAddress(0x10000))
            .unwrap();

        for region in gm.regions() {
            let shm = match region.shm {
                BackingObject::Shm(s) => s,
                _ => {
                    panic!("backing object isn't SharedMemory");
                }
            };
            let mmap = MemoryMappingBuilder::new(region.size)
                .from_shared_memory(shm)
                .offset(region.shm_offset)
                .build()
                .unwrap();

            if region.index == 0 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x1337u16);
            }

            if region.index == 1 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x0420u16);
            }
        }
    }

    #[test]
    // Disabled for non-x86 because test infra uses qemu-user, which doesn't support MADV_REMOVE.
    #[cfg(target_arch = "x86_64")]
    fn snapshot_restore() {
        let regions = &[
            // Hole at start.
            (GuestAddress(0x0), 0x10000),
            // Hole at end.
            (GuestAddress(0x10000), 0x10000),
            // Hole in middle.
            (GuestAddress(0x20000), 0x10000),
            // All holes.
            (GuestAddress(0x30000), 0x10000),
            // No holes.
            (GuestAddress(0x40000), 0x1000),
        ];
        let writes = &[
            (GuestAddress(0x0FFF0), 1u64),
            (GuestAddress(0x10000), 2u64),
            (GuestAddress(0x29000), 3u64),
            (GuestAddress(0x40000), 4u64),
        ];

        let gm = GuestMemory::new(regions).unwrap();
        for &(addr, value) in writes {
            gm.write_obj_at_addr(value, addr).unwrap();
        }

        let mut data = tempfile::tempfile().unwrap();
        // SAFETY:
        // no vm is running
        let metadata_json = unsafe { gm.snapshot(&mut data, false).unwrap() };
        let metadata: MemorySnapshotMetadata =
            serde_json::from_value(metadata_json.clone()).unwrap();

        #[cfg(unix)]
        assert_eq!(
            metadata,
            MemorySnapshotMetadata {
                regions: vec![
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0,
                        size: 0x10000,
                        data_ranges: vec![0x0F000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x10000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x01000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x20000,
                        size: 0x10000,
                        data_ranges: vec![0x09000..0x0A000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x30000,
                        size: 0x10000,
                        data_ranges: vec![],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x40000,
                        size: 0x1000,
                        data_ranges: vec![0x00000..0x01000],
                    }
                ],
                compressed: false,
            }
        );
        // We can't detect the holes on Windows yet.
        #[cfg(windows)]
        assert_eq!(
            metadata,
            MemorySnapshotMetadata {
                regions: vec![
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x10000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x20000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x30000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x40000,
                        size: 0x1000,
                        data_ranges: vec![0x00000..0x01000],
                    }
                ],
                compressed: false,
            }
        );

        std::mem::drop(gm);

        let gm2 = GuestMemory::new(regions).unwrap();

        // Write to a hole so we can assert the restore zeroes it.
        let hole_addr = GuestAddress(0x30000);
        gm2.write_obj_at_addr(8u64, hole_addr).unwrap();

        use std::io::Seek;
        data.seek(std::io::SeekFrom::Start(0)).unwrap();
        // SAFETY:
        // no vm is running
        unsafe { gm2.restore(metadata_json, &mut data).unwrap() };

        assert_eq!(gm2.read_obj_from_addr::<u64>(hole_addr).unwrap(), 0);
        for &(addr, value) in writes {
            assert_eq!(gm2.read_obj_from_addr::<u64>(addr).unwrap(), value);
        }
    }
}