1 // Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
2 //
3 // Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 //
5 // Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
6 // Use of this source code is governed by a BSD-style license that can be
7 // found in the LICENSE-BSD-3-Clause file.
8 //
9 // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
10 
11 //! Traits to track and access the physical memory of the guest.
12 //!
13 //! To make the abstraction as generic as possible, all the core traits declared here only define
14 //! methods to access the guest's memory, and never define methods to manage (create, delete, insert,
15 //! remove, etc.) the guest's memory. This way, the guest memory consumers (virtio device drivers,
16 //! vhost drivers, boot loaders, etc.) may be decoupled from the guest memory provider (typically
17 //! a hypervisor).
18 //!
19 //! Traits and Structs
20 //! - [`GuestAddress`](struct.GuestAddress.html): represents a guest physical address (GPA).
21 //! - [`MemoryRegionAddress`](struct.MemoryRegionAddress.html): represents an offset inside a
22 //! region.
23 //! - [`GuestMemoryRegion`](trait.GuestMemoryRegion.html): represents a continuous region of the
24 //! guest's physical memory.
25 //! - [`GuestMemory`](trait.GuestMemory.html): represents a collection of `GuestMemoryRegion`
26 //! objects.
27 //! The main responsibilities of the `GuestMemory` trait are:
28 //!     - hide the details of accessing the guest's physical memory.
29 //!     - map a request address to a `GuestMemoryRegion` object and relay the request to it.
30 //!     - handle cases where an access request spans two or more `GuestMemoryRegion` objects.
31 //!
32 //! Whenever a collection of `GuestMemoryRegion` objects is mutable,
33 //! [`GuestAddressSpace`](trait.GuestAddressSpace.html) should be implemented
34 //! for clients to obtain a [`GuestMemory`] reference or smart pointer.
35 //!
36 //! The `GuestMemoryRegion` trait has an associated `B: Bitmap` type which is used to handle
37 //! dirty bitmap tracking. Backends are free to define the granularity (or whether tracking is
38 //! actually performed at all). Those that do implement tracking functionality are expected to
39 //! ensure the correctness of the underlying `Bytes` implementation. The user has to explicitly
40 //! record (using the handle returned by `GuestRegionMmap::bitmap`) write accesses performed
41 //! via pointers, references, or slices returned by methods of `GuestMemory`, `GuestMemoryRegion`,
42 //! `VolatileSlice`, `VolatileRef`, or `VolatileArrayRef`.
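//!
//! A minimal sketch (assuming the `backend-mmap` feature) of manually recording such a write.
//! It assumes the region exposes a host pointer via `get_host_address` and that the bitmap
//! handle offers `mark_dirty`; with the `()` bitmap used here the call is a no-op, while a
//! tracking backend would mark the touched range dirty:
//!
//! ```
//! # #[cfg(feature = "backend-mmap")]
//! # {
//! # use vm_memory::bitmap::Bitmap;
//! # use vm_memory::guest_memory::MemoryRegionAddress;
//! # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion};
//! let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
//!     .expect("Could not create guest memory");
//! let region = gm.find_region(GuestAddress(0x1000)).unwrap();
//!
//! // Write one byte through a raw host pointer, bypassing the `Bytes` implementation...
//! let host_ptr = region
//!     .get_host_address(MemoryRegionAddress(0x0))
//!     .expect("Could not get host address");
//! unsafe { host_ptr.write(0xAB) };
//! // ...and record the access by hand so dirty tracking stays correct.
//! region.bitmap().mark_dirty(0x0, 1);
//! # }
//! ```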
43 
44 use std::convert::From;
45 use std::fs::File;
46 use std::io::{self, Read, Write};
47 use std::ops::{BitAnd, BitOr, Deref};
48 use std::rc::Rc;
49 use std::sync::atomic::Ordering;
50 use std::sync::Arc;
51 
52 use crate::address::{Address, AddressValue};
53 use crate::bitmap::{Bitmap, BS, MS};
54 use crate::bytes::{AtomicAccess, Bytes};
55 use crate::volatile_memory::{self, VolatileSlice};
56 
57 static MAX_ACCESS_CHUNK: usize = 4096;
58 
59 /// Errors associated with handling guest memory accesses.
60 #[allow(missing_docs)]
61 #[derive(Debug, thiserror::Error)]
62 pub enum Error {
63     /// Failure in finding a guest address in any memory regions mapped by this guest.
64     #[error("Guest memory error: invalid guest address {}",.0.raw_value())]
65     InvalidGuestAddress(GuestAddress),
66     /// Couldn't read/write from the given source.
67     #[error("Guest memory error: {0}")]
68     IOError(io::Error),
69     /// Incomplete read or write.
70     #[error("Guest memory error: only used {completed} bytes in {expected} long buffer")]
71     PartialBuffer { expected: usize, completed: usize },
72     /// Requested backend address is out of range.
73     #[error("Guest memory error: invalid backend address")]
74     InvalidBackendAddress,
75     /// Host virtual address not available.
76     #[error("Guest memory error: host virtual address not available")]
77     HostAddressNotAvailable,
78 }
79 
80 impl From<volatile_memory::Error> for Error {
81     fn from(e: volatile_memory::Error) -> Self {
82         match e {
83             volatile_memory::Error::OutOfBounds { .. } => Error::InvalidBackendAddress,
84             volatile_memory::Error::Overflow { .. } => Error::InvalidBackendAddress,
85             volatile_memory::Error::TooBig { .. } => Error::InvalidBackendAddress,
86             volatile_memory::Error::Misaligned { .. } => Error::InvalidBackendAddress,
87             volatile_memory::Error::IOError(e) => Error::IOError(e),
88             volatile_memory::Error::PartialBuffer {
89                 expected,
90                 completed,
91             } => Error::PartialBuffer {
92                 expected,
93                 completed,
94             },
95         }
96     }
97 }
98 
99 /// Result of guest memory operations.
100 pub type Result<T> = std::result::Result<T, Error>;
101 
102 /// Represents a guest physical address (GPA).
103 ///
104 /// # Notes:
105 /// On ARM64, a 32-bit hypervisor may be used to support a 64-bit guest. For simplicity,
106 /// `u64` is used to store the raw value regardless of whether the guest is a 32-bit or 64-bit
107 /// virtual machine.
108 #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
109 pub struct GuestAddress(pub u64);
110 impl_address_ops!(GuestAddress, u64);
111 
112 /// Represents an offset inside a region.
113 #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
114 pub struct MemoryRegionAddress(pub u64);
115 impl_address_ops!(MemoryRegionAddress, u64);
116 
117 /// Type of the raw value stored in a `GuestAddress` object.
118 pub type GuestUsize = <GuestAddress as AddressValue>::V;
119 
120 /// Represents the start point within a `File` that backs a `GuestMemoryRegion`.
121 #[derive(Clone, Debug)]
122 pub struct FileOffset {
123     file: Arc<File>,
124     start: u64,
125 }
126 
127 impl FileOffset {
128     /// Creates a new `FileOffset` object.
129     pub fn new(file: File, start: u64) -> Self {
130         FileOffset::from_arc(Arc::new(file), start)
131     }
132 
133     /// Creates a new `FileOffset` object based on an existing `Arc<File>`.
134     pub fn from_arc(file: Arc<File>, start: u64) -> Self {
135         FileOffset { file, start }
136     }
137 
138     /// Returns a reference to the inner `File` object.
139     pub fn file(&self) -> &File {
140         self.file.as_ref()
141     }
142 
143     /// Return a reference to the inner `Arc<File>` object.
144     pub fn arc(&self) -> &Arc<File> {
145         &self.file
146     }
147 
148     /// Returns the start offset within the file.
149     pub fn start(&self) -> u64 {
150         self.start
151     }
152 }
153 
154 /// Represents a continuous region of guest physical memory.
155 #[allow(clippy::len_without_is_empty)]
156 pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = Error> {
157     /// Type used for dirty memory tracking.
158     type B: Bitmap;
159 
160     /// Returns the size of the region.
161     fn len(&self) -> GuestUsize;
162 
163     /// Returns the minimum (inclusive) address managed by the region.
164     fn start_addr(&self) -> GuestAddress;
165 
166     /// Returns the maximum (inclusive) address managed by the region.
167     fn last_addr(&self) -> GuestAddress {
168         // unchecked_add is safe as the region bounds were checked when it was created.
169         self.start_addr().unchecked_add(self.len() - 1)
170     }
171 
172     /// Borrow the associated `Bitmap` object.
173     fn bitmap(&self) -> &Self::B;
174 
175     /// Returns the given address if it is within this region.
176     fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
177         if self.address_in_range(addr) {
178             Some(addr)
179         } else {
180             None
181         }
182     }
183 
184     /// Returns `true` if the given address is within this region.
185     fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
186         addr.raw_value() < self.len()
187     }
188 
189     /// Returns the address plus the offset if it is in this region.
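    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A small sketch of the bounds check, assuming a 0x400-byte mmap-backed region:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::guest_memory::MemoryRegionAddress;
    /// # use vm_memory::{GuestAddress, GuestRegionMmap, GuestMemoryRegion};
    /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None)
    ///     .expect("Could not create guest memory");
    ///
    /// // Offsets that stay inside the 0x400-byte region are accepted...
    /// assert_eq!(
    ///     region.checked_offset(MemoryRegionAddress(0x100), 0x200),
    ///     Some(MemoryRegionAddress(0x300))
    /// );
    /// // ...while offsets that land on or past the end are rejected.
    /// assert!(region.checked_offset(MemoryRegionAddress(0x300), 0x100).is_none());
    /// # }
    /// ```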
190     fn checked_offset(
191         &self,
192         base: MemoryRegionAddress,
193         offset: usize,
194     ) -> Option<MemoryRegionAddress> {
195         base.checked_add(offset as u64)
196             .and_then(|addr| self.check_address(addr))
197     }
198 
199     /// Tries to convert an absolute address to a relative address within this region.
200     ///
201     /// Returns `None` if `addr` is out of the bounds of this region.
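    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A small sketch of the translation, assuming a 0x400-byte region starting at 0x1000:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::guest_memory::MemoryRegionAddress;
    /// # use vm_memory::{GuestAddress, GuestRegionMmap, GuestMemoryRegion};
    /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x1000), 0x400, None)
    ///     .expect("Could not create guest memory");
    ///
    /// assert_eq!(
    ///     region.to_region_addr(GuestAddress(0x1200)),
    ///     Some(MemoryRegionAddress(0x200))
    /// );
    /// // Addresses below the region start (or past its end) are not translated.
    /// assert!(region.to_region_addr(GuestAddress(0x800)).is_none());
    /// # }
    /// ```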
202     fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
203         addr.checked_offset_from(self.start_addr())
204             .and_then(|offset| self.check_address(MemoryRegionAddress(offset)))
205     }
206 
207     /// Returns the host virtual address corresponding to the region address.
208     ///
209     /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
210     /// have the capability to mmap guest address range into host virtual address space for
211     /// direct access, so the corresponding host virtual address may be passed to other subsystems.
212     ///
213     /// # Note
214     /// The underlying guest memory is not protected from memory aliasing, which breaks the
215     /// Rust memory safety model. It's the caller's responsibility to ensure that there are no
216     /// concurrent accesses to the underlying guest memory.
217     fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> {
218         Err(Error::HostAddressNotAvailable)
219     }
220 
221     /// Returns information regarding the file and offset backing this memory region.
222     fn file_offset(&self) -> Option<&FileOffset> {
223         None
224     }
225 
226     /// Returns a slice corresponding to the data in the region.
227     ///
228     /// Returns `None` if the region does not support slice-based access.
229     ///
230     /// # Safety
231     ///
232     /// Unsafe because of possible aliasing.
233     #[deprecated = "It is impossible to use this function for accessing memory of a running virtual \
234     machine without violating aliasing rules "]
235     unsafe fn as_slice(&self) -> Option<&[u8]> {
236         None
237     }
238 
239     /// Returns a mutable slice corresponding to the data in the region.
240     ///
241     /// Returns `None` if the region does not support slice-based access.
242     ///
243     /// # Safety
244     ///
245     /// Unsafe because of possible aliasing. Mutable accesses performed through the
246     /// returned slice are not visible to the dirty bitmap tracking functionality of
247     /// the region, and must be manually recorded using the associated bitmap object.
248     #[deprecated = "It is impossible to use this function for accessing memory of a running virtual \
249     machine without violating aliasing rules "]
250     unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
251         None
252     }
253 
254     /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
255     /// `offset`.
256     #[allow(unused_variables)]
257     fn get_slice(
258         &self,
259         offset: MemoryRegionAddress,
260         count: usize,
261     ) -> Result<VolatileSlice<BS<Self::B>>> {
262         Err(Error::HostAddressNotAvailable)
263     }
264 
265     /// Gets a slice of memory for the entire region that supports volatile access.
266     ///
267     /// # Examples (uses the `backend-mmap` feature)
268     ///
269     /// ```
270     /// # #[cfg(feature = "backend-mmap")]
271     /// # {
272     /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion};
273     /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef};
274     /// #
275     /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None)
276     ///     .expect("Could not create guest memory");
277     /// let slice = region
278     ///     .as_volatile_slice()
279     ///     .expect("Could not get volatile slice");
280     ///
281     /// let v = 42u32;
282     /// let r = slice
283     ///     .get_ref::<u32>(0x200)
284     ///     .expect("Could not get reference");
285     /// r.store(v);
286     /// assert_eq!(r.load(), v);
287     /// # }
288     /// ```
289     fn as_volatile_slice(&self) -> Result<VolatileSlice<BS<Self::B>>> {
290         self.get_slice(MemoryRegionAddress(0), self.len() as usize)
291     }
292 
293     /// Shows whether the region is backed by `HugeTLBFS`.
294     /// Returns `Some(true)` if the region is backed by hugetlbfs, and `None` if no
295     /// information is available.
296     ///
297     /// # Examples (uses the `backend-mmap` feature)
298     ///
299     /// ```
300     /// # #[cfg(feature = "backend-mmap")]
301     /// # {
302     /// #   use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap};
303     /// let addr = GuestAddress(0x1000);
304     /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap();
305     /// let r = mem.find_region(addr).unwrap();
306     /// assert_eq!(r.is_hugetlbfs(), None);
307     /// # }
308     /// ```
309     #[cfg(target_os = "linux")]
310     fn is_hugetlbfs(&self) -> Option<bool> {
311         None
312     }
313 }
314 
315 /// `GuestAddressSpace` provides a way to retrieve a `GuestMemory` object.
316 /// The vm-memory crate already provides trivial implementations for
317 /// references to `GuestMemory` or reference-counted `GuestMemory` objects,
318 /// but the trait can also be implemented by any other struct in order
319 /// to provide temporary access to a snapshot of the memory map.
320 ///
321 /// In order to support generic mutable memory maps, devices (or other things
322 /// that access memory) should store the memory as a `GuestAddressSpace<M>`.
323 /// This example shows that references can also be used as the `GuestAddressSpace`
324 /// implementation, providing a zero-cost abstraction whenever immutable memory
325 /// maps are sufficient.
326 ///
327 /// # Examples (uses the `backend-mmap` and `backend-atomic` features)
328 ///
329 /// ```
330 /// # #[cfg(feature = "backend-mmap")]
331 /// # {
332 /// # use std::sync::Arc;
333 /// # use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryMmap};
334 /// #
335 /// pub struct VirtioDevice<AS: GuestAddressSpace> {
336 ///     mem: Option<AS>,
337 /// }
338 ///
339 /// impl<AS: GuestAddressSpace> VirtioDevice<AS> {
340 ///     fn new() -> Self {
341 ///         VirtioDevice { mem: None }
342 ///     }
343 ///     fn activate(&mut self, mem: AS) {
344 ///         self.mem = Some(mem)
345 ///     }
346 /// }
347 ///
348 /// fn get_mmap() -> GuestMemoryMmap<()> {
349 ///     let start_addr = GuestAddress(0x1000);
350 ///     GuestMemoryMmap::from_ranges(&vec![(start_addr, 0x400)])
351 ///         .expect("Could not create guest memory")
352 /// }
353 ///
354 /// // Using `VirtioDevice` with an immutable GuestMemoryMmap:
355 /// let mut for_immutable_mmap = VirtioDevice::<&GuestMemoryMmap<()>>::new();
356 /// let mmap = get_mmap();
357 /// for_immutable_mmap.activate(&mmap);
358 /// let mut another = VirtioDevice::<&GuestMemoryMmap<()>>::new();
359 /// another.activate(&mmap);
360 ///
361 /// # #[cfg(feature = "backend-atomic")]
362 /// # {
363 /// # use vm_memory::GuestMemoryAtomic;
364 /// // Using `VirtioDevice` with a mutable GuestMemoryMmap:
365 /// let mut for_mutable_mmap = VirtioDevice::<GuestMemoryAtomic<GuestMemoryMmap<()>>>::new();
366 /// let atomic = GuestMemoryAtomic::new(get_mmap());
367 /// for_mutable_mmap.activate(atomic.clone());
368 /// let mut another = VirtioDevice::<GuestMemoryAtomic<GuestMemoryMmap<()>>>::new();
369 /// another.activate(atomic.clone());
370 ///
371 /// // atomic can be modified here...
372 /// # }
373 /// # }
374 /// ```
375 pub trait GuestAddressSpace {
376     /// The type that will be used to access guest memory.
377     type M: GuestMemory;
378 
379     /// A type that provides access to the memory.
380     type T: Clone + Deref<Target = Self::M>;
381 
382     /// Return an object (e.g. a reference or guard) that can be used
383     /// to access memory through this address space.  The object provides
384     /// a consistent snapshot of the memory map.
385     fn memory(&self) -> Self::T;
386 }
387 
388 impl<M: GuestMemory> GuestAddressSpace for &M {
389     type M = M;
390     type T = Self;
391 
392     fn memory(&self) -> Self {
393         self
394     }
395 }
396 
397 impl<M: GuestMemory> GuestAddressSpace for Rc<M> {
398     type M = M;
399     type T = Self;
400 
401     fn memory(&self) -> Self {
402         self.clone()
403     }
404 }
405 
406 impl<M: GuestMemory> GuestAddressSpace for Arc<M> {
407     type M = M;
408     type T = Self;
409 
410     fn memory(&self) -> Self {
411         self.clone()
412     }
413 }
414 
415 /// Lifetime generic associated iterators. The actual iterator type is defined through associated
416 /// item `Iter`, for example:
417 ///
418 /// ```
419 /// # use std::marker::PhantomData;
420 /// # use vm_memory::guest_memory::GuestMemoryIterator;
421 /// #
422 /// // Declare the relevant Region and Memory types
423 /// struct MyGuestRegion {/* fields omitted */}
424 /// struct MyGuestMemory {/* fields omitted */}
425 ///
426 /// // Make an Iterator type to iterate over the Regions
427 /// # /*
428 /// struct MyGuestMemoryIter<'a> {/* fields omitted */}
429 /// # */
430 /// # struct MyGuestMemoryIter<'a> {
431 /// #   _marker: PhantomData<&'a MyGuestRegion>,
432 /// # }
433 /// impl<'a> Iterator for MyGuestMemoryIter<'a> {
434 ///     type Item = &'a MyGuestRegion;
435 ///     fn next(&mut self) -> Option<&'a MyGuestRegion> {
436 ///         // ...
437 /// #       None
438 ///     }
439 /// }
440 ///
441 /// // Associate the Iter type with the Memory type
442 /// impl<'a> GuestMemoryIterator<'a, MyGuestRegion> for MyGuestMemory {
443 ///     type Iter = MyGuestMemoryIter<'a>;
444 /// }
445 /// ```
446 pub trait GuestMemoryIterator<'a, R: 'a> {
447     /// Type of the `iter` method's return value.
448     type Iter: Iterator<Item = &'a R>;
449 }
450 
451 /// `GuestMemory` represents a container for an *immutable* collection of
452 /// `GuestMemoryRegion` objects.  `GuestMemory` provides the `Bytes<GuestAddress>`
453 /// trait to hide the details of accessing guest memory by physical address.
454 /// Interior mutability is not allowed for implementations of `GuestMemory` so
455 /// that they always provide a consistent view of the memory map.
456 ///
457 /// The tasks of the `GuestMemory` trait are:
458 /// - map a request address to a `GuestMemoryRegion` object and relay the request to it.
459 /// - handle cases where an access request spans two or more `GuestMemoryRegion` objects.
460 pub trait GuestMemory {
461     /// Type of objects hosted by the address space.
462     type R: GuestMemoryRegion;
463 
464     /// Lifetime generic associated iterators. Usually this is just `Self`.
465     type I: for<'a> GuestMemoryIterator<'a, Self::R>;
466 
467     /// Returns the number of regions in the collection.
468     fn num_regions(&self) -> usize;
469 
470     /// Returns the region containing the specified address or `None`.
471     fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>;
472 
473     /// Perform the specified action on each region.
474     ///
475     /// It only walks the children of the current region and does not step into sub-regions.
476     #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
477     fn with_regions<F, E>(&self, cb: F) -> std::result::Result<(), E>
478     where
479         F: Fn(usize, &Self::R) -> std::result::Result<(), E>,
480     {
481         for (index, region) in self.iter().enumerate() {
482             cb(index, region)?;
483         }
484         Ok(())
485     }
486 
487     /// Perform the specified action on each region mutably.
488     ///
489     /// It only walks the children of the current region and does not step into sub-regions.
490     #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
491     fn with_regions_mut<F, E>(&self, mut cb: F) -> std::result::Result<(), E>
492     where
493         F: FnMut(usize, &Self::R) -> std::result::Result<(), E>,
494     {
495         for (index, region) in self.iter().enumerate() {
496             cb(index, region)?;
497         }
498         Ok(())
499     }
500 
501     /// Gets an iterator over the entries in the collection.
502     ///
503     /// # Examples
504     ///
505     /// * Compute the total size of all memory mappings in KB by iterating over the memory regions
506     ///   and dividing their sizes by 1024, then summing up the values in an accumulator. (uses the
507     ///   `backend-mmap` feature)
508     ///
509     /// ```
510     /// # #[cfg(feature = "backend-mmap")]
511     /// # {
512     /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap};
513     /// #
514     /// let start_addr1 = GuestAddress(0x0);
515     /// let start_addr2 = GuestAddress(0x400);
516     /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)])
517     ///     .expect("Could not create guest memory");
518     ///
519     /// let total_size = gm
520     ///     .iter()
521     ///     .map(|region| region.len() / 1024)
522     ///     .fold(0, |acc, size| acc + size);
523     /// assert_eq!(3, total_size)
524     /// # }
525     /// ```
526     fn iter(&self) -> <Self::I as GuestMemoryIterator<Self::R>>::Iter;
527 
528     /// Applies two functions, specified as callbacks, on the inner memory regions.
529     ///
530     /// # Arguments
531     /// * `init` - Starting value of the accumulator for the `foldf` function.
532     /// * `mapf` - "Map" function, applied to all the inner memory regions. It returns an array of
533     ///            the same size as the memory regions array, containing the function's results
534     ///            for each region.
535     /// * `foldf` - "Fold" function, applied to the array returned by `mapf`. It acts as an
536     ///             operator, applying itself to the `init` value and to each subsequent element
537     ///             in the array returned by `mapf`.
538     ///
539     /// # Examples
540     ///
541     /// * Compute the total size of all memory mappings in KB by iterating over the memory regions
542     ///   and dividing their sizes by 1024, then summing up the values in an accumulator. (uses the
543     ///   `backend-mmap` feature)
544     ///
545     /// ```
546     /// # #[cfg(feature = "backend-mmap")]
547     /// # {
548     /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap};
549     /// #
550     /// let start_addr1 = GuestAddress(0x0);
551     /// let start_addr2 = GuestAddress(0x400);
552     /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)])
553     ///     .expect("Could not create guest memory");
554     ///
555     /// let total_size = gm.map_and_fold(0, |(_, region)| region.len() / 1024, |acc, size| acc + size);
556     /// assert_eq!(3, total_size)
557     /// # }
558     /// ```
559     #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
560     fn map_and_fold<F, G, T>(&self, init: T, mapf: F, foldf: G) -> T
561     where
562         F: Fn((usize, &Self::R)) -> T,
563         G: Fn(T, T) -> T,
564     {
565         self.iter().enumerate().map(mapf).fold(init, foldf)
566     }
567 
568     /// Returns the maximum (inclusive) address managed by the
569     /// [`GuestMemory`](trait.GuestMemory.html).
570     ///
571     /// # Examples (uses the `backend-mmap` feature)
572     ///
573     /// ```
574     /// # #[cfg(feature = "backend-mmap")]
575     /// # {
576     /// # use vm_memory::{Address, GuestAddress, GuestMemory, GuestMemoryMmap};
577     /// #
578     /// let start_addr = GuestAddress(0x1000);
579     /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
580     ///     .expect("Could not create guest memory");
581     ///
582     /// assert_eq!(start_addr.checked_add(0x3ff), Some(gm.last_addr()));
583     /// # }
584     /// ```
585     fn last_addr(&self) -> GuestAddress {
586         self.iter()
587             .map(GuestMemoryRegion::last_addr)
588             .fold(GuestAddress(0), std::cmp::max)
589     }
590 
591     /// Tries to convert an absolute address to a relative address within the corresponding region.
592     ///
593     /// Returns `None` if `addr` isn't present within the memory of the guest.
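    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A small sketch of the lookup plus translation, assuming a single 0x400-byte region at
    /// 0x1000:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::guest_memory::MemoryRegionAddress;
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap};
    /// let start_addr = GuestAddress(0x1000);
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[(start_addr, 0x400)])
    ///     .expect("Could not create guest memory");
    ///
    /// let (region, region_addr) = gm.to_region_addr(GuestAddress(0x1200)).unwrap();
    /// assert_eq!(region.start_addr(), start_addr);
    /// assert_eq!(region_addr, MemoryRegionAddress(0x200));
    /// // Addresses that fall outside every region yield `None`.
    /// assert!(gm.to_region_addr(GuestAddress(0x1400)).is_none());
    /// # }
    /// ```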
594     fn to_region_addr(&self, addr: GuestAddress) -> Option<(&Self::R, MemoryRegionAddress)> {
595         self.find_region(addr)
596             .map(|r| (r, r.to_region_addr(addr).unwrap()))
597     }
598 
599     /// Returns `true` if the given address is present within the memory of the guest.
600     fn address_in_range(&self, addr: GuestAddress) -> bool {
601         self.find_region(addr).is_some()
602     }
603 
604     /// Returns the given address if it is present within the memory of the guest.
605     fn check_address(&self, addr: GuestAddress) -> Option<GuestAddress> {
606         self.find_region(addr).map(|_| addr)
607     }
608 
609     /// Checks whether the range `[base, base + len)` is entirely covered by guest memory.
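    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A small sketch, assuming a single 0x400-byte region at 0x1000:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let start_addr = GuestAddress(0x1000);
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[(start_addr, 0x400)])
    ///     .expect("Could not create guest memory");
    ///
    /// // The whole region is valid...
    /// assert!(gm.check_range(start_addr, 0x400));
    /// // ...but a range that runs past its end is not.
    /// assert!(!gm.check_range(start_addr, 0x401));
    /// # }
    /// ```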
610     fn check_range(&self, base: GuestAddress, len: usize) -> bool {
611         match self.try_access(len, base, |_, count, _, _| -> Result<usize> { Ok(count) }) {
612             Ok(count) => count == len,
613             _ => false,
614         }
615     }
616 
617     /// Returns the address plus the offset if it is present within the memory of the guest.
618     fn checked_offset(&self, base: GuestAddress, offset: usize) -> Option<GuestAddress> {
619         base.checked_add(offset as u64)
620             .and_then(|addr| self.check_address(addr))
621     }
622 
623     /// Invokes callback `f` to handle data in the address range `[addr, addr + count)`.
624     ///
625     /// The address range `[addr, addr + count)` may span more than one
626     /// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object, or even have holes in it.
627     /// So [`try_access()`](trait.GuestMemory.html#method.try_access) invokes the callback `f`
628     /// for each [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object involved and returns:
629     /// - the error code returned by the callback `f`
630     /// - the size of the already handled data when encountering the first hole
631     /// - the size of the already handled data when the whole range has been handled
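    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch that simply counts how many bytes of a range are backed by regions;
    /// it assumes two adjacent 0x400-byte regions and a callback that accepts every byte it
    /// is offered:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[
    ///     (GuestAddress(0x0), 0x400),
    ///     (GuestAddress(0x400), 0x400),
    /// ])
    /// .expect("Could not create guest memory");
    ///
    /// // The 0x800-byte range spans both regions, so the callback runs once per region.
    /// let handled = gm
    ///     .try_access(0x800, GuestAddress(0x0), |_offset, count, _region_addr, _region| {
    ///         Ok(count)
    ///     })
    ///     .expect("Could not access guest memory");
    /// assert_eq!(handled, 0x800);
    /// # }
    /// ```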
632     fn try_access<F>(&self, count: usize, addr: GuestAddress, mut f: F) -> Result<usize>
633     where
634         F: FnMut(usize, usize, MemoryRegionAddress, &Self::R) -> Result<usize>,
635     {
636         let mut cur = addr;
637         let mut total = 0;
638         while let Some(region) = self.find_region(cur) {
639             let start = region.to_region_addr(cur).unwrap();
640             let cap = region.len() - start.raw_value();
641             let len = std::cmp::min(cap, (count - total) as GuestUsize);
642             match f(total, len as usize, start, region) {
643                 // no more data
644                 Ok(0) => return Ok(total),
645                 // made some progress
646                 Ok(len) => {
647                     total += len;
648                     if total == count {
649                         break;
650                     }
651                     cur = match cur.overflowing_add(len as GuestUsize) {
652                         (GuestAddress(0), _) => GuestAddress(0),
653                         (result, false) => result,
654                         (_, true) => panic!("guest address overflow"),
655                     }
656                 }
657                 // error happened
658                 e => return e,
659             }
660         }
661         if total == 0 {
662             Err(Error::InvalidGuestAddress(addr))
663         } else {
664             Ok(total)
665         }
666     }
667 
668     /// Get the host virtual address corresponding to the guest address.
669     ///
670     /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
671     /// have the capability to mmap the guest address range into virtual address space of the host
672     /// for direct access, so the corresponding host virtual address may be passed to other
673     /// subsystems.
674     ///
675     /// # Note
676     /// The underlying guest memory is not protected from memory aliasing, which breaks the
677     /// Rust memory safety model. It's the caller's responsibility to ensure that there are no
678     /// concurrent accesses to the underlying guest memory.
679     ///
680     /// # Arguments
681     /// * `addr` - Guest address to convert.
682     ///
683     /// # Examples (uses the `backend-mmap` feature)
684     ///
685     /// ```
686     /// # #[cfg(feature = "backend-mmap")]
687     /// # {
688     /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
689     /// #
690     /// # let start_addr = GuestAddress(0x1000);
691     /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x500)])
692     /// #    .expect("Could not create guest memory");
693     /// #
694     /// let addr = gm
695     ///     .get_host_address(GuestAddress(0x1200))
696     ///     .expect("Could not get host address");
697     /// println!("Host address is {:p}", addr);
698     /// # }
699     /// ```
700     fn get_host_address(&self, addr: GuestAddress) -> Result<*mut u8> {
701         self.to_region_addr(addr)
702             .ok_or(Error::InvalidGuestAddress(addr))
703             .and_then(|(r, addr)| r.get_host_address(addr))
704     }
705 
706     /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
707     /// `addr`.
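    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A small sketch of volatile access through the returned slice, assuming a 0x400-byte
    /// region at 0x1000:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// # use vm_memory::volatile_memory::VolatileMemory;
    /// let start_addr = GuestAddress(0x1000);
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[(start_addr, 0x400)])
    ///     .expect("Could not create guest memory");
    ///
    /// let slice = gm
    ///     .get_slice(GuestAddress(0x1100), 0x100)
    ///     .expect("Could not get slice");
    /// let r = slice.get_ref::<u32>(0x10).expect("Could not get reference");
    /// r.store(42u32);
    /// assert_eq!(r.load(), 42u32);
    /// # }
    /// ```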
708     fn get_slice(&self, addr: GuestAddress, count: usize) -> Result<VolatileSlice<MS<Self>>> {
709         self.to_region_addr(addr)
710             .ok_or(Error::InvalidGuestAddress(addr))
711             .and_then(|(r, addr)| r.get_slice(addr, count))
712     }
713 }
714 
715 impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
716     type E = Error;
717 
718     fn write(&self, buf: &[u8], addr: GuestAddress) -> Result<usize> {
719         self.try_access(
720             buf.len(),
721             addr,
722             |offset, _count, caddr, region| -> Result<usize> {
723                 region.write(&buf[offset..], caddr)
724             },
725         )
726     }
727 
728     fn read(&self, buf: &mut [u8], addr: GuestAddress) -> Result<usize> {
729         self.try_access(
730             buf.len(),
731             addr,
732             |offset, _count, caddr, region| -> Result<usize> {
733                 region.read(&mut buf[offset..], caddr)
734             },
735         )
736     }
737 
738     /// # Examples
739     ///
740     /// * Write a slice at guest address 0x1000. (uses the `backend-mmap` feature)
741     ///
742     /// ```
743     /// # #[cfg(feature = "backend-mmap")]
744     /// # {
745     /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap};
746     /// #
747     /// # let start_addr = GuestAddress(0x1000);
748     /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
749     /// #    .expect("Could not create guest memory");
750     /// #
751     /// gm.write_slice(&[1, 2, 3, 4, 5], start_addr)
752     ///     .expect("Could not write slice to guest memory");
753     /// # }
754     /// ```
755     fn write_slice(&self, buf: &[u8], addr: GuestAddress) -> Result<()> {
756         let res = self.write(buf, addr)?;
757         if res != buf.len() {
758             return Err(Error::PartialBuffer {
759                 expected: buf.len(),
760                 completed: res,
761             });
762         }
763         Ok(())
764     }
765 
766     /// # Examples
767     ///
768     /// * Read a slice of length 16 at guest address 0x1000. (uses the `backend-mmap` feature)
769     ///
770     /// ```
771     /// # #[cfg(feature = "backend-mmap")]
772     /// # {
773     /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap};
774     /// #
775     /// let start_addr = GuestAddress(0x1000);
776     /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
777     ///     .expect("Could not create guest memory");
778     /// let buf = &mut [0u8; 16];
779     ///
780     /// gm.read_slice(buf, start_addr)
781     ///     .expect("Could not read slice from guest memory");
782     /// # }
783     /// ```
784     fn read_slice(&self, buf: &mut [u8], addr: GuestAddress) -> Result<()> {
785         let res = self.read(buf, addr)?;
786         if res != buf.len() {
787             return Err(Error::PartialBuffer {
788                 expected: buf.len(),
789                 completed: res,
790             });
791         }
792         Ok(())
793     }
794 
795     /// # Examples
796     ///
797     /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature)
798     ///
799     /// ```
800     /// # #[cfg(feature = "backend-mmap")]
801     /// # {
802     /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
803     /// # use std::fs::File;
804     /// # use std::path::Path;
805     /// #
806     /// # let start_addr = GuestAddress(0x1000);
807     /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
808     /// #    .expect("Could not create guest memory");
809     /// # let addr = GuestAddress(0x1010);
810     /// # let mut file = if cfg!(unix) {
811     /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
812     /// #   file
813     /// # } else {
814     /// #   File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
815     /// #       .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
816     /// # };
817     ///
818     /// gm.read_from(addr, &mut file, 128)
819     ///     .expect("Could not read from /dev/urandom into guest memory");
820     ///
821     /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
822     /// let rand_val: u32 = gm
823     ///     .read_obj(read_addr)
824     ///     .expect("Could not read u32 val from /dev/urandom");
825     /// # }
826     /// ```
827     fn read_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
828     where
829         F: Read,
830     {
831         self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
832             // Check if something bad happened before doing unsafe things.
833             assert!(offset <= count);
834 
835             let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
836             let mut buf = vec![0u8; len].into_boxed_slice();
837 
838             loop {
839                 match src.read(&mut buf[..]) {
840                     Ok(bytes_read) => {
841                         // We don't need to update the dirty bitmap manually here because it's
842                         // expected to be handled by the logic within the `Bytes`
843                         // implementation for the region object.
844                         let bytes_written = region.write(&buf[0..bytes_read], caddr)?;
845                         assert_eq!(bytes_written, bytes_read);
846                         break Ok(bytes_read);
847                     }
848                     Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
849                     Err(e) => break Err(Error::IOError(e)),
850                 }
851             }
852         })
853     }
854 
855     fn read_exact_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<()>
856     where
857         F: Read,
858     {
859         let res = self.read_from(addr, src, count)?;
860         if res != count {
861             return Err(Error::PartialBuffer {
862                 expected: count,
863                 completed: res,
864             });
865         }
866         Ok(())
867     }
868 
869     /// # Examples
870     ///
871     /// * Write 128 bytes to /dev/null (uses the `backend-mmap` feature)
872     ///
873     /// ```
874     /// # #[cfg(not(unix))]
875     /// # extern crate vmm_sys_util;
876     /// # #[cfg(feature = "backend-mmap")]
877     /// # {
878     /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
879     /// #
880     /// # let start_addr = GuestAddress(0x1000);
881     /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 1024)])
882     /// #    .expect("Could not create guest memory");
883     /// # let mut file = if cfg!(unix) {
884     /// # use std::fs::OpenOptions;
885     /// let mut file = OpenOptions::new()
886     ///     .write(true)
887     ///     .open("/dev/null")
888     ///     .expect("Could not open /dev/null");
889     /// #   file
890     /// # } else {
891     /// #   use vmm_sys_util::tempfile::TempFile;
892     /// #   TempFile::new().unwrap().into_file()
893     /// # };
894     ///
895     /// gm.write_to(start_addr, &mut file, 128)
896     ///     .expect("Could not write 128 bytes to the provided address");
897     /// # }
898     /// ```
899     fn write_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
900     where
901         F: Write,
902     {
903         self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
904             // Check if something bad happened before doing unsafe things.
905             assert!(offset <= count);
906 
907             let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
908             let mut buf = vec![0u8; len].into_boxed_slice();
909             let bytes_read = region.read(&mut buf, caddr)?;
910             assert_eq!(bytes_read, len);
911             // For a non-RAM region, reading could have side effects, so we
912             // must use write_all().
913             dst.write_all(&buf).map_err(Error::IOError)?;
914             Ok(len)
915         })
916     }
917 
918     /// # Examples
919     ///
920     /// * Write 128 bytes to /dev/null (uses the `backend-mmap` feature)
921     ///
922     /// ```
923     /// # #[cfg(not(unix))]
924     /// # extern crate vmm_sys_util;
925     /// # #[cfg(feature = "backend-mmap")]
926     /// # {
927     /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
928     /// #
929     /// # let start_addr = GuestAddress(0x1000);
930     /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 1024)])
931     /// #    .expect("Could not create guest memory");
932     /// # let mut file = if cfg!(unix) {
933     /// # use std::fs::OpenOptions;
934     /// let mut file = OpenOptions::new()
935     ///     .write(true)
936     ///     .open("/dev/null")
937     ///     .expect("Could not open /dev/null");
938     /// #   file
939     /// # } else {
940     /// #   use vmm_sys_util::tempfile::TempFile;
941     /// #   TempFile::new().unwrap().into_file()
942     /// # };
943     ///
944     /// gm.write_all_to(start_addr, &mut file, 128)
945     ///     .expect("Could not write 128 bytes to the provided address");
946     /// # }
947     /// ```
948     fn write_all_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
949     where
950         F: Write,
951     {
952         let res = self.write_to(addr, dst, count)?;
953         if res != count {
954             return Err(Error::PartialBuffer {
955                 expected: count,
956                 completed: res,
957             });
958         }
959         Ok(())
960     }
961 
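    /// # Examples
    ///
    /// * Store and load a value atomically at guest address 0x1000. (uses the `backend-mmap`
    ///   feature; a small sketch of the atomic accessors)
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use std::sync::atomic::Ordering;
    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    /// #
    /// # let start_addr = GuestAddress(0x1000);
    /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
    /// #    .expect("Could not create guest memory");
    /// gm.store(0xABu8, start_addr, Ordering::Relaxed)
    ///     .expect("Could not store value");
    /// let val: u8 = gm
    ///     .load(start_addr, Ordering::Relaxed)
    ///     .expect("Could not load value");
    /// assert_eq!(val, 0xAB);
    /// # }
    /// ```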
962     fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
963         // `find_region` should really do what `to_region_addr` is doing right now, except
964         // it should keep returning a `Result`.
965         self.to_region_addr(addr)
966             .ok_or(Error::InvalidGuestAddress(addr))
967             .and_then(|(region, region_addr)| region.store(val, region_addr, order))
968     }
969 
970     fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
971         self.to_region_addr(addr)
972             .ok_or(Error::InvalidGuestAddress(addr))
973             .and_then(|(region, region_addr)| region.load(region_addr, order))
974     }
975 }
976 
977 #[cfg(test)]
978 mod tests {
979     #![allow(clippy::undocumented_unsafe_blocks)]
980     use super::*;
981     #[cfg(feature = "backend-mmap")]
982     use crate::bytes::ByteValued;
983     #[cfg(feature = "backend-mmap")]
984     use crate::GuestAddress;
985     #[cfg(feature = "backend-mmap")]
986     use std::io::Cursor;
987     #[cfg(feature = "backend-mmap")]
988     use std::time::{Duration, Instant};
989 
990     use vmm_sys_util::tempfile::TempFile;
991 
992     #[cfg(feature = "backend-mmap")]
993     type GuestMemoryMmap = crate::GuestMemoryMmap<()>;
994 
995     #[cfg(feature = "backend-mmap")]
996     fn make_image(size: u8) -> Vec<u8> {
997         let mut image: Vec<u8> = Vec::with_capacity(size as usize);
998         for i in 0..size {
999             image.push(i);
1000         }
1001         image
1002     }
1003 
1004     #[test]
1005     fn test_file_offset() {
1006         let file = TempFile::new().unwrap().into_file();
1007         let start = 1234;
1008         let file_offset = FileOffset::new(file, start);
1009         assert_eq!(file_offset.start(), start);
1010         assert_eq!(
1011             file_offset.file() as *const File,
1012             file_offset.arc().as_ref() as *const File
1013         );
1014     }
1015 
1016     #[cfg(feature = "backend-mmap")]
1017     #[test]
1018     fn checked_read_from() {
1019         let start_addr1 = GuestAddress(0x0);
1020         let start_addr2 = GuestAddress(0x40);
1021         let mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 64), (start_addr2, 64)]).unwrap();
1022         let image = make_image(0x80);
1023         let offset = GuestAddress(0x30);
1024         let count: usize = 0x20;
1025         assert_eq!(
1026             0x20_usize,
1027             mem.read_from(offset, &mut Cursor::new(&image), count)
1028                 .unwrap()
1029         );
1030     }
1031 
1032     // Runs the provided closure in a loop until at least `duration` has elapsed.
1033     #[cfg(feature = "backend-mmap")]
1034     fn loop_timed<F>(duration: Duration, mut f: F)
1035     where
1036         F: FnMut(),
1037     {
1038         // We check the time every `CHECK_PERIOD` iterations.
1039         const CHECK_PERIOD: u64 = 1_000_000;
1040         let start_time = Instant::now();
1041 
1042         loop {
1043             for _ in 0..CHECK_PERIOD {
1044                 f();
1045             }
1046             if start_time.elapsed() >= duration {
1047                 break;
1048             }
1049         }
1050     }
1051 
1052     // Helper method for the following test. It spawns a writer and a reader thread, which
1053     // simultaneously try to access an object that is placed at the junction of two memory regions.
1054     // The part of the object that's continuously accessed is a member of type T. The writer
1055     // flips all the bits of the member with every write, while the reader checks that every byte
1056     // has the same value (and thus it did not do a non-atomic access). The test succeeds if
1057     // no mismatch is detected after performing accesses for a pre-determined amount of time.
1058     #[cfg(feature = "backend-mmap")]
1059     #[cfg(not(miri))] // This test simulates a race condition between guest and vmm
1060     fn non_atomic_access_helper<T>()
1061     where
1062         T: ByteValued
1063             + std::fmt::Debug
1064             + From<u8>
1065             + Into<u128>
1066             + std::ops::Not<Output = T>
1067             + PartialEq,
1068     {
1069         use std::mem;
1070         use std::thread;
1071 
1072         // A dummy type that's always going to have the same alignment as the first member,
1073         // and then adds some bytes at the end.
1074         #[derive(Clone, Copy, Debug, Default, PartialEq)]
1075         struct Data<T> {
1076             val: T,
1077             some_bytes: [u8; 8],
1078         }
1079 
1080         // Some sanity checks.
1081         assert_eq!(mem::align_of::<T>(), mem::align_of::<Data<T>>());
1082         assert_eq!(mem::size_of::<T>(), mem::align_of::<T>());
1083 
1084         // There must be no padding bytes, as otherwise implementing ByteValued is UB
1085         assert_eq!(mem::size_of::<Data<T>>(), mem::size_of::<T>() + 8);
1086 
1087         unsafe impl<T: ByteValued> ByteValued for Data<T> {}
1088 
1089         // Start of first guest memory region.
1090         let start = GuestAddress(0);
1091         let region_len = 1 << 12;
1092 
1093         // The address where we start writing/reading a Data<T> value.
1094         let data_start = GuestAddress((region_len - mem::size_of::<T>()) as u64);
1095 
1096         let mem = GuestMemoryMmap::from_ranges(&[
1097             (start, region_len),
1098             (start.unchecked_add(region_len as u64), region_len),
1099         ])
1100         .unwrap();
1101 
1102         // Need to clone this and move it into the new thread we create.
1103         let mem2 = mem.clone();
1104         // Just some bytes.
1105         let some_bytes = [1u8, 2, 4, 16, 32, 64, 128, 255];
1106 
1107         let mut data = Data {
1108             val: T::from(0u8),
1109             some_bytes,
1110         };
1111 
1112         // Simple check that cross-region write/read is ok.
1113         mem.write_obj(data, data_start).unwrap();
1114         let read_data = mem.read_obj::<Data<T>>(data_start).unwrap();
1115         assert_eq!(read_data, data);
1116 
1117         let t = thread::spawn(move || {
1118             let mut count: u64 = 0;
1119 
1120             loop_timed(Duration::from_secs(3), || {
1121                 let data = mem2.read_obj::<Data<T>>(data_start).unwrap();
1122 
1123                 // Every time data is written to memory by the other thread, the value of
1124                 // data.val alternates between 0 and T::MAX, so the inner bytes should always
1125                 // have the same value. If they don't match, it means we read a partial value,
1126                 // so the access was not atomic.
1127                 let bytes = data.val.into().to_le_bytes();
1128                 for i in 1..mem::size_of::<T>() {
1129                     if bytes[0] != bytes[i] {
1130                         panic!(
1131                             "val bytes don't match {:?} after {} iterations",
1132                             &bytes[..mem::size_of::<T>()],
1133                             count
1134                         );
1135                     }
1136                 }
1137                 count += 1;
1138             });
1139         });
1140 
1141         // Write the object while flipping the bits of data.val over and over again.
1142         loop_timed(Duration::from_secs(3), || {
1143             mem.write_obj(data, data_start).unwrap();
1144             data.val = !data.val;
1145         });
1146 
1147         t.join().unwrap()
1148     }
1149 
1150     #[cfg(feature = "backend-mmap")]
1151     #[test]
1152     #[cfg(not(miri))]
1153     fn test_non_atomic_access() {
1154         non_atomic_access_helper::<u16>()
1155     }
1156 
1157     #[cfg(feature = "backend-mmap")]
1158     #[test]
1159     fn test_zero_length_accesses() {
1160         #[derive(Default, Clone, Copy)]
1161         #[repr(C)]
1162         struct ZeroSizedStruct {
1163             dummy: [u32; 0],
1164         }
1165 
1166         unsafe impl ByteValued for ZeroSizedStruct {}
1167 
1168         let addr = GuestAddress(0x1000);
1169         let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1170         let obj = ZeroSizedStruct::default();
1171         let mut image = make_image(0x80);
1172 
1173         assert_eq!(mem.write(&[], addr).unwrap(), 0);
1174         assert_eq!(mem.read(&mut [], addr).unwrap(), 0);
1175 
1176         assert!(mem.write_slice(&[], addr).is_ok());
1177         assert!(mem.read_slice(&mut [], addr).is_ok());
1178 
1179         assert!(mem.write_obj(obj, addr).is_ok());
1180         assert!(mem.read_obj::<ZeroSizedStruct>(addr).is_ok());
1181 
1182         assert_eq!(mem.read_from(addr, &mut Cursor::new(&image), 0).unwrap(), 0);
1183 
1184         assert!(mem
1185             .read_exact_from(addr, &mut Cursor::new(&image), 0)
1186             .is_ok());
1187 
1188         assert_eq!(
1189             mem.write_to(addr, &mut Cursor::new(&mut image), 0).unwrap(),
1190             0
1191         );
1192 
1193         assert!(mem
1194             .write_all_to(addr, &mut Cursor::new(&mut image), 0)
1195             .is_ok());
1196     }
1197 
1198     #[cfg(feature = "backend-mmap")]
1199     #[test]
1200     fn test_atomic_accesses() {
1201         let addr = GuestAddress(0x1000);
1202         let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1203         let bad_addr = addr.unchecked_add(0x1000);
1204 
1205         crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr);
1206     }
1207 
1208     #[cfg(feature = "backend-mmap")]
1209     #[cfg(target_os = "linux")]
1210     #[test]
1211     fn test_guest_memory_mmap_is_hugetlbfs() {
1212         let addr = GuestAddress(0x1000);
1213         let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1214         let r = mem.find_region(addr).unwrap();
1215         assert_eq!(r.is_hugetlbfs(), None);
1216     }
1217 }
1218