1 // Portions Copyright 2019 Red Hat, Inc.
2 //
3 // Copyright 2017 The Chromium OS Authors. All rights reserved.
4 // Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
6 //
7 // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
8 
9 //! Types for volatile access to memory.
10 //!
//! Two of the core rules for safe Rust are no data races and no aliased mutable references.
//! `VolatileRef` and `VolatileSlice`, along with the types that produce them (implementors of
//! `VolatileMemory`), allow us to sidestep those rules by wrapping pointers that absolutely have to
//! be accessed with volatile operations. Some systems really do need to operate on shared memory
//! and can't have the compiler reordering or eliding accesses, because it has no visibility into
//! what other systems are doing with that chunk of memory.
17 //!
18 //! For the purposes of maintaining safety, volatile memory has some rules of its own:
19 //! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always be done with a volatile read or write.
//!
//! The first rule is because having references of any kind to memory considered volatile would
//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined
//! behavior if done concurrently without synchronization. With volatile access we know that the
//! compiler has not reordered or elided the access.
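//!
//! A minimal sketch of wrapping a plain byte buffer and accessing it through volatile reads and
//! writes (the buffer size and offsets here are purely illustrative):
//!
//! ```
//! use vm_memory::{VolatileMemory, VolatileSlice};
//!
//! let mut mem = [0u8; 16];
//! let vslice = VolatileSlice::from(&mut mem[..]);
//!
//! // Accesses go through volatile reads/writes rather than plain references.
//! let vref = vslice.get_ref::<u32>(0).expect("offset is in bounds");
//! vref.store(0x1234_5678u32);
//! assert_eq!(vref.load(), 0x1234_5678);
//! ```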
25 
26 use std::cmp::min;
27 use std::io::{self, Read, Write};
28 use std::marker::PhantomData;
29 use std::mem::{align_of, size_of};
30 use std::ptr::copy;
31 use std::ptr::{read_volatile, write_volatile};
32 use std::result;
33 use std::sync::atomic::Ordering;
34 use std::usize;
35 
36 use crate::atomic_integer::AtomicInteger;
37 use crate::bitmap::{Bitmap, BitmapSlice, BS};
38 use crate::{AtomicAccess, ByteValued, Bytes};
39 
40 #[cfg(all(feature = "backend-mmap", feature = "xen", unix))]
41 use crate::mmap_xen::{MmapXen as MmapInfo, MmapXenSlice};
42 
43 #[cfg(not(feature = "xen"))]
44 type MmapInfo = std::marker::PhantomData<()>;
45 
46 use copy_slice_impl::{copy_from_volatile_slice, copy_to_volatile_slice};
47 
48 /// `VolatileMemory` related errors.
49 #[allow(missing_docs)]
50 #[derive(Debug, thiserror::Error)]
51 pub enum Error {
52     /// `addr` is out of bounds of the volatile memory slice.
53     #[error("address 0x{addr:x} is out of bounds")]
54     OutOfBounds { addr: usize },
55     /// Taking a slice at `base` with `offset` would overflow `usize`.
56     #[error("address 0x{base:x} offset by 0x{offset:x} would overflow")]
57     Overflow { base: usize, offset: usize },
58     /// Taking a slice whose size overflows `usize`.
59     #[error("{nelements:?} elements of size {size:?} would overflow a usize")]
60     TooBig { nelements: usize, size: usize },
61     /// Trying to obtain a misaligned reference.
62     #[error("address 0x{addr:x} is not aligned to {alignment:?}")]
63     Misaligned { addr: usize, alignment: usize },
64     /// Writing to memory failed
65     #[error("{0}")]
66     IOError(io::Error),
67     /// Incomplete read or write
68     #[error("only used {completed} bytes in {expected} long buffer")]
69     PartialBuffer { expected: usize, completed: usize },
70 }
71 
72 /// Result of volatile memory operations.
73 pub type Result<T> = result::Result<T, Error>;
74 
75 /// Convenience function for computing `base + offset`.
76 ///
77 /// # Errors
78 ///
79 /// Returns [`Err(Error::Overflow)`](enum.Error.html#variant.Overflow) in case `base + offset`
80 /// exceeds `usize::MAX`.
81 ///
82 /// # Examples
83 ///
84 /// ```
85 /// # use vm_memory::volatile_memory::compute_offset;
86 /// #
87 /// assert_eq!(108, compute_offset(100, 8).unwrap());
88 /// assert!(compute_offset(std::usize::MAX, 6).is_err());
89 /// ```
pub fn compute_offset(base: usize, offset: usize) -> Result<usize> {
91     match base.checked_add(offset) {
92         None => Err(Error::Overflow { base, offset }),
93         Some(m) => Ok(m),
94     }
95 }
96 
97 /// Types that support raw volatile access to their data.
98 pub trait VolatileMemory {
99     /// Type used for dirty memory tracking.
100     type B: Bitmap;
101 
102     /// Gets the size of this slice.
    fn len(&self) -> usize;
104 
105     /// Check whether the region is empty.
    fn is_empty(&self) -> bool {
107         self.len() == 0
108     }
109 
110     /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
111     /// `offset`.
112     ///
113     /// Note that the property `get_slice(offset, count).len() == count` MUST NOT be
114     /// relied on for the correctness of unsafe code. This is a safe function inside of a
115     /// safe trait, and implementors are under no obligation to follow its documentation.
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<BS<Self::B>>>;
117 
118     /// Gets a slice of memory for the entire region that supports volatile access.
    fn as_volatile_slice(&self) -> VolatileSlice<BS<Self::B>> {
120         self.get_slice(0, self.len()).unwrap()
121     }
122 
123     /// Gets a `VolatileRef` at `offset`.
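    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a plain byte buffer as the backing memory:
    ///
    /// ```
    /// # use vm_memory::{VolatileMemory, VolatileSlice};
    /// #
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::from(&mut mem[..]);
    ///
    /// let vref = vslice.get_ref::<u32>(4).expect("Could not get VolatileRef");
    /// vref.store(500u32);
    /// assert_eq!(vref.load(), 500);
    /// ```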
    fn get_ref<T: ByteValued>(&self, offset: usize) -> Result<VolatileRef<T, BS<Self::B>>> {
125         let slice = self.get_slice(offset, size_of::<T>())?;
126 
127         assert_eq!(
128             slice.len(),
129             size_of::<T>(),
130             "VolatileMemory::get_slice(offset, count) returned slice of length != count."
131         );
132 
133         // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
134         // slice.addr is valid memory of size slice.len(). The assert above ensures that
135         // the length of the slice is exactly enough to hold one `T`. Lastly, the lifetime of the
        // returned VolatileRef matches that of the VolatileSlice returned by get_slice and thus the
        // lifetime of `self`.
138         unsafe {
139             Ok(VolatileRef::with_bitmap(
140                 slice.addr,
141                 slice.bitmap,
142                 slice.mmap,
143             ))
144         }
145     }
146 
147     /// Returns a [`VolatileArrayRef`](struct.VolatileArrayRef.html) of `n` elements starting at
148     /// `offset`.
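    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a plain byte buffer as the backing memory:
    ///
    /// ```
    /// # use vm_memory::{VolatileMemory, VolatileSlice};
    /// #
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::from(&mut mem[..]);
    ///
    /// let arr = vslice
    ///     .get_array_ref::<u32>(0, 4)
    ///     .expect("Could not get VolatileArrayRef");
    /// arr.store(2, 0xdead_beef);
    /// assert_eq!(arr.load(2), 0xdead_beef);
    /// ```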
    fn get_array_ref<T: ByteValued>(
150         &self,
151         offset: usize,
152         n: usize,
153     ) -> Result<VolatileArrayRef<T, BS<Self::B>>> {
154         // Use isize to avoid problems with ptr::offset and ptr::add down the line.
155         let nbytes = isize::try_from(n)
156             .ok()
157             .and_then(|n| n.checked_mul(size_of::<T>() as isize))
158             .ok_or(Error::TooBig {
159                 nelements: n,
160                 size: size_of::<T>(),
161             })?;
162         let slice = self.get_slice(offset, nbytes as usize)?;
163 
164         assert_eq!(
165             slice.len(),
166             nbytes as usize,
167             "VolatileMemory::get_slice(offset, count) returned slice of length != count."
168         );
169 
170         // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
171         // slice.addr is valid memory of size slice.len(). The assert above ensures that
172         // the length of the slice is exactly enough to hold `n` instances of `T`. Lastly, the lifetime of the
        // returned VolatileArrayRef matches that of the VolatileSlice returned by get_slice and thus
        // the lifetime of `self`.
175         unsafe {
176             Ok(VolatileArrayRef::with_bitmap(
177                 slice.addr,
178                 n,
179                 slice.bitmap,
180                 slice.mmap,
181             ))
182         }
183     }
184 
185     /// Returns a reference to an instance of `T` at `offset`.
186     ///
187     /// # Safety
188     /// To use this safely, the caller must guarantee that there are no other
189     /// users of the given chunk of memory for the lifetime of the result.
190     ///
191     /// # Errors
192     ///
193     /// If the resulting pointer is not aligned, this method will return an
194     /// [`Error`](enum.Error.html).
    unsafe fn aligned_as_ref<T: ByteValued>(&self, offset: usize) -> Result<&T> {
196         let slice = self.get_slice(offset, size_of::<T>())?;
197         slice.check_alignment(align_of::<T>())?;
198 
199         assert_eq!(
200             slice.len(),
201             size_of::<T>(),
202             "VolatileMemory::get_slice(offset, count) returned slice of length != count."
203         );
204 
205         // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
206         // slice.addr is valid memory of size slice.len(). The assert above ensures that
207         // the length of the slice is exactly enough to hold one `T`.
208         // Dereferencing the pointer is safe because we check the alignment above, and the invariants
209         // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the
        // returned reference matches that of the VolatileSlice returned by get_slice and thus the
        // lifetime of `self`.
212         unsafe { Ok(&*(slice.addr as *const T)) }
213     }
214 
215     /// Returns a mutable reference to an instance of `T` at `offset`. Mutable accesses performed
216     /// using the resulting reference are not automatically accounted for by the dirty bitmap
217     /// tracking functionality.
218     ///
219     /// # Safety
220     ///
221     /// To use this safely, the caller must guarantee that there are no other
222     /// users of the given chunk of memory for the lifetime of the result.
223     ///
224     /// # Errors
225     ///
226     /// If the resulting pointer is not aligned, this method will return an
227     /// [`Error`](enum.Error.html).
    unsafe fn aligned_as_mut<T: ByteValued>(&self, offset: usize) -> Result<&mut T> {
229         let slice = self.get_slice(offset, size_of::<T>())?;
230         slice.check_alignment(align_of::<T>())?;
231 
232         assert_eq!(
233             slice.len(),
234             size_of::<T>(),
235             "VolatileMemory::get_slice(offset, count) returned slice of length != count."
236         );
237 
238         // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
239         // slice.addr is valid memory of size slice.len(). The assert above ensures that
240         // the length of the slice is exactly enough to hold one `T`.
241         // Dereferencing the pointer is safe because we check the alignment above, and the invariants
242         // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the
        // returned reference matches that of the VolatileSlice returned by get_slice and thus the
        // lifetime of `self`.
245 
246         unsafe { Ok(&mut *(slice.addr as *mut T)) }
247     }
248 
249     /// Returns a reference to an instance of `T` at `offset`. Mutable accesses performed
250     /// using the resulting reference are not automatically accounted for by the dirty bitmap
251     /// tracking functionality.
252     ///
253     /// # Errors
254     ///
255     /// If the resulting pointer is not aligned, this method will return an
256     /// [`Error`](enum.Error.html).
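    ///
    /// # Examples
    ///
    /// A minimal sketch using `AtomicU8`, whose alignment requirement is always satisfied; wider
    /// atomic types additionally require the resulting address to be suitably aligned:
    ///
    /// ```
    /// # use std::sync::atomic::{AtomicU8, Ordering};
    /// # use vm_memory::{VolatileMemory, VolatileSlice};
    /// #
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::from(&mut mem[..]);
    ///
    /// let atomic = vslice
    ///     .get_atomic_ref::<AtomicU8>(7)
    ///     .expect("Could not get atomic reference");
    /// atomic.store(0xaa, Ordering::Relaxed);
    /// assert_eq!(atomic.load(Ordering::Relaxed), 0xaa);
    /// ```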
    fn get_atomic_ref<T: AtomicInteger>(&self, offset: usize) -> Result<&T> {
258         let slice = self.get_slice(offset, size_of::<T>())?;
259         slice.check_alignment(align_of::<T>())?;
260 
261         assert_eq!(
262             slice.len(),
263             size_of::<T>(),
264             "VolatileMemory::get_slice(offset, count) returned slice of length != count."
265         );
266 
267         // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
268         // slice.addr is valid memory of size slice.len(). The assert above ensures that
269         // the length of the slice is exactly enough to hold one `T`.
270         // Dereferencing the pointer is safe because we check the alignment above. Lastly, the lifetime of the
        // returned reference matches that of the VolatileSlice returned by get_slice and thus the
        // lifetime of `self`.
273         unsafe { Ok(&*(slice.addr as *const T)) }
274     }
275 
276     /// Returns the sum of `base` and `offset` if the resulting address is valid.
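    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a 32-byte backing buffer:
    ///
    /// ```
    /// # use vm_memory::{VolatileMemory, VolatileSlice};
    /// #
    /// # let mut mem = [0u8; 32];
    /// # let vslice = VolatileSlice::from(&mut mem[..]);
    /// assert_eq!(vslice.compute_end_offset(24, 8).unwrap(), 32);
    /// assert!(vslice.compute_end_offset(24, 16).is_err());
    /// ```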
    fn compute_end_offset(&self, base: usize, offset: usize) -> Result<usize> {
278         let mem_end = compute_offset(base, offset)?;
279         if mem_end > self.len() {
280             return Err(Error::OutOfBounds { addr: mem_end });
281         }
282         Ok(mem_end)
283     }
284 }
285 
286 impl<'a> From<&'a mut [u8]> for VolatileSlice<'a, ()> {
    fn from(value: &'a mut [u8]) -> Self {
288         // SAFETY: Since we construct the VolatileSlice from a rust slice, we know that
289         // the memory at addr `value as *mut u8` is valid for reads and writes (because mutable
290         // reference) of len `value.len()`. Since the `VolatileSlice` inherits the lifetime `'a`,
291         // it is not possible to access/mutate `value` while the VolatileSlice is alive.
292         //
        // Note that it is possible for multiple aliasing subslices of this `VolatileSlice` to
294         // be created through `VolatileSlice::subslice`. This is OK, as pointers are allowed to
295         // alias, and it is impossible to get rust-style references from a `VolatileSlice`.
296         unsafe { VolatileSlice::new(value.as_mut_ptr(), value.len()) }
297     }
298 }
299 
300 #[repr(C, packed)]
301 struct Packed<T>(T);
302 
303 /// A guard to perform mapping and protect unmapping of the memory.
304 pub struct PtrGuard {
305     addr: *mut u8,
306     len: usize,
307 
308     // This isn't used anymore, but it protects the slice from getting unmapped while in use.
309     // Once this goes out of scope, the memory is unmapped automatically.
310     #[cfg(all(feature = "xen", unix))]
311     _slice: MmapXenSlice,
312 }
313 
314 #[allow(clippy::len_without_is_empty)]
315 impl PtrGuard {
316     #[allow(unused_variables)]
    fn new(mmap: Option<&MmapInfo>, addr: *mut u8, prot: i32, len: usize) -> Self {
318         #[cfg(all(feature = "xen", unix))]
319         let (addr, _slice) = {
320             let slice = MmapInfo::mmap(mmap, addr, prot, len);
321             (slice.addr(), slice)
322         };
323 
324         Self {
325             addr,
326             len,
327 
328             #[cfg(all(feature = "xen", unix))]
329             _slice,
330         }
331     }
332 
    fn read(mmap: Option<&MmapInfo>, addr: *mut u8, len: usize) -> Self {
334         Self::new(mmap, addr, libc::PROT_READ, len)
335     }
336 
337     /// Returns a non-mutable pointer to the beginning of the slice.
    pub fn as_ptr(&self) -> *const u8 {
339         self.addr
340     }
341 
342     /// Gets the length of the mapped region.
    pub fn len(&self) -> usize {
344         self.len
345     }
346 }
347 
348 /// A mutable guard to perform mapping and protect unmapping of the memory.
349 pub struct PtrGuardMut(PtrGuard);
350 
351 #[allow(clippy::len_without_is_empty)]
352 impl PtrGuardMut {
    fn write(mmap: Option<&MmapInfo>, addr: *mut u8, len: usize) -> Self {
354         Self(PtrGuard::new(mmap, addr, libc::PROT_WRITE, len))
355     }
356 
357     /// Returns a mutable pointer to the beginning of the slice. Mutable accesses performed
358     /// using the resulting pointer are not automatically accounted for by the dirty bitmap
359     /// tracking functionality.
    pub fn as_ptr(&self) -> *mut u8 {
361         self.0.addr
362     }
363 
364     /// Gets the length of the mapped region.
    pub fn len(&self) -> usize {
366         self.0.len
367     }
368 }
369 
370 /// A slice of raw memory that supports volatile access.
371 #[derive(Clone, Copy, Debug)]
372 pub struct VolatileSlice<'a, B = ()> {
373     addr: *mut u8,
374     size: usize,
375     bitmap: B,
376     mmap: Option<&'a MmapInfo>,
377 }
378 
379 impl<'a> VolatileSlice<'a, ()> {
380     /// Creates a slice of raw memory that must support volatile access.
381     ///
382     /// # Safety
383     ///
384     /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
385     /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
386     /// must also guarantee that all other users of the given chunk of memory are using volatile
387     /// accesses.
    pub unsafe fn new(addr: *mut u8, size: usize) -> VolatileSlice<'a> {
389         Self::with_bitmap(addr, size, (), None)
390     }
391 }
392 
393 impl<'a, B: BitmapSlice> VolatileSlice<'a, B> {
394     /// Creates a slice of raw memory that must support volatile access, and uses the provided
395     /// `bitmap` object for dirty page tracking.
396     ///
397     /// # Safety
398     ///
399     /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
400     /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
401     /// must also guarantee that all other users of the given chunk of memory are using volatile
402     /// accesses.
    pub unsafe fn with_bitmap(
404         addr: *mut u8,
405         size: usize,
406         bitmap: B,
407         mmap: Option<&'a MmapInfo>,
408     ) -> VolatileSlice<'a, B> {
409         VolatileSlice {
410             addr,
411             size,
412             bitmap,
413             mmap,
414         }
415     }
416 
417     /// Returns a pointer to the beginning of the slice. Mutable accesses performed
418     /// using the resulting pointer are not automatically accounted for by the dirty bitmap
419     /// tracking functionality.
420     #[deprecated(
421         since = "0.12.1",
422         note = "Use `.ptr_guard()` or `.ptr_guard_mut()` instead"
423     )]
424     #[cfg(not(all(feature = "xen", unix)))]
    pub fn as_ptr(&self) -> *mut u8 {
426         self.addr
427     }
428 
429     /// Returns a guard for the pointer to the underlying memory.
    pub fn ptr_guard(&self) -> PtrGuard {
431         PtrGuard::read(self.mmap, self.addr, self.len())
432     }
433 
434     /// Returns a mutable guard for the pointer to the underlying memory.
    pub fn ptr_guard_mut(&self) -> PtrGuardMut {
436         PtrGuardMut::write(self.mmap, self.addr, self.len())
437     }
438 
439     /// Gets the size of this slice.
    pub fn len(&self) -> usize {
441         self.size
442     }
443 
444     /// Checks if the slice is empty.
    pub fn is_empty(&self) -> bool {
446         self.size == 0
447     }
448 
449     /// Borrows the inner `BitmapSlice`.
    pub fn bitmap(&self) -> &B {
451         &self.bitmap
452     }
453 
454     /// Divides one slice into two at an index.
455     ///
456     /// # Example
457     ///
458     /// ```
459     /// # use vm_memory::{VolatileMemory, VolatileSlice};
460     /// #
461     /// # // Create a buffer
462     /// # let mut mem = [0u8; 32];
463     /// #
464     /// # // Get a `VolatileSlice` from the buffer
465     /// let vslice = VolatileSlice::from(&mut mem[..]);
466     ///
467     /// let (start, end) = vslice.split_at(8).expect("Could not split VolatileSlice");
468     /// assert_eq!(8, start.len());
469     /// assert_eq!(24, end.len());
470     /// ```
    pub fn split_at(&self, mid: usize) -> Result<(Self, Self)> {
472         let end = self.offset(mid)?;
473         let start =
474             // SAFETY: safe because self.offset() already checked the bounds
475             unsafe { VolatileSlice::with_bitmap(self.addr, mid, self.bitmap.clone(), self.mmap) };
476 
477         Ok((start, end))
478     }
479 
480     /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at
481     /// `offset` with `count` length.
482     ///
483     /// The returned subslice is a copy of this slice with the address increased by `offset` bytes
484     /// and the size set to `count` bytes.
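    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a 32-byte backing buffer:
    ///
    /// ```
    /// # use vm_memory::{VolatileMemory, VolatileSlice};
    /// #
    /// # let mut mem = [0u8; 32];
    /// # let vslice = VolatileSlice::from(&mut mem[..]);
    /// let sub = vslice.subslice(8, 16).expect("Could not get subslice");
    /// assert_eq!(16, sub.len());
    ///
    /// // Requests extending past the end of the slice fail.
    /// assert!(vslice.subslice(24, 16).is_err());
    /// ```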
    pub fn subslice(&self, offset: usize, count: usize) -> Result<Self> {
486         let _ = self.compute_end_offset(offset, count)?;
487 
488         // SAFETY: This is safe because the pointer is range-checked by compute_end_offset, and
489         // the lifetime is the same as the original slice.
490         unsafe {
491             Ok(VolatileSlice::with_bitmap(
492                 self.addr.add(offset),
493                 count,
494                 self.bitmap.slice_at(offset),
495                 self.mmap,
496             ))
497         }
498     }
499 
500     /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at
501     /// `offset`.
502     ///
503     /// The returned subslice is a copy of this slice with the address increased by `count` bytes
504     /// and the size reduced by `count` bytes.
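    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a 32-byte backing buffer:
    ///
    /// ```
    /// # use vm_memory::{VolatileMemory, VolatileSlice};
    /// #
    /// # let mut mem = [0u8; 32];
    /// # let vslice = VolatileSlice::from(&mut mem[..]);
    /// let rest = vslice.offset(8).expect("Could not get offset slice");
    /// assert_eq!(24, rest.len());
    ///
    /// // Offsets past the end of the slice fail.
    /// assert!(vslice.offset(40).is_err());
    /// ```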
    pub fn offset(&self, count: usize) -> Result<VolatileSlice<'a, B>> {
506         let new_addr = (self.addr as usize)
507             .checked_add(count)
508             .ok_or(Error::Overflow {
509                 base: self.addr as usize,
510                 offset: count,
511             })?;
512         let new_size = self
513             .size
514             .checked_sub(count)
515             .ok_or(Error::OutOfBounds { addr: new_addr })?;
516         // SAFETY: Safe because the memory has the same lifetime and points to a subset of the
517         // memory of the original slice.
518         unsafe {
519             Ok(VolatileSlice::with_bitmap(
520                 self.addr.add(count),
521                 new_size,
522                 self.bitmap.slice_at(count),
523                 self.mmap,
524             ))
525         }
526     }
527 
528     /// Copies as many elements of type `T` as possible from this slice to `buf`.
529     ///
530     /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
531     /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks
532     /// using volatile reads.
533     ///
534     /// # Examples
535     ///
536     /// ```
537     /// # use vm_memory::{VolatileMemory, VolatileSlice};
538     /// #
539     /// let mut mem = [0u8; 32];
540     /// let vslice = VolatileSlice::from(&mut mem[..]);
541     /// let mut buf = [5u8; 16];
542     /// let res = vslice.copy_to(&mut buf[..]);
543     ///
544     /// assert_eq!(16, res);
545     /// for &v in &buf[..] {
546     ///     assert_eq!(v, 0);
547     /// }
548     /// ```
    pub fn copy_to<T>(&self, buf: &mut [T]) -> usize
550     where
551         T: ByteValued,
552     {
553         // A fast path for u8/i8
554         if size_of::<T>() == 1 {
555             let total = buf.len().min(self.len());
556 
557             // SAFETY:
558             // - dst is valid for writes of at least `total`, since total <= buf.len()
559             // - src is valid for reads of at least `total` as total <= self.len()
560             // - The regions are non-overlapping as `src` points to guest memory and `buf` is
561             //   a slice and thus has to live outside of guest memory (there can be more slices to
562             //   guest memory without violating rust's aliasing rules)
563             // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine
564             unsafe { copy_from_volatile_slice(buf.as_mut_ptr() as *mut u8, self, total) }
565         } else {
566             let count = self.size / size_of::<T>();
567             let source = self.get_array_ref::<T>(0, count).unwrap();
568             source.copy_to(buf)
569         }
570     }
571 
572     /// Copies as many bytes as possible from this slice to the provided `slice`.
573     ///
574     /// The copies happen in an undefined order.
575     ///
576     /// # Examples
577     ///
578     /// ```
579     /// # use vm_memory::{VolatileMemory, VolatileSlice};
580     /// #
581     /// # // Create a buffer
582     /// # let mut mem = [0u8; 32];
583     /// #
584     /// # // Get a `VolatileSlice` from the buffer
585     /// # let vslice = VolatileSlice::from(&mut mem[..]);
586     /// #
587     /// vslice.copy_to_volatile_slice(
588     ///     vslice
589     ///         .get_slice(16, 16)
590     ///         .expect("Could not get VolatileSlice"),
591     /// );
592     /// ```
    pub fn copy_to_volatile_slice<S: BitmapSlice>(&self, slice: VolatileSlice<S>) {
594         // SAFETY: Safe because the pointers are range-checked when the slices
595         // are created, and they never escape the VolatileSlices.
596         // FIXME: ... however, is it really okay to mix non-volatile
597         // operations such as copy with read_volatile and write_volatile?
598         unsafe {
599             let count = min(self.size, slice.size);
600             copy(self.addr, slice.addr, count);
601             slice.bitmap.mark_dirty(0, count);
602         }
603     }
604 
605     /// Copies as many elements of type `T` as possible from `buf` to this slice.
606     ///
607     /// The copy happens from smallest to largest address in `T` sized chunks using volatile writes.
608     ///
609     /// # Examples
610     ///
611     /// ```
612     /// # use vm_memory::{VolatileMemory, VolatileSlice};
613     /// #
614     /// let mut mem = [0u8; 32];
615     /// let vslice = VolatileSlice::from(&mut mem[..]);
616     ///
617     /// let buf = [5u8; 64];
618     /// vslice.copy_from(&buf[..]);
619     ///
620     /// for i in 0..4 {
621     ///     let val = vslice
622     ///         .get_ref::<u32>(i * 4)
623     ///         .expect("Could not get value")
624     ///         .load();
625     ///     assert_eq!(val, 0x05050505);
626     /// }
627     /// ```
    pub fn copy_from<T>(&self, buf: &[T])
629     where
630         T: ByteValued,
631     {
632         // A fast path for u8/i8
633         if size_of::<T>() == 1 {
634             let total = buf.len().min(self.len());
635             // SAFETY:
636             // - dst is valid for writes of at least `total`, since total <= self.len()
637             // - src is valid for reads of at least `total` as total <= buf.len()
638             // - The regions are non-overlapping as `dst` points to guest memory and `buf` is
639             //   a slice and thus has to live outside of guest memory (there can be more slices to
640             //   guest memory without violating rust's aliasing rules)
641             // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine
642             unsafe { copy_to_volatile_slice(self, buf.as_ptr() as *const u8, total) };
643         } else {
644             let count = self.size / size_of::<T>();
645             // It's ok to use unwrap here because `count` was computed based on the current
646             // length of `self`.
647             let dest = self.get_array_ref::<T>(0, count).unwrap();
648 
649             // No need to explicitly call `mark_dirty` after this call because
650             // `VolatileArrayRef::copy_from` already takes care of that.
651             dest.copy_from(buf);
652         };
653     }
654 
655     /// Checks if the current slice is aligned at `alignment` bytes.
    fn check_alignment(&self, alignment: usize) -> Result<()> {
657         // Check that the desired alignment is a power of two.
658         debug_assert!((alignment & (alignment - 1)) == 0);
659         if ((self.addr as usize) & (alignment - 1)) != 0 {
660             return Err(Error::Misaligned {
661                 addr: self.addr as usize,
662                 alignment,
663             });
664         }
665         Ok(())
666     }
667 }
668 
669 impl<B: BitmapSlice> Bytes<usize> for VolatileSlice<'_, B> {
670     type E = Error;
671 
672     /// # Examples
673     /// * Write a slice of size 5 at offset 1020 of a 1024-byte `VolatileSlice`.
674     ///
675     /// ```
676     /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
677     /// #
678     /// let mut mem = [0u8; 1024];
679     /// let vslice = VolatileSlice::from(&mut mem[..]);
680     /// let res = vslice.write(&[1, 2, 3, 4, 5], 1020);
681     ///
682     /// assert!(res.is_ok());
683     /// assert_eq!(res.unwrap(), 4);
684     /// ```
    fn write(&self, buf: &[u8], addr: usize) -> Result<usize> {
686         if buf.is_empty() {
687             return Ok(0);
688         }
689 
690         if addr >= self.size {
691             return Err(Error::OutOfBounds { addr });
692         }
693 
694         let total = buf.len().min(self.len() - addr);
695         let dst = self.subslice(addr, total)?;
696 
697         // SAFETY:
698         // We check above that `addr` is a valid offset within this volatile slice, and by
699         // the invariants of `VolatileSlice::new`, this volatile slice points to contiguous
700         // memory of length self.len(). Furthermore, both src and dst of the call to
701         // copy_to_volatile_slice are valid for reads and writes respectively of length `total`
702         // since total is the minimum of lengths of the memory areas pointed to. The areas do not
703         // overlap, since `dst` is inside guest memory, and buf is a slice (no slices to guest
704         // memory are possible without violating rust's aliasing rules).
705         Ok(unsafe { copy_to_volatile_slice(&dst, buf.as_ptr(), total) })
706     }
707 
708     /// # Examples
709     /// * Read a slice of size 16 at offset 1010 of a 1024-byte `VolatileSlice`.
710     ///
711     /// ```
712     /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
713     /// #
714     /// let mut mem = [0u8; 1024];
715     /// let vslice = VolatileSlice::from(&mut mem[..]);
716     /// let buf = &mut [0u8; 16];
717     /// let res = vslice.read(buf, 1010);
718     ///
719     /// assert!(res.is_ok());
720     /// assert_eq!(res.unwrap(), 14);
721     /// ```
    fn read(&self, buf: &mut [u8], addr: usize) -> Result<usize> {
723         if buf.is_empty() {
724             return Ok(0);
725         }
726 
727         if addr >= self.size {
728             return Err(Error::OutOfBounds { addr });
729         }
730 
731         let total = buf.len().min(self.len() - addr);
732         let src = self.subslice(addr, total)?;
733 
734         // SAFETY:
735         // We check above that `addr` is a valid offset within this volatile slice, and by
736         // the invariants of `VolatileSlice::new`, this volatile slice points to contiguous
737         // memory of length self.len(). Furthermore, both src and dst of the call to
738         // copy_from_volatile_slice are valid for reads and writes respectively of length `total`
739         // since total is the minimum of lengths of the memory areas pointed to. The areas do not
740         // overlap, since `dst` is inside guest memory, and buf is a slice (no slices to guest
741         // memory are possible without violating rust's aliasing rules).
742         unsafe { Ok(copy_from_volatile_slice(buf.as_mut_ptr(), &src, total)) }
743     }
744 
745     /// # Examples
746     /// * Write a slice at offset 256.
747     ///
748     /// ```
749     /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
750     /// #
751     /// # // Create a buffer
752     /// # let mut mem = [0u8; 1024];
753     /// #
754     /// # // Get a `VolatileSlice` from the buffer
755     /// # let vslice = VolatileSlice::from(&mut mem[..]);
756     /// #
757     /// let res = vslice.write_slice(&[1, 2, 3, 4, 5], 256);
758     ///
759     /// assert!(res.is_ok());
760     /// assert_eq!(res.unwrap(), ());
761     /// ```
    fn write_slice(&self, buf: &[u8], addr: usize) -> Result<()> {
763         // `mark_dirty` called within `self.write`.
764         let len = self.write(buf, addr)?;
765         if len != buf.len() {
766             return Err(Error::PartialBuffer {
767                 expected: buf.len(),
768                 completed: len,
769             });
770         }
771         Ok(())
772     }
773 
774     /// # Examples
775     /// * Read a slice of size 16 at offset 256.
776     ///
777     /// ```
778     /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
779     /// #
780     /// # // Create a buffer
781     /// # let mut mem = [0u8; 1024];
782     /// #
783     /// # // Get a `VolatileSlice` from the buffer
784     /// # let vslice = VolatileSlice::from(&mut mem[..]);
785     /// #
786     /// let buf = &mut [0u8; 16];
787     /// let res = vslice.read_slice(buf, 256);
788     ///
789     /// assert!(res.is_ok());
790     /// ```
    fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<()> {
792         let len = self.read(buf, addr)?;
793         if len != buf.len() {
794             return Err(Error::PartialBuffer {
795                 expected: buf.len(),
796                 completed: len,
797             });
798         }
799         Ok(())
800     }
801 
802     /// # Examples
803     ///
804     /// * Read bytes from /dev/urandom
805     ///
806     /// ```
807     /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
808     /// # use std::fs::File;
809     /// # use std::path::Path;
810     /// #
811     /// # if cfg!(unix) {
812     /// # let mut mem = [0u8; 1024];
813     /// # let vslice = VolatileSlice::from(&mut mem[..]);
814     /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
815     ///
816     /// vslice
817     ///     .read_from(32, &mut file, 128)
818     ///     .expect("Could not read bytes from file into VolatileSlice");
819     ///
820     /// let rand_val: u32 = vslice
821     ///     .read_obj(40)
822     ///     .expect("Could not read value from VolatileSlice");
823     /// # }
824     /// ```
    fn read_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<usize>
826     where
827         F: Read,
828     {
829         let _ = self.compute_end_offset(addr, count)?;
830 
831         let mut dst = vec![0; count];
832 
833         let bytes_read = loop {
834             match src.read(&mut dst) {
835                 Ok(n) => break n,
836                 Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
837                 Err(e) => return Err(Error::IOError(e)),
838             }
839         };
840 
841         // There is no guarantee that the read implementation is well-behaved, see the docs for
842         // Read::read.
843         assert!(bytes_read <= count);
844 
845         let slice = self.subslice(addr, bytes_read)?;
846 
847         // SAFETY: We have checked via compute_end_offset that accessing the specified
848         // region of guest memory is valid. We asserted that the value returned by `read` is between
849         // 0 and count (the length of the buffer passed to it), and that the
850         // regions don't overlap because we allocated the Vec outside of guest memory.
851         Ok(unsafe { copy_to_volatile_slice(&slice, dst.as_ptr(), bytes_read) })
852     }
853 
854     /// # Examples
855     ///
856     /// * Read bytes from /dev/urandom
857     ///
858     /// ```
859     /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
860     /// # use std::fs::File;
861     /// # use std::path::Path;
862     /// #
863     /// # if cfg!(unix) {
864     /// # let mut mem = [0u8; 1024];
865     /// # let vslice = VolatileSlice::from(&mut mem[..]);
866     /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
867     ///
868     /// vslice
869     ///     .read_exact_from(32, &mut file, 128)
870     ///     .expect("Could not read bytes from file into VolatileSlice");
871     ///
872     /// let rand_val: u32 = vslice
873     ///     .read_obj(40)
874     ///     .expect("Could not read value from VolatileSlice");
875     /// # }
876     /// ```
    fn read_exact_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<()>
878     where
879         F: Read,
880     {
881         let _ = self.compute_end_offset(addr, count)?;
882 
883         let mut dst = vec![0; count];
884 
885         // Read into buffer that can be copied into guest memory
886         src.read_exact(&mut dst).map_err(Error::IOError)?;
887 
888         let slice = self.subslice(addr, count)?;
889 
890         // SAFETY: We have checked via compute_end_offset that accessing the specified
891         // region of guest memory is valid. We know that `dst` has len `count`, and that the
892         // regions don't overlap because we allocated the Vec outside of guest memory
893         unsafe { copy_to_volatile_slice(&slice, dst.as_ptr(), count) };
894         Ok(())
895     }
896 
897     /// # Examples
898     ///
899     /// * Write 128 bytes to /dev/null
900     ///
901     /// ```
902     /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
903     /// # use std::fs::OpenOptions;
904     /// # use std::path::Path;
905     /// #
906     /// # if cfg!(unix) {
907     /// # let mut mem = [0u8; 1024];
908     /// # let vslice = VolatileSlice::from(&mut mem[..]);
909     /// let mut file = OpenOptions::new()
910     ///     .write(true)
911     ///     .open("/dev/null")
912     ///     .expect("Could not open /dev/null");
913     ///
914     /// vslice
915     ///     .write_to(32, &mut file, 128)
916     ///     .expect("Could not write value from VolatileSlice to /dev/null");
917     /// # }
918     /// ```
    fn write_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<usize>
920     where
921         F: Write,
922     {
923         let _ = self.compute_end_offset(addr, count)?;
924         let mut src = Vec::with_capacity(count);
925 
926         let slice = self.subslice(addr, count)?;
927 
928         // SAFETY: We checked the addr and count so accessing the slice is safe.
929         // It is safe to read from volatile memory. The Vec has capacity for exactly `count`
930         // many bytes, and the memory regions pointed to definitely do not overlap, as we
931         // allocated src outside of guest memory.
932         // The call to set_len is safe because the bytes between 0 and count have been initialized
933         // via copying from guest memory, and the Vec's capacity is `count`
934         unsafe {
935             copy_from_volatile_slice(src.as_mut_ptr(), &slice, count);
936             src.set_len(count);
937         }
938 
939         loop {
940             match dst.write(&src) {
941                 Ok(n) => break Ok(n),
942                 Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
943                 Err(e) => break Err(Error::IOError(e)),
944             }
945         }
946     }
947 
948     /// # Examples
949     ///
950     /// * Write 128 bytes to /dev/null
951     ///
952     /// ```
953     /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
954     /// # use std::fs::OpenOptions;
955     /// # use std::path::Path;
956     /// #
957     /// # if cfg!(unix) {
958     /// # let mut mem = [0u8; 1024];
959     /// # let vslice = VolatileSlice::from(&mut mem[..]);
960     /// let mut file = OpenOptions::new()
961     ///     .write(true)
962     ///     .open("/dev/null")
963     ///     .expect("Could not open /dev/null");
964     ///
965     /// vslice
966     ///     .write_all_to(32, &mut file, 128)
967     ///     .expect("Could not write value from VolatileSlice to /dev/null");
968     /// # }
969     /// ```
    fn write_all_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<()>
971     where
972         F: Write,
973     {
974         let _ = self.compute_end_offset(addr, count)?;
975         let mut src = Vec::with_capacity(count);
976 
977         let slice = self.subslice(addr, count)?;
978 
979         // SAFETY: We checked the addr and count so accessing the slice is safe.
980         // It is safe to read from volatile memory. The Vec has capacity for exactly `count`
981         // many bytes, and the memory regions pointed to definitely do not overlap, as we
982         // allocated src outside of guest memory.
983         // The call to set_len is safe because the bytes between 0 and count have been initialized
984         // via copying from guest memory, and the Vec's capacity is `count`
985         unsafe {
986             copy_from_volatile_slice(src.as_mut_ptr(), &slice, count);
987             src.set_len(count);
988         }
989 
990         dst.write_all(&src).map_err(Error::IOError)?;
991 
992         Ok(())
993     }
994 
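    /// # Examples
    ///
    /// A minimal sketch of an atomic store followed by a load; `u8` is used here so the alignment
    /// requirement of the underlying atomic type is always met:
    ///
    /// ```
    /// # use std::sync::atomic::Ordering;
    /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
    /// #
    /// # let mut mem = [0u8; 32];
    /// # let vslice = VolatileSlice::from(&mut mem[..]);
    /// vslice
    ///     .store(0x42u8, 16, Ordering::Relaxed)
    ///     .expect("Could not store value");
    /// let val: u8 = vslice.load(16, Ordering::Relaxed).expect("Could not load value");
    /// assert_eq!(val, 0x42);
    /// ```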
    fn store<T: AtomicAccess>(&self, val: T, addr: usize, order: Ordering) -> Result<()> {
996         self.get_atomic_ref::<T::A>(addr).map(|r| {
997             r.store(val.into(), order);
998             self.bitmap.mark_dirty(addr, size_of::<T>())
999         })
1000     }
1001 
    fn load<T: AtomicAccess>(&self, addr: usize, order: Ordering) -> Result<T> {
1003         self.get_atomic_ref::<T::A>(addr)
1004             .map(|r| r.load(order).into())
1005     }
1006 }
1007 
1008 impl<B: BitmapSlice> VolatileMemory for VolatileSlice<'_, B> {
1009     type B = B;
1010 
    fn len(&self) -> usize {
1012         self.size
1013     }
1014 
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<B>> {
1016         let _ = self.compute_end_offset(offset, count)?;
1017         Ok(
1018             // SAFETY: This is safe because the pointer is range-checked by compute_end_offset, and
1019             // the lifetime is the same as self.
1020             unsafe {
1021                 VolatileSlice::with_bitmap(
1022                     self.addr.add(offset),
1023                     count,
1024                     self.bitmap.slice_at(offset),
1025                     self.mmap,
1026                 )
1027             },
1028         )
1029     }
1030 }
1031 
1032 /// A memory location that supports volatile access to an instance of `T`.
1033 ///
1034 /// # Examples
1035 ///
1036 /// ```
1037 /// # use vm_memory::VolatileRef;
1038 /// #
1039 /// let mut v = 5u32;
1040 /// let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32 as *mut u8) };
1041 ///
1042 /// assert_eq!(v, 5);
1043 /// assert_eq!(v_ref.load(), 5);
1044 /// v_ref.store(500);
1045 /// assert_eq!(v, 500);
1046 /// ```
1047 #[derive(Clone, Copy, Debug)]
1048 pub struct VolatileRef<'a, T, B = ()> {
1049     addr: *mut Packed<T>,
1050     bitmap: B,
1051     mmap: Option<&'a MmapInfo>,
1052 }
1053 
1054 impl<'a, T> VolatileRef<'a, T, ()>
1055 where
1056     T: ByteValued,
1057 {
1058     /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`.
1059     ///
1060     /// # Safety
1061     ///
1062     /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
1063     /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
1064     /// must also guarantee that all other users of the given chunk of memory are using volatile
1065     /// accesses.
    pub unsafe fn new(addr: *mut u8) -> Self {
1067         Self::with_bitmap(addr, (), None)
1068     }
1069 }
1070 
1071 #[allow(clippy::len_without_is_empty)]
1072 impl<'a, T, B> VolatileRef<'a, T, B>
1073 where
1074     T: ByteValued,
1075     B: BitmapSlice,
1076 {
1077     /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`, using the
1078     /// provided `bitmap` object for dirty page tracking.
1079     ///
1080     /// # Safety
1081     ///
1082     /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
1083     /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
1084     /// must also guarantee that all other users of the given chunk of memory are using volatile
1085     /// accesses.
    pub unsafe fn with_bitmap(addr: *mut u8, bitmap: B, mmap: Option<&'a MmapInfo>) -> Self {
1087         VolatileRef {
1088             addr: addr as *mut Packed<T>,
1089             bitmap,
1090             mmap,
1091         }
1092     }
1093 
1094     /// Returns a pointer to the underlying memory. Mutable accesses performed
1095     /// using the resulting pointer are not automatically accounted for by the dirty bitmap
1096     /// tracking functionality.
1097     #[deprecated(
1098         since = "0.12.1",
1099         note = "Use `.ptr_guard()` or `.ptr_guard_mut()` instead"
1100     )]
1101     #[cfg(not(all(feature = "xen", unix)))]
    pub fn as_ptr(&self) -> *mut u8 {
1103         self.addr as *mut u8
1104     }
1105 
1106     /// Returns a guard for the pointer to the underlying memory.
    pub fn ptr_guard(&self) -> PtrGuard {
1108         PtrGuard::read(self.mmap, self.addr as *mut u8, self.len())
1109     }
1110 
1111     /// Returns a mutable guard for the pointer to the underlying memory.
    pub fn ptr_guard_mut(&self) -> PtrGuardMut {
1113         PtrGuardMut::write(self.mmap, self.addr as *mut u8, self.len())
1114     }
1115 
1116     /// Gets the size of the referenced type `T`.
1117     ///
1118     /// # Examples
1119     ///
1120     /// ```
1121     /// # use std::mem::size_of;
1122     /// # use vm_memory::VolatileRef;
1123     /// #
1124     /// let v_ref = unsafe { VolatileRef::<u32>::new(0 as *mut _) };
1125     /// assert_eq!(v_ref.len(), size_of::<u32>() as usize);
1126     /// ```
    pub fn len(&self) -> usize {
1128         size_of::<T>()
1129     }
1130 
1131     /// Borrows the inner `BitmapSlice`.
    pub fn bitmap(&self) -> &B {
1133         &self.bitmap
1134     }
1135 
1136     /// Does a volatile write of the value `v` to the address of this ref.
1137     #[inline(always)]
    pub fn store(&self, v: T) {
1139         let guard = self.ptr_guard_mut();
1140 
1141         // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
1142         unsafe { write_volatile(guard.as_ptr() as *mut Packed<T>, Packed::<T>(v)) };
1143         self.bitmap.mark_dirty(0, self.len())
1144     }
1145 
1146     /// Does a volatile read of the value at the address of this ref.
1147     #[inline(always)]
    pub fn load(&self) -> T {
1149         let guard = self.ptr_guard();
1150 
1151         // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
1152         // For the purposes of demonstrating why read_volatile is necessary, try replacing the code
1153         // in this function with the commented code below and running `cargo test --release`.
1154         // unsafe { *(self.addr as *const T) }
1155         unsafe { read_volatile(guard.as_ptr() as *const Packed<T>).0 }
1156     }
1157 
1158     /// Converts this to a [`VolatileSlice`](struct.VolatileSlice.html) with the same size and
1159     /// address.
    pub fn to_slice(&self) -> VolatileSlice<'a, B> {
1161         // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
1162         unsafe {
1163             VolatileSlice::with_bitmap(
1164                 self.addr as *mut u8,
1165                 size_of::<T>(),
1166                 self.bitmap.clone(),
1167                 self.mmap,
1168             )
1169         }
1170     }
1171 }
1172 
1173 /// A memory location that supports volatile access to an array of elements of type `T`.
1174 ///
1175 /// # Examples
1176 ///
1177 /// ```
1178 /// # use vm_memory::VolatileArrayRef;
1179 /// #
1180 /// let mut v = [5u32; 1];
1181 /// let v_ref = unsafe { VolatileArrayRef::new(&mut v[0] as *mut u32 as *mut u8, v.len()) };
1182 ///
1183 /// assert_eq!(v[0], 5);
1184 /// assert_eq!(v_ref.load(0), 5);
1185 /// v_ref.store(0, 500);
1186 /// assert_eq!(v[0], 500);
1187 /// ```
1188 #[derive(Clone, Copy, Debug)]
1189 pub struct VolatileArrayRef<'a, T, B = ()> {
1190     addr: *mut u8,
1191     nelem: usize,
1192     bitmap: B,
1193     phantom: PhantomData<&'a T>,
1194     mmap: Option<&'a MmapInfo>,
1195 }
1196 
1197 impl<'a, T> VolatileArrayRef<'a, T>
1198 where
1199     T: ByteValued,
1200 {
1201     /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of
1202     /// type `T`.
1203     ///
1204     /// # Safety
1205     ///
1206     /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for
1207     /// `nelem` values of type `T` and is available for the duration of the lifetime of the new
    /// `VolatileArrayRef`. The caller must also guarantee that all other users of the given chunk of
1209     /// memory are using volatile accesses.
    pub unsafe fn new(addr: *mut u8, nelem: usize) -> Self {
1211         Self::with_bitmap(addr, nelem, (), None)
1212     }
1213 }
1214 
1215 impl<'a, T, B> VolatileArrayRef<'a, T, B>
1216 where
1217     T: ByteValued,
1218     B: BitmapSlice,
1219 {
1220     /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of
1221     /// type `T`, using the provided `bitmap` object for dirty page tracking.
1222     ///
1223     /// # Safety
1224     ///
1225     /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for
1226     /// `nelem` values of type `T` and is available for the duration of the lifetime of the new
    /// `VolatileArrayRef`. The caller must also guarantee that all other users of the given chunk of
1228     /// memory are using volatile accesses.
    pub unsafe fn with_bitmap(
1230         addr: *mut u8,
1231         nelem: usize,
1232         bitmap: B,
1233         mmap: Option<&'a MmapInfo>,
1234     ) -> Self {
1235         VolatileArrayRef {
1236             addr,
1237             nelem,
1238             bitmap,
1239             phantom: PhantomData,
1240             mmap,
1241         }
1242     }
1243 
1244     /// Returns `true` if this array is empty.
1245     ///
1246     /// # Examples
1247     ///
1248     /// ```
1249     /// # use vm_memory::VolatileArrayRef;
1250     /// #
1251     /// let v_array = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 0) };
1252     /// assert!(v_array.is_empty());
1253     /// ```
    pub fn is_empty(&self) -> bool {
1255         self.nelem == 0
1256     }
1257 
1258     /// Returns the number of elements in the array.
1259     ///
1260     /// # Examples
1261     ///
1262     /// ```
1263     /// # use vm_memory::VolatileArrayRef;
1264     /// #
1265     /// # let v_array = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 1) };
1266     /// assert_eq!(v_array.len(), 1);
1267     /// ```
    pub fn len(&self) -> usize {
1269         self.nelem
1270     }
1271 
1272     /// Returns the size of `T`.
1273     ///
1274     /// # Examples
1275     ///
1276     /// ```
1277     /// # use std::mem::size_of;
1278     /// # use vm_memory::VolatileArrayRef;
1279     /// #
1280     /// let v_ref = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 0) };
1281     /// assert_eq!(v_ref.element_size(), size_of::<u32>() as usize);
1282     /// ```
    pub fn element_size(&self) -> usize {
1284         size_of::<T>()
1285     }
1286 
1287     /// Returns a pointer to the underlying memory. Mutable accesses performed
1288     /// using the resulting pointer are not automatically accounted for by the dirty bitmap
1289     /// tracking functionality.
1290     #[deprecated(
1291         since = "0.12.1",
1292         note = "Use `.ptr_guard()` or `.ptr_guard_mut()` instead"
1293     )]
1294     #[cfg(not(all(feature = "xen", unix)))]
    pub fn as_ptr(&self) -> *mut u8 {
1296         self.addr
1297     }
1298 
1299     /// Returns a guard for the pointer to the underlying memory.
    pub fn ptr_guard(&self) -> PtrGuard {
1301         PtrGuard::read(self.mmap, self.addr, self.len())
1302     }
1303 
1304     /// Returns a mutable guard for the pointer to the underlying memory.
    pub fn ptr_guard_mut(&self) -> PtrGuardMut {
1306         PtrGuardMut::write(self.mmap, self.addr, self.len())
1307     }
1308 
1309     /// Borrows the inner `BitmapSlice`.
    pub fn bitmap(&self) -> &B {
1311         &self.bitmap
1312     }
1313 
1314     /// Converts this to a `VolatileSlice` with the same size and address.
    pub fn to_slice(&self) -> VolatileSlice<'a, B> {
1316         // SAFETY: Safe as long as the caller validated addr when creating this object.
1317         unsafe {
1318             VolatileSlice::with_bitmap(
1319                 self.addr,
1320                 self.nelem * self.element_size(),
1321                 self.bitmap.clone(),
1322                 self.mmap,
1323             )
1324         }
1325     }
1326 
    /// Returns a [`VolatileRef`](struct.VolatileRef.html) to the element at `index`.
    ///
    /// # Panics
    ///
    /// Panics if `index` is not less than the number of elements in the array to which `&self`
    /// points.
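    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a small byte array as the backing memory:
    ///
    /// ```
    /// # use vm_memory::VolatileArrayRef;
    /// #
    /// let mut v = [0u8; 4];
    /// let v_ref = unsafe { VolatileArrayRef::new(v.as_mut_ptr(), v.len()) };
    ///
    /// v_ref.ref_at(2).store(7u8);
    /// assert_eq!(v_ref.load(2), 7);
    /// ```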
    pub fn ref_at(&self, index: usize) -> VolatileRef<'a, T, B> {
1333         assert!(index < self.nelem);
1334         // SAFETY: Safe because the memory has the same lifetime and points to a subset of the
1335         // memory of the VolatileArrayRef.
1336         unsafe {
1337             // byteofs must fit in an isize as it was checked in get_array_ref.
1338             let byteofs = (self.element_size() * index) as isize;
1339             let ptr = self.addr.offset(byteofs);
1340             VolatileRef::with_bitmap(ptr, self.bitmap.slice_at(byteofs as usize), self.mmap)
1341         }
1342     }
1343 
1344     /// Does a volatile read of the element at `index`.
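    ///
    /// # Examples
    ///
    /// A minimal sketch with an arbitrary backing array:
    ///
    /// ```
    /// # use vm_memory::VolatileArrayRef;
    /// #
    /// let mut v = [2u8, 4, 6, 8];
    /// let v_ref = unsafe { VolatileArrayRef::<u8>::new(v.as_mut_ptr(), v.len()) };
    /// assert_eq!(v_ref.load(2), 6);
    /// ```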
1345     pub fn load(&self, index: usize) -> T {
1346         self.ref_at(index).load()
1347     }
1348 
1349     /// Does a volatile write of the element at `index`.
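    ///
    /// # Examples
    ///
    /// A minimal sketch with an arbitrary backing array:
    ///
    /// ```
    /// # use vm_memory::VolatileArrayRef;
    /// #
    /// let mut v = [0u8; 4];
    /// let v_ref = unsafe { VolatileArrayRef::<u8>::new(v.as_mut_ptr(), v.len()) };
    /// v_ref.store(1, 42u8);
    /// assert_eq!(v_ref.load(1), 42);
    /// ```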
1350     pub fn store(&self, index: usize, value: T) {
1351         // The `VolatileRef::store` call below implements the required dirty bitmap tracking logic,
1352         // so no need to do that in this method as well.
1353         self.ref_at(index).store(value)
1354     }
1355 
1356     /// Copies as many elements of type `T` as possible from this array to `buf`.
1357     ///
1358     /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
1359     /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks
1360     /// using volatile reads.
1361     ///
1362     /// # Examples
1363     ///
1364     /// ```
1365     /// # use vm_memory::VolatileArrayRef;
1366     /// #
1367     /// let mut v = [0u8; 32];
1368     /// let v_ref = unsafe { VolatileArrayRef::new(v.as_mut_ptr(), v.len()) };
1369     ///
1370     /// let mut buf = [5u8; 16];
1371     /// v_ref.copy_to(&mut buf[..]);
1372     /// for &v in &buf[..] {
1373     ///     assert_eq!(v, 0);
1374     /// }
1375     /// ```
1376     pub fn copy_to(&self, buf: &mut [T]) -> usize {
1377         // A fast path for u8/i8
1378         if size_of::<T>() == 1 {
1379             let source = self.to_slice();
1380             let total = buf.len().min(source.len());
1381 
1382             // SAFETY:
1383             // - dst is valid for writes of at least `total`, since total <= buf.len()
1384             // - src is valid for reads of at least `total` as total <= source.len()
1385             // - The regions are non-overlapping as `src` points to guest memory and `buf` is
1386             //   a slice and thus has to live outside of guest memory (there can be more slices to
1387             //   guest memory without violating rust's aliasing rules)
1388             // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine
1389             return unsafe {
1390                 copy_from_volatile_slice(buf.as_mut_ptr() as *mut u8, &source, total)
1391             };
1392         }
1393 
1394         let guard = self.ptr_guard();
1395         let mut ptr = guard.as_ptr() as *const Packed<T>;
1396         let start = ptr;
1397 
1398         for v in buf.iter_mut().take(self.len()) {
1399             // SAFETY: read_volatile is safe because the pointers are range-checked when
1400             // the slices are created, and they never escape the VolatileSlices.
1401             // ptr::add is safe because get_array_ref() validated that
1402             // size_of::<T>() * self.len() fits in an isize.
1403             unsafe {
1404                 *v = read_volatile(ptr).0;
1405                 ptr = ptr.add(1);
1406             }
1407         }
1408 
1409         // SAFETY: `start` and `ptr` point into the same allocation, so `offset_from` is well-defined.
1410         unsafe { ptr.offset_from(start) as usize }
1411     }
1412 
1413     /// Copies as many bytes as possible from this array to the provided `slice`.
1414     ///
1415     /// The copies happen in an undefined order.
1416     ///
1417     /// # Examples
1418     ///
1419     /// ```
1420     /// # use vm_memory::VolatileArrayRef;
1421     /// #
1422     /// let mut v = [0u8; 32];
1423     /// let v_ref = unsafe { VolatileArrayRef::<u8>::new(v.as_mut_ptr(), v.len()) };
1424     /// let mut buf = [5u8; 16];
1425     /// let v_ref2 = unsafe { VolatileArrayRef::<u8>::new(buf.as_mut_ptr(), buf.len()) };
1426     ///
1427     /// v_ref.copy_to_volatile_slice(v_ref2.to_slice());
1428     /// for &v in &buf[..] {
1429     ///     assert_eq!(v, 0);
1430     /// }
1431     /// ```
1432     pub fn copy_to_volatile_slice<S: BitmapSlice>(&self, slice: VolatileSlice<S>) {
1433         // SAFETY: Safe because the pointers are range-checked when the slices
1434         // are created, and they never escape the VolatileSlices.
1435         // FIXME: ... however, is it really okay to mix non-volatile
1436         // operations such as copy with read_volatile and write_volatile?
1437         unsafe {
1438             let count = min(self.len() * self.element_size(), slice.size);
1439             copy(self.addr, slice.addr, count);
1440             slice.bitmap.mark_dirty(0, count);
1441         }
1442     }
1443 
1444     /// Copies as many elements of type `T` as possible from `buf` to this array.
1445     ///
1446     /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
1447     /// to this array's memory. The copy happens from smallest to largest address in
1448     /// `T` sized chunks using volatile writes.
1449     ///
1450     /// # Examples
1451     ///
1452     /// ```
1453     /// # use vm_memory::VolatileArrayRef;
1454     /// #
1455     /// let mut v = [0u8; 32];
1456     /// let v_ref = unsafe { VolatileArrayRef::<u8>::new(v.as_mut_ptr(), v.len()) };
1457     ///
1458     /// let buf = [5u8; 64];
1459     /// v_ref.copy_from(&buf[..]);
1460     /// for &val in &v[..] {
1461     ///     assert_eq!(5u8, val);
1462     /// }
1463     /// ```
1464     pub fn copy_from(&self, buf: &[T]) {
1465         // A fast path for u8/i8
1466         if size_of::<T>() == 1 {
1467             let destination = self.to_slice();
1468             let total = buf.len().min(destination.len());
1469 
1470             // absurd formatting brought to you by clippy
1471             // SAFETY:
1472             // - dst is valid for writes of at least `total`, since total <= destination.len()
1473             // - src is valid for reads of at least `total` as total <= buf.len()
1474             // - The regions are non-overlapping as `dst` points to guest memory and `buf` is
1475             //   a slice and thus has to live outside of guest memory (there can be more slices to
1476             //   guest memory without violating rust's aliasing rules)
1477             // - size is always a multiple of alignment, so treating *const T as *const u8 is fine
1478             unsafe { copy_to_volatile_slice(&destination, buf.as_ptr() as *const u8, total) };
1479         } else {
1480             let guard = self.ptr_guard_mut();
1481             let start = guard.as_ptr();
1482             let mut ptr = start as *mut Packed<T>;
1483 
1484             for &v in buf.iter().take(self.len()) {
1485                 // SAFETY: write_volatile is safe because the pointers are range-checked when
1486                 // the slices are created, and they never escape the VolatileSlices.
1487                 // ptr::add is safe because get_array_ref() validated that
1488                 // size_of::<T>() * self.len() fits in an isize.
1489                 unsafe {
1490                     write_volatile(ptr, Packed::<T>(v));
1491                     ptr = ptr.add(1);
1492                 }
1493             }
1494 
1495             self.bitmap.mark_dirty(0, ptr as usize - start as usize);
1496         }
1497     }
1498 }
1499 
1500 impl<'a, B: BitmapSlice> From<VolatileSlice<'a, B>> for VolatileArrayRef<'a, u8, B> {
1501     fn from(slice: VolatileSlice<'a, B>) -> Self {
1502         // SAFETY: Safe because the result has the same lifetime and points to the same
1503         // memory as the incoming VolatileSlice.
1504         unsafe { VolatileArrayRef::with_bitmap(slice.addr, slice.len(), slice.bitmap, slice.mmap) }
1505     }
1506 }
1507 
1508 // Returns the largest power of two that `addr` is aligned to. Forcing this function to return 1
1509 // will cause test_non_atomic_access to fail.
1510 fn alignment(addr: usize) -> usize {
1511     // Unsigned integers cannot be negated in Rust, so `-addr` is spelled out as `!addr + 1` (two's complement).
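    // For example: alignment(0b1000) == 8, alignment(0b0110) == 2, alignment(0b0101) == 1.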
1512     addr & (!addr + 1)
1513 }
1514 
1515 mod copy_slice_impl {
1516     use super::*;
1517 
1518     // SAFETY: Has the same safety requirements as `read_volatile` + `write_volatile`, namely:
1519     // - `src_addr` and `dst_addr` must be valid for reads/writes.
1520     // - `src_addr` and `dst_addr` must be properly aligned with respect to `align`.
1521     // - `src_addr` must point to a properly initialized value, which is true here because
1522     //   we're only using integer primitives.
1523     unsafe fn copy_single(align: usize, src_addr: *const u8, dst_addr: *mut u8) {
1524         match align {
1525             8 => write_volatile(dst_addr as *mut u64, read_volatile(src_addr as *const u64)),
1526             4 => write_volatile(dst_addr as *mut u32, read_volatile(src_addr as *const u32)),
1527             2 => write_volatile(dst_addr as *mut u16, read_volatile(src_addr as *const u16)),
1528             1 => write_volatile(dst_addr, read_volatile(src_addr)),
1529             _ => unreachable!(),
1530         }
1531     }
1532 
1533     /// Copies `total` bytes from `src` to `dst` using a loop of volatile reads and writes
1534     ///
1535     /// SAFETY: `src` and `dst` must point to a contiguously allocated memory region of at least
1536     /// length `total`. The regions must not overlap.
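    ///
    /// For example, on a 64-bit target with both pointers 8-byte aligned and `total == 13`, the
    /// data is copied as one `u64`, then one `u32`, then one `u8` (13 = 8 + 4 + 1).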
1537     unsafe fn copy_slice_volatile(mut dst: *mut u8, mut src: *const u8, total: usize) -> usize {
1538         let mut left = total;
1539 
1540         let align = min(alignment(src as usize), alignment(dst as usize));
1541 
1542         let mut copy_aligned_slice = |min_align| {
1543             if align < min_align {
1544                 return;
1545             }
1546 
1547             while left >= min_align {
1548                 // SAFETY: Safe because we check alignment beforehand, the memory areas are valid
1549                 // for reads/writes, and the source always contains a valid value.
1550                 unsafe { copy_single(min_align, src, dst) };
1551 
1552                 left -= min_align;
1553 
1554                 if left == 0 {
1555                     break;
1556                 }
1557 
1558                 // SAFETY: We only explain the invariants for `src`, the argument for `dst` is
1559                 // analogous.
1560                 // - `src` and `src + min_align` are within (or one byte past) the same allocated object.
1561                 //   This is given by the invariant on this function ensuring that [src, src + total)
1562                 //   are part of the same allocated object, and the condition on the while loop
1563                 //   ensures that we do not go outside this object
1564                 // - The computed offset in bytes cannot overflow isize, because `min_align` is at
1565                 //   most 8 when the closure is called (see below)
1566                 // - The sum `src as usize + min_align` can only wrap around if src as usize + min_align - 1 == usize::MAX,
1567                 //   however in this case, left == 0, and we'll have exited the loop above.
1568                 unsafe {
1569                     src = src.add(min_align);
1570                     dst = dst.add(min_align);
1571                 }
1572             }
1573         };
1574 
1575         if size_of::<usize>() > 4 {
1576             copy_aligned_slice(8);
1577         }
1578         copy_aligned_slice(4);
1579         copy_aligned_slice(2);
1580         copy_aligned_slice(1);
1581 
1582         total
1583     }
1584 
1585     /// Copies `total` bytes from `src` to `dst`
1586     ///
1587     /// SAFETY: `src` and `dst` must point to a contiguously allocated memory region of at least
1588     /// length `total`. The regions must not overlap.
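    ///
    /// For example, on a 64-bit target a 4-byte copy takes the volatile path via
    /// `copy_slice_volatile`, while a 64-byte copy is delegated to `std::ptr::copy_nonoverlapping`.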
1589     unsafe fn copy_slice(dst: *mut u8, src: *const u8, total: usize) -> usize {
1590         if total <= size_of::<usize>() {
1591             // SAFETY: Invariants of copy_slice_volatile are the same as invariants of copy_slice
1592             unsafe {
1593                 copy_slice_volatile(dst, src, total);
1594             };
1595         } else {
1596             // SAFETY:
1597             // - Both src and dst are allocated for reads/writes of length `total` by function
1598             //   invariant
1599             // - src and dst are properly aligned, as any alignment is valid for u8
1600             // - The regions are not overlapping by function invariant
1601             unsafe {
1602                 std::ptr::copy_nonoverlapping(src, dst, total);
1603             }
1604         }
1605 
1606         total
1607     }
1608 
1609     /// Copies `total` bytes from `slice` to `dst`
1610     ///
1611     /// SAFETY: `slice` and `dst` must point to a contiguously allocated memory region of at
1612     /// least length `total`. The regions must not overlap.
1613     pub(super) unsafe fn copy_from_volatile_slice<B: BitmapSlice>(
1614         dst: *mut u8,
1615         slice: &VolatileSlice<'_, B>,
1616         total: usize,
1617     ) -> usize {
1618         let guard = slice.ptr_guard();
1619 
1620         // SAFETY: guaranteed by function invariants.
1621         copy_slice(dst, guard.as_ptr(), total)
1622     }
1623 
1624     /// Copies `total` bytes from `src` to `slice`
1625     ///
1626     /// SAFETY: `slice` and `src` must point to a contiguously allocated memory region of at
1627     /// least length `total`. The regions must not overlap.
1628     pub(super) unsafe fn copy_to_volatile_slice<B: BitmapSlice>(
1629         slice: &VolatileSlice<'_, B>,
1630         src: *const u8,
1631         total: usize,
1632     ) -> usize {
1633         let guard = slice.ptr_guard_mut();
1634 
1635         // SAFETY: guaranteed by function invariants.
1636         let count = copy_slice(guard.as_ptr(), src, total);
1637         slice.bitmap.mark_dirty(0, count);
1638         count
1639     }
1640 }
1641 
1642 #[cfg(test)]
1643 mod tests {
1644     #![allow(clippy::undocumented_unsafe_blocks)]
1645 
1646     use super::*;
1647     use std::alloc::Layout;
1648 
1649     use std::fs::File;
1650     use std::io::Cursor;
1651     use std::mem::size_of_val;
1652     use std::path::Path;
1653     use std::sync::atomic::{AtomicUsize, Ordering};
1654     use std::sync::{Arc, Barrier};
1655     use std::thread::spawn;
1656 
1657     use matches::assert_matches;
1658     use vmm_sys_util::tempfile::TempFile;
1659 
1660     use crate::bitmap::tests::{
1661         check_range, range_is_clean, range_is_dirty, test_bytes, test_volatile_memory,
1662     };
1663     use crate::bitmap::{AtomicBitmap, RefSlice};
1664 
1665     #[test]
1666     fn test_display_error() {
1667         assert_eq!(
1668             format!("{}", Error::OutOfBounds { addr: 0x10 }),
1669             "address 0x10 is out of bounds"
1670         );
1671 
1672         assert_eq!(
1673             format!(
1674                 "{}",
1675                 Error::Overflow {
1676                     base: 0x0,
1677                     offset: 0x10
1678                 }
1679             ),
1680             "address 0x0 offset by 0x10 would overflow"
1681         );
1682 
1683         assert_eq!(
1684             format!(
1685                 "{}",
1686                 Error::TooBig {
1687                     nelements: 100_000,
1688                     size: 1_000_000_000
1689                 }
1690             ),
1691             "100000 elements of size 1000000000 would overflow a usize"
1692         );
1693 
1694         assert_eq!(
1695             format!(
1696                 "{}",
1697                 Error::Misaligned {
1698                     addr: 0x4,
1699                     alignment: 8
1700                 }
1701             ),
1702             "address 0x4 is not aligned to 8"
1703         );
1704 
1705         assert_eq!(
1706             format!(
1707                 "{}",
1708                 Error::PartialBuffer {
1709                     expected: 100,
1710                     completed: 90
1711                 }
1712             ),
1713             "only used 90 bytes in 100 long buffer"
1714         );
1715     }
1716 
1717     #[test]
1718     fn misaligned_ref() {
1719         let mut a = [0u8; 3];
1720         let a_ref = VolatileSlice::from(&mut a[..]);
1721         unsafe {
1722             assert!(
1723                 a_ref.aligned_as_ref::<u16>(0).is_err() ^ a_ref.aligned_as_ref::<u16>(1).is_err()
1724             );
1725             assert!(
1726                 a_ref.aligned_as_mut::<u16>(0).is_err() ^ a_ref.aligned_as_mut::<u16>(1).is_err()
1727             );
1728         }
1729     }
1730 
1731     #[test]
1732     fn atomic_store() {
1733         let mut a = [0usize; 1];
1734         {
1735             let a_ref = unsafe {
1736                 VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>())
1737             };
1738             let atomic = a_ref.get_atomic_ref::<AtomicUsize>(0).unwrap();
1739             atomic.store(2usize, Ordering::Relaxed)
1740         }
1741         assert_eq!(a[0], 2);
1742     }
1743 
1744     #[test]
1745     fn atomic_load() {
1746         let mut a = [5usize; 1];
1747         {
1748             let a_ref = unsafe {
1749                 VolatileSlice::new(&mut a[0] as *mut usize as *mut u8,
1750                                    size_of::<usize>())
1751             };
1752             let atomic = {
1753                 let atomic = a_ref.get_atomic_ref::<AtomicUsize>(0).unwrap();
1754                 assert_eq!(atomic.load(Ordering::Relaxed), 5usize);
1755                 atomic
1756             };
1757             // To make sure we can take the atomic out of the scope we made it in:
1758             atomic.load(Ordering::Relaxed);
1759             // but not too far:
1760             // atomicu8
1761         } //.load(std::sync::atomic::Ordering::Relaxed)
1762         ;
1763     }
1764 
1765     #[test]
1766     fn misaligned_atomic() {
1767         let mut a = [5usize, 5usize];
1768         let a_ref =
1769             unsafe { VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>()) };
1770         assert!(a_ref.get_atomic_ref::<AtomicUsize>(0).is_ok());
1771         assert!(a_ref.get_atomic_ref::<AtomicUsize>(1).is_err());
1772     }
1773 
1774     #[test]
1775     fn ref_store() {
1776         let mut a = [0u8; 1];
1777         {
1778             let a_ref = VolatileSlice::from(&mut a[..]);
1779             let v_ref = a_ref.get_ref(0).unwrap();
1780             v_ref.store(2u8);
1781         }
1782         assert_eq!(a[0], 2);
1783     }
1784 
1785     #[test]
1786     fn ref_load() {
1787         let mut a = [5u8; 1];
1788         {
1789             let a_ref = VolatileSlice::from(&mut a[..]);
1790             let c = {
1791                 let v_ref = a_ref.get_ref::<u8>(0).unwrap();
1792                 assert_eq!(v_ref.load(), 5u8);
1793                 v_ref
1794             };
1795             // To make sure we can take a v_ref out of the scope we made it in:
1796             c.load();
1797             // but not too far:
1798             // c
1799         } //.load()
1800         ;
1801     }
1802 
1803     #[test]
1804     fn ref_to_slice() {
1805         let mut a = [1u8; 5];
1806         let a_ref = VolatileSlice::from(&mut a[..]);
1807         let v_ref = a_ref.get_ref(1).unwrap();
1808         v_ref.store(0x1234_5678u32);
1809         let ref_slice = v_ref.to_slice();
1810         assert_eq!(v_ref.addr as usize, ref_slice.addr as usize);
1811         assert_eq!(v_ref.len(), ref_slice.len());
1812         assert!(!ref_slice.is_empty());
1813     }
1814 
1815     #[test]
1816     fn observe_mutate() {
1817         struct RawMemory(*mut u8);
1818 
1819         // SAFETY: we use proper synchronization below
1820         unsafe impl Send for RawMemory {}
1821         unsafe impl Sync for RawMemory {}
1822 
1823         let mem = Arc::new(RawMemory(unsafe {
1824             std::alloc::alloc(Layout::from_size_align(1, 1).unwrap())
1825         }));
1826 
1827         let outside_slice = unsafe { VolatileSlice::new(Arc::clone(&mem).0, 1) };
1828         let inside_arc = Arc::clone(&mem);
1829 
1830         let v_ref = outside_slice.get_ref::<u8>(0).unwrap();
1831         let barrier = Arc::new(Barrier::new(2));
1832         let barrier1 = barrier.clone();
1833 
1834         v_ref.store(99);
1835         spawn(move || {
1836             barrier1.wait();
1837             let inside_slice = unsafe { VolatileSlice::new(inside_arc.0, 1) };
1838             let clone_v_ref = inside_slice.get_ref::<u8>(0).unwrap();
1839             clone_v_ref.store(0);
1840             barrier1.wait();
1841         });
1842 
1843         assert_eq!(v_ref.load(), 99);
1844         barrier.wait();
1845         barrier.wait();
1846         assert_eq!(v_ref.load(), 0);
1847 
1848         unsafe { std::alloc::dealloc(mem.0, Layout::from_size_align(1, 1).unwrap()) }
1849     }
1850 
1851     #[test]
1852     fn mem_is_empty() {
1853         let mut backing = vec![0u8; 100];
1854         let a = VolatileSlice::from(backing.as_mut_slice());
1855         assert!(!a.is_empty());
1856 
1857         let mut backing = vec![];
1858         let a = VolatileSlice::from(backing.as_mut_slice());
1859         assert!(a.is_empty());
1860     }
1861 
1862     #[test]
1863     fn slice_len() {
1864         let mut backing = vec![0u8; 100];
1865         let mem = VolatileSlice::from(backing.as_mut_slice());
1866         let slice = mem.get_slice(0, 27).unwrap();
1867         assert_eq!(slice.len(), 27);
1868         assert!(!slice.is_empty());
1869 
1870         let slice = mem.get_slice(34, 27).unwrap();
1871         assert_eq!(slice.len(), 27);
1872         assert!(!slice.is_empty());
1873 
1874         let slice = slice.get_slice(20, 5).unwrap();
1875         assert_eq!(slice.len(), 5);
1876         assert!(!slice.is_empty());
1877 
1878         let slice = mem.get_slice(34, 0).unwrap();
1879         assert!(slice.is_empty());
1880     }
1881 
1882     #[test]
1883     fn slice_subslice() {
1884         let mut backing = vec![0u8; 100];
1885         let mem = VolatileSlice::from(backing.as_mut_slice());
1886         let slice = mem.get_slice(0, 100).unwrap();
1887         assert!(slice.write(&[1; 80], 10).is_ok());
1888 
1889         assert!(slice.subslice(0, 0).is_ok());
1890         assert!(slice.subslice(0, 101).is_err());
1891 
1892         assert!(slice.subslice(99, 0).is_ok());
1893         assert!(slice.subslice(99, 1).is_ok());
1894         assert!(slice.subslice(99, 2).is_err());
1895 
1896         assert!(slice.subslice(100, 0).is_ok());
1897         assert!(slice.subslice(100, 1).is_err());
1898 
1899         assert!(slice.subslice(101, 0).is_err());
1900         assert!(slice.subslice(101, 1).is_err());
1901 
1902         assert!(slice.subslice(std::usize::MAX, 2).is_err());
1903         assert!(slice.subslice(2, std::usize::MAX).is_err());
1904 
1905         let maybe_offset_slice = slice.subslice(10, 80);
1906         assert!(maybe_offset_slice.is_ok());
1907         let offset_slice = maybe_offset_slice.unwrap();
1908         assert_eq!(offset_slice.len(), 80);
1909 
1910         let mut buf = [0; 80];
1911         assert!(offset_slice.read(&mut buf, 0).is_ok());
1912         assert_eq!(&buf[0..80], &[1; 80][0..80]);
1913     }
1914 
1915     #[test]
1916     fn slice_offset() {
1917         let mut backing = vec![0u8; 100];
1918         let mem = VolatileSlice::from(backing.as_mut_slice());
1919         let slice = mem.get_slice(0, 100).unwrap();
1920         assert!(slice.write(&[1; 80], 10).is_ok());
1921 
1922         assert!(slice.offset(101).is_err());
1923 
1924         let maybe_offset_slice = slice.offset(10);
1925         assert!(maybe_offset_slice.is_ok());
1926         let offset_slice = maybe_offset_slice.unwrap();
1927         assert_eq!(offset_slice.len(), 90);
1928         let mut buf = [0; 90];
1929         assert!(offset_slice.read(&mut buf, 0).is_ok());
1930         assert_eq!(&buf[0..80], &[1; 80][0..80]);
1931         assert_eq!(&buf[80..90], &[0; 10][0..10]);
1932     }
1933 
1934     #[test]
1935     fn slice_copy_to_u8() {
1936         let mut a = [2u8, 4, 6, 8, 10];
1937         let mut b = [0u8; 4];
1938         let mut c = [0u8; 6];
1939         let a_ref = VolatileSlice::from(&mut a[..]);
1940         let v_ref = a_ref.get_slice(0, a_ref.len()).unwrap();
1941         v_ref.copy_to(&mut b[..]);
1942         v_ref.copy_to(&mut c[..]);
1943         assert_eq!(b[0..4], a[0..4]);
1944         assert_eq!(c[0..5], a[0..5]);
1945     }
1946 
1947     #[test]
1948     fn slice_copy_to_u16() {
1949         let mut a = [0x01u16, 0x2, 0x03, 0x4, 0x5];
1950         let mut b = [0u16; 4];
1951         let mut c = [0u16; 6];
1952         let a_ref = &mut a[..];
1953         let v_ref = unsafe { VolatileSlice::new(a_ref.as_mut_ptr() as *mut u8, 9) };
1954 
1955         v_ref.copy_to(&mut b[..]);
1956         v_ref.copy_to(&mut c[..]);
1957         assert_eq!(b[0..4], a_ref[0..4]);
1958         assert_eq!(c[0..4], a_ref[0..4]);
1959         assert_eq!(c[4], 0);
1960     }
1961 
1962     #[test]
1963     fn slice_copy_from_u8() {
1964         let a = [2u8, 4, 6, 8, 10];
1965         let mut b = [0u8; 4];
1966         let mut c = [0u8; 6];
1967         let b_ref = VolatileSlice::from(&mut b[..]);
1968         let v_ref = b_ref.get_slice(0, b_ref.len()).unwrap();
1969         v_ref.copy_from(&a[..]);
1970         assert_eq!(b[0..4], a[0..4]);
1971 
1972         let c_ref = VolatileSlice::from(&mut c[..]);
1973         let v_ref = c_ref.get_slice(0, c_ref.len()).unwrap();
1974         v_ref.copy_from(&a[..]);
1975         assert_eq!(c[0..5], a[0..5]);
1976     }
1977 
1978     #[test]
1979     fn slice_copy_from_u16() {
1980         let a = [2u16, 4, 6, 8, 10];
1981         let mut b = [0u16; 4];
1982         let mut c = [0u16; 6];
1983         let b_ref = &mut b[..];
1984         let v_ref = unsafe { VolatileSlice::new(b_ref.as_mut_ptr() as *mut u8, 8) };
1985         v_ref.copy_from(&a[..]);
1986         assert_eq!(b_ref[0..4], a[0..4]);
1987 
1988         let c_ref = &mut c[..];
1989         let v_ref = unsafe { VolatileSlice::new(c_ref.as_mut_ptr() as *mut u8, 9) };
1990         v_ref.copy_from(&a[..]);
1991         assert_eq!(c_ref[0..4], a[0..4]);
1992         assert_eq!(c_ref[4], 0);
1993     }
1994 
1995     #[test]
1996     fn slice_copy_to_volatile_slice() {
1997         let mut a = [2u8, 4, 6, 8, 10];
1998         let a_ref = VolatileSlice::from(&mut a[..]);
1999         let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap();
2000 
2001         let mut b = [0u8; 4];
2002         let b_ref = VolatileSlice::from(&mut b[..]);
2003         let b_slice = b_ref.get_slice(0, b_ref.len()).unwrap();
2004 
2005         a_slice.copy_to_volatile_slice(b_slice);
2006         assert_eq!(b, [2, 4, 6, 8]);
2007     }
2008 
2009     #[test]
2010     fn slice_overflow_error() {
2011         use std::usize::MAX;
2012         let mut backing = vec![0u8];
2013         let a = VolatileSlice::from(backing.as_mut_slice());
2014         let res = a.get_slice(MAX, 1).unwrap_err();
2015         assert_matches!(
2016             res,
2017             Error::Overflow {
2018                 base: MAX,
2019                 offset: 1,
2020             }
2021         );
2022     }
2023 
2024     #[test]
2025     fn slice_oob_error() {
2026         let mut backing = vec![0u8; 100];
2027         let a = VolatileSlice::from(backing.as_mut_slice());
2028         a.get_slice(50, 50).unwrap();
2029         let res = a.get_slice(55, 50).unwrap_err();
2030         assert_matches!(res, Error::OutOfBounds { addr: 105 });
2031     }
2032 
2033     #[test]
2034     fn ref_overflow_error() {
2035         use std::usize::MAX;
2036         let mut backing = vec![0u8];
2037         let a = VolatileSlice::from(backing.as_mut_slice());
2038         let res = a.get_ref::<u8>(MAX).unwrap_err();
2039         assert_matches!(
2040             res,
2041             Error::Overflow {
2042                 base: MAX,
2043                 offset: 1,
2044             }
2045         );
2046     }
2047 
2048     #[test]
2049     fn ref_oob_error() {
2050         let mut backing = vec![0u8; 100];
2051         let a = VolatileSlice::from(backing.as_mut_slice());
2052         a.get_ref::<u8>(99).unwrap();
2053         let res = a.get_ref::<u16>(99).unwrap_err();
2054         assert_matches!(res, Error::OutOfBounds { addr: 101 });
2055     }
2056 
2057     #[test]
2058     fn ref_oob_too_large() {
2059         let mut backing = vec![0u8; 3];
2060         let a = VolatileSlice::from(backing.as_mut_slice());
2061         let res = a.get_ref::<u32>(0).unwrap_err();
2062         assert_matches!(res, Error::OutOfBounds { addr: 4 });
2063     }
2064 
2065     #[test]
2066     fn slice_store() {
2067         let mut backing = vec![0u8; 5];
2068         let a = VolatileSlice::from(backing.as_mut_slice());
2069         let s = a.as_volatile_slice();
2070         let r = a.get_ref(2).unwrap();
2071         r.store(9u16);
2072         assert_eq!(s.read_obj::<u16>(2).unwrap(), 9);
2073     }
2074 
2075     #[test]
2076     fn test_write_past_end() {
2077         let mut backing = vec![0u8; 5];
2078         let a = VolatileSlice::from(backing.as_mut_slice());
2079         let s = a.as_volatile_slice();
2080         let res = s.write(&[1, 2, 3, 4, 5, 6], 0);
2081         assert!(res.is_ok());
2082         assert_eq!(res.unwrap(), 5);
2083     }
2084 
2085     #[test]
2086     fn slice_read_and_write() {
2087         let mut backing = vec![0u8; 5];
2088         let a = VolatileSlice::from(backing.as_mut_slice());
2089         let s = a.as_volatile_slice();
2090         let sample_buf = [1, 2, 3];
2091         assert!(s.write(&sample_buf, 5).is_err());
2092         assert!(s.write(&sample_buf, 2).is_ok());
2093         let mut buf = [0u8; 3];
2094         assert!(s.read(&mut buf, 5).is_err());
2095         assert!(s.read_slice(&mut buf, 2).is_ok());
2096         assert_eq!(buf, sample_buf);
2097 
2098         // Writing an empty buffer at the end of the volatile slice works.
2099         assert_eq!(s.write(&[], 100).unwrap(), 0);
2100         let buf: &mut [u8] = &mut [];
2101         assert_eq!(s.read(buf, 4).unwrap(), 0);
2102 
2103         // Check that reading and writing an empty buffer does not yield an error.
2104         let mut backing = Vec::new();
2105         let empty_mem = VolatileSlice::from(backing.as_mut_slice());
2106         let empty = empty_mem.as_volatile_slice();
2107         assert_eq!(empty.write(&[], 1).unwrap(), 0);
2108         assert_eq!(empty.read(buf, 1).unwrap(), 0);
2109     }
2110 
2111     #[test]
2112     fn obj_read_and_write() {
2113         let mut backing = vec![0u8; 5];
2114         let a = VolatileSlice::from(backing.as_mut_slice());
2115         let s = a.as_volatile_slice();
2116         assert!(s.write_obj(55u16, 4).is_err());
2117         assert!(s.write_obj(55u16, core::usize::MAX).is_err());
2118         assert!(s.write_obj(55u16, 2).is_ok());
2119         assert_eq!(s.read_obj::<u16>(2).unwrap(), 55u16);
2120         assert!(s.read_obj::<u16>(4).is_err());
2121         assert!(s.read_obj::<u16>(core::usize::MAX).is_err());
2122     }
2123 
2124     #[test]
2125     fn mem_read_and_write() {
2126         let mut backing = vec![0u8; 5];
2127         let a = VolatileSlice::from(backing.as_mut_slice());
2128         let s = a.as_volatile_slice();
2129         assert!(s.write_obj(!0u32, 1).is_ok());
2130         let mut file = if cfg!(unix) {
2131             File::open(Path::new("/dev/zero")).unwrap()
2132         } else {
2133             File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
2134         };
2135         assert!(s.read_exact_from(2, &mut file, size_of::<u32>()).is_err());
2136         assert!(s
2137             .read_exact_from(core::usize::MAX, &mut file, size_of::<u32>())
2138             .is_err());
2139 
2140         assert!(s.read_exact_from(1, &mut file, size_of::<u32>()).is_ok());
2141 
2142         let mut f = TempFile::new().unwrap().into_file();
2143         assert!(s.read_exact_from(1, &mut f, size_of::<u32>()).is_err());
2144         format!("{:?}", s.read_exact_from(1, &mut f, size_of::<u32>()));
2145 
2146         let value = s.read_obj::<u32>(1).unwrap();
2147         if cfg!(unix) {
2148             assert_eq!(value, 0);
2149         } else {
2150             assert_eq!(value, 0x0090_5a4d);
2151         }
2152 
2153         let mut sink = Vec::new();
2154         assert!(s.write_all_to(1, &mut sink, size_of::<u32>()).is_ok());
2155         assert!(s.write_all_to(2, &mut sink, size_of::<u32>()).is_err());
2156         assert!(s
2157             .write_all_to(core::usize::MAX, &mut sink, size_of::<u32>())
2158             .is_err());
2159         format!("{:?}", s.write_all_to(2, &mut sink, size_of::<u32>()));
2160         if cfg!(unix) {
2161             assert_eq!(sink, vec![0; size_of::<u32>()]);
2162         } else {
2163             assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
2164         };
2165     }
2166 
2167     #[test]
2168     fn unaligned_read_and_write() {
2169         let mut backing = vec![0u8; 7];
2170         let a = VolatileSlice::from(backing.as_mut_slice());
2171         let s = a.as_volatile_slice();
2172         let sample_buf: [u8; 7] = [1, 2, 0xAA, 0xAA, 0xAA, 0xAA, 4];
2173         assert!(s.write_slice(&sample_buf, 0).is_ok());
2174         let r = a.get_ref::<u32>(2).unwrap();
2175         assert_eq!(r.load(), 0xAAAA_AAAA);
2176 
2177         r.store(0x5555_5555);
2178         let sample_buf: [u8; 7] = [1, 2, 0x55, 0x55, 0x55, 0x55, 4];
2179         let mut buf: [u8; 7] = Default::default();
2180         assert!(s.read_slice(&mut buf, 0).is_ok());
2181         assert_eq!(buf, sample_buf);
2182     }
2183 
2184     #[test]
2185     fn test_read_from_exceeds_size() {
2186         #[derive(Debug, Default, Copy, Clone)]
2187         struct BytesToRead {
2188             _val1: u128, // 16 bytes
2189             _val2: u128, // 16 bytes
2190         }
2191         unsafe impl ByteValued for BytesToRead {}
2192         let cursor_size = 20;
2193         let mut image = Cursor::new(vec![1u8; cursor_size]);
2194 
2195         // Trying to read more bytes than we have available in the cursor should
2196         // make the read_from function return maximum cursor size (i.e. 20).
2197         let mut bytes_to_read = BytesToRead::default();
2198         let size_of_bytes = size_of_val(&bytes_to_read);
2199         assert_eq!(
2200             bytes_to_read
2201                 .as_bytes()
2202                 .read_from(0, &mut image, size_of_bytes)
2203                 .unwrap(),
2204             cursor_size
2205         );
2206     }
2207 
2208     #[test]
2209     fn ref_array_from_slice() {
2210         let mut a = [2, 4, 6, 8, 10];
2211         let a_vec = a.to_vec();
2212         let a_ref = VolatileSlice::from(&mut a[..]);
2213         let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap();
2214         let a_array_ref: VolatileArrayRef<u8, ()> = a_slice.into();
2215         for (i, entry) in a_vec.iter().enumerate() {
2216             assert_eq!(&a_array_ref.load(i), entry);
2217         }
2218     }
2219 
2220     #[test]
2221     fn ref_array_store() {
2222         let mut a = [0u8; 5];
2223         {
2224             let a_ref = VolatileSlice::from(&mut a[..]);
2225             let v_ref = a_ref.get_array_ref(1, 4).unwrap();
2226             v_ref.store(1, 2u8);
2227             v_ref.store(2, 4u8);
2228             v_ref.store(3, 6u8);
2229         }
2230         let expected = [2u8, 4u8, 6u8];
2231         assert_eq!(a[2..=4], expected);
2232     }
2233 
2234     #[test]
2235     fn ref_array_load() {
2236         let mut a = [0, 0, 2, 3, 10];
2237         {
2238             let a_ref = VolatileSlice::from(&mut a[..]);
2239             let c = {
2240                 let v_ref = a_ref.get_array_ref::<u8>(1, 4).unwrap();
2241                 assert_eq!(v_ref.load(1), 2u8);
2242                 assert_eq!(v_ref.load(2), 3u8);
2243                 assert_eq!(v_ref.load(3), 10u8);
2244                 v_ref
2245             };
2246             // To make sure we can take a v_ref out of the scope we made it in:
2247             c.load(0);
2248             // but not too far:
2249             // c
2250         } //.load()
2251         ;
2252     }
2253 
2254     #[test]
2255     fn ref_array_overflow() {
2256         let mut a = [0, 0, 2, 3, 10];
2257         let a_ref = VolatileSlice::from(&mut a[..]);
2258         let res = a_ref.get_array_ref::<u32>(4, usize::MAX).unwrap_err();
2259         assert_matches!(
2260             res,
2261             Error::TooBig {
2262                 nelements: usize::MAX,
2263                 size: 4,
2264             }
2265         );
2266     }
2267 
2268     #[test]
2269     fn alignment() {
2270         let a = [0u8; 64];
2271         let a = &a[a.as_ptr().align_offset(32)] as *const u8 as usize;
2272         assert!(super::alignment(a) >= 32);
2273         assert_eq!(super::alignment(a + 9), 1);
2274         assert_eq!(super::alignment(a + 30), 2);
2275         assert_eq!(super::alignment(a + 12), 4);
2276         assert_eq!(super::alignment(a + 8), 8);
2277     }
2278 
2279     #[test]
2280     fn test_atomic_accesses() {
2281         let len = 0x1000;
2282         let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) };
2283         let a = unsafe { VolatileSlice::new(buf, len) };
2284 
2285         crate::bytes::tests::check_atomic_accesses(a, 0, 0x1000);
2286         unsafe {
2287             std::alloc::dealloc(buf, Layout::from_size_align(len, 8).unwrap());
2288         }
2289     }
2290 
2291     #[test]
2292     fn split_at() {
2293         let mut mem = [0u8; 32];
2294         let mem_ref = VolatileSlice::from(&mut mem[..]);
2295         let vslice = mem_ref.get_slice(0, 32).unwrap();
2296         let (start, end) = vslice.split_at(8).unwrap();
2297         assert_eq!(start.len(), 8);
2298         assert_eq!(end.len(), 24);
2299         let (start, end) = vslice.split_at(0).unwrap();
2300         assert_eq!(start.len(), 0);
2301         assert_eq!(end.len(), 32);
2302         let (start, end) = vslice.split_at(31).unwrap();
2303         assert_eq!(start.len(), 31);
2304         assert_eq!(end.len(), 1);
2305         let (start, end) = vslice.split_at(32).unwrap();
2306         assert_eq!(start.len(), 32);
2307         assert_eq!(end.len(), 0);
2308         let err = vslice.split_at(33).unwrap_err();
2309         assert_matches!(err, Error::OutOfBounds { addr: _ })
2310     }
2311 
2312     #[test]
2313     fn test_volatile_slice_dirty_tracking() {
2314         let val = 123u64;
2315         let dirty_offset = 0x1000;
2316         let dirty_len = size_of_val(&val);
2317         let page_size = 0x1000;
2318 
2319         let len = 0x10000;
2320         let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) };
2321 
2322         // Invoke the `Bytes` test helper function.
2323         {
2324             let bitmap = AtomicBitmap::new(len, page_size);
2325             let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
2326 
2327             test_bytes(
2328                 &slice,
2329                 |s: &VolatileSlice<RefSlice<AtomicBitmap>>,
2330                  start: usize,
2331                  len: usize,
2332                  clean: bool| { check_range(s.bitmap(), start, len, clean) },
2333                 |offset| offset,
2334                 0x1000,
2335             );
2336         }
2337 
2338         // Invoke the `VolatileMemory` test helper function.
2339         {
2340             let bitmap = AtomicBitmap::new(len, page_size);
2341             let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
2342             test_volatile_memory(&slice);
2343         }
2344 
2345         let bitmap = AtomicBitmap::new(len, page_size);
2346         let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
2347 
2348         let bitmap2 = AtomicBitmap::new(len, page_size);
2349         let slice2 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap2.slice_at(0), None) };
2350 
2351         let bitmap3 = AtomicBitmap::new(len, page_size);
2352         let slice3 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap3.slice_at(0), None) };
2353 
2354         assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
2355         assert!(range_is_clean(slice2.bitmap(), 0, slice2.len()));
2356 
2357         slice.write_obj(val, dirty_offset).unwrap();
2358         assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));
2359 
2360         slice.copy_to_volatile_slice(slice2);
2361         assert!(range_is_dirty(slice2.bitmap(), 0, slice2.len()));
2362 
2363         {
2364             let (s1, s2) = slice.split_at(dirty_offset).unwrap();
2365             assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
2366             assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));
2367         }
2368 
2369         {
2370             let s = slice.subslice(dirty_offset, dirty_len).unwrap();
2371             assert!(range_is_dirty(s.bitmap(), 0, s.len()));
2372         }
2373 
2374         {
2375             let s = slice.offset(dirty_offset).unwrap();
2376             assert!(range_is_dirty(s.bitmap(), 0, dirty_len));
2377         }
2378 
2379         // Test `copy_from` for size_of::<T> == 1.
2380         {
2381             let buf = vec![1u8; dirty_offset];
2382 
2383             assert!(range_is_clean(slice.bitmap(), 0, dirty_offset));
2384             slice.copy_from(&buf);
2385             assert!(range_is_dirty(slice.bitmap(), 0, dirty_offset));
2386         }
2387 
2388         // Test `copy_from` for size_of::<T> > 1.
2389         {
2390             let val = 1u32;
2391             let buf = vec![val; dirty_offset / size_of_val(&val)];
2392 
2393             assert!(range_is_clean(slice3.bitmap(), 0, dirty_offset));
2394             slice3.copy_from(&buf);
2395             assert!(range_is_dirty(slice3.bitmap(), 0, dirty_offset));
2396         }
2397 
2398         unsafe {
2399             std::alloc::dealloc(buf, Layout::from_size_align(len, 8).unwrap());
2400         }
2401     }
2402 
2403     #[test]
2404     fn test_volatile_ref_dirty_tracking() {
2405         let val = 123u64;
2406         let mut buf = vec![val];
2407         let page_size = 0x1000;
2408 
2409         let bitmap = AtomicBitmap::new(size_of_val(&val), page_size);
2410         let vref = unsafe {
2411             VolatileRef::with_bitmap(buf.as_mut_ptr() as *mut u8, bitmap.slice_at(0), None)
2412         };
2413 
2414         assert!(range_is_clean(vref.bitmap(), 0, vref.len()));
2415         vref.store(val);
2416         assert!(range_is_dirty(vref.bitmap(), 0, vref.len()));
2417     }
2418 
2419     fn test_volatile_array_ref_copy_from_tracking<T>(buf: &mut [T], index: usize, page_size: usize)
2420     where
2421         T: ByteValued + From<u8>,
2422     {
2423         let bitmap = AtomicBitmap::new(size_of_val(buf), page_size);
2424         let arr = unsafe {
2425             VolatileArrayRef::with_bitmap(
2426                 buf.as_mut_ptr() as *mut u8,
2427                 index + 1,
2428                 bitmap.slice_at(0),
2429                 None,
2430             )
2431         };
2432 
2433         let val = T::from(123);
2434         let copy_buf = vec![val; index + 1];
2435 
2436         assert!(range_is_clean(arr.bitmap(), 0, arr.len() * size_of::<T>()));
2437         arr.copy_from(copy_buf.as_slice());
2438         assert!(range_is_dirty(arr.bitmap(), 0, size_of_val(buf)));
2439     }
2440 
2441     #[test]
2442     fn test_volatile_array_ref_dirty_tracking() {
2443         let val = 123u64;
2444         let dirty_len = size_of_val(&val);
2445         let index = 0x1000;
2446         let dirty_offset = dirty_len * index;
2447         let page_size = 0x1000;
2448 
2449         let mut buf = vec![0u64; index + 1];
2450         let mut byte_buf = vec![0u8; index + 1];
2451 
2452         // Test `ref_at`.
2453         {
2454             let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
2455             let arr = unsafe {
2456                 VolatileArrayRef::with_bitmap(
2457                     buf.as_mut_ptr() as *mut u8,
2458                     index + 1,
2459                     bitmap.slice_at(0),
2460                     None,
2461                 )
2462             };
2463 
2464             assert!(range_is_clean(arr.bitmap(), 0, arr.len() * dirty_len));
2465             arr.ref_at(index).store(val);
2466             assert!(range_is_dirty(arr.bitmap(), dirty_offset, dirty_len));
2467         }
2468 
2469         // Test `store`.
2470         {
2471             let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
2472             let arr = unsafe {
2473                 VolatileArrayRef::with_bitmap(
2474                     buf.as_mut_ptr() as *mut u8,
2475                     index + 1,
2476                     bitmap.slice_at(0),
2477                     None,
2478                 )
2479             };
2480 
2481             let slice = arr.to_slice();
2482             assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
2483             arr.store(index, val);
2484             assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));
2485         }
2486 
2487         // Test `copy_from` when size_of::<T>() == 1.
2488         test_volatile_array_ref_copy_from_tracking(&mut byte_buf, index, page_size);
2489         // Test `copy_from` when size_of::<T>() > 1.
2490         test_volatile_array_ref_copy_from_tracking(&mut buf, index, page_size);
2491     }
2492 }
2493