// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cmp::min;
use std::fs::File;
use std::io;
use std::mem::size_of;
use std::ptr::copy_nonoverlapping;
use std::ptr::read_unaligned;
use std::ptr::read_volatile;
use std::ptr::write_unaligned;
use std::ptr::write_volatile;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering;
use std::sync::OnceLock;

use remain::sorted;
use serde::Deserialize;
use serde::Serialize;
use zerocopy::AsBytes;
use zerocopy::FromBytes;

use crate::descriptor::AsRawDescriptor;
use crate::descriptor::SafeDescriptor;
use crate::platform::MemoryMapping as PlatformMmap;
use crate::SharedMemory;
use crate::VolatileMemory;
use crate::VolatileMemoryError;
use crate::VolatileMemoryResult;
use crate::VolatileSlice;

static CACHELINE_SIZE: OnceLock<usize> = OnceLock::new();

#[allow(unused_assignments)]
fn get_cacheline_size_once() -> usize {
    let mut assume_reason: &str = "unknown";
    cfg_if::cfg_if! {
        if #[cfg(all(any(target_os = "android", target_os = "linux"), not(target_env = "musl")))] {
            // TODO: Remove once available in libc bindings
            #[cfg(target_os = "android")]
            const _SC_LEVEL1_DCACHE_LINESIZE: i32 = 0x0094;
            #[cfg(target_os = "linux")]
            use libc::_SC_LEVEL1_DCACHE_LINESIZE;

            // SAFETY:
            // Safe because we check the return value for errors or unsupported requests
            let linesize = unsafe { libc::sysconf(_SC_LEVEL1_DCACHE_LINESIZE) };
            if linesize > 0 {
                return linesize as usize;
            } else {
                assume_reason = "sysconf cacheline size query failed";
            }
        } else {
            assume_reason = "cacheline size query not implemented for platform/arch";
        }
    }

    let assumed_size = 64;
    log::debug!(
        "assuming cacheline_size={}; reason: {}.",
        assumed_size,
        assume_reason
    );
    assumed_size
}

/// Returns the system's effective cacheline size (e.g. the granularity at which arch-specific
/// cacheline management, such as with the clflush instruction, is expected to occur).
#[inline(always)]
fn get_cacheline_size() -> usize {
    let size = *CACHELINE_SIZE.get_or_init(get_cacheline_size_once);
    assert!(size > 0);
    size
}

#[sorted]
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("`add_fd_mapping` is unsupported")]
    AddFdMappingIsUnsupported,
    #[error("requested memory out of range")]
    InvalidAddress,
    #[error("requested alignment is incompatible")]
    InvalidAlignment,
    #[error("invalid argument provided when creating mapping")]
    InvalidArgument,
    #[error("requested offset is out of range of off_t")]
    InvalidOffset,
    #[error("requested memory range spans past the end of the region: offset={0} count={1} region_size={2}")]
    InvalidRange(usize, usize, usize),
    #[error("operation is not implemented on platform/architecture: {0}")]
    NotImplemented(&'static str),
    #[error("requested memory is not page aligned")]
    NotPageAligned,
    #[error("failed to read from file to memory: {0}")]
    ReadToMemory(#[source] io::Error),
    #[error("`remove_mapping` is unsupported")]
    RemoveMappingIsUnsupported,
    #[error("system call failed while creating the mapping: {0}")]
    StdSyscallFailed(io::Error),
    #[error("mmap related system call failed: {0}")]
    SystemCallFailed(#[source] crate::Error),
    #[error("failed to write from memory to file: {0}")]
    WriteFromMemory(#[source] io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;

/// Memory access type for anonymous shared memory mapping.
#[derive(Copy, Clone, Default, Eq, PartialEq, Serialize, Deserialize, Debug)]
pub struct Protection {
    pub(crate) read: bool,
    pub(crate) write: bool,
}

impl Protection {
    /// Returns Protection allowing read/write access.
    #[inline(always)]
    pub fn read_write() -> Protection {
        Protection {
            read: true,
            write: true,
        }
    }

    /// Returns Protection allowing read access.
    #[inline(always)]
    pub fn read() -> Protection {
        Protection {
            read: true,
            ..Default::default()
        }
    }

    /// Returns Protection allowing write access.
    #[inline(always)]
    pub fn write() -> Protection {
        Protection {
            write: true,
            ..Default::default()
        }
    }

    /// Returns a copy of `self` with read access enabled.
    #[inline(always)]
    pub fn set_read(self) -> Protection {
        Protection { read: true, ..self }
    }

    /// Returns a copy of `self` with write access enabled.
    #[inline(always)]
    pub fn set_write(self) -> Protection {
        Protection {
            write: true,
            ..self
        }
    }

    /// Returns true if all access allowed by |other| is also allowed by |self|.
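    ///
    /// # Examples
    /// A minimal sketch (assuming `Protection` is re-exported at the crate root, like the other
    /// types used in this file's doctests):
    ///
    /// ```
    /// # use base::Protection;
    /// assert!(Protection::read_write().allows(&Protection::read()));
    /// assert!(!Protection::read().allows(&Protection::write()));
    /// ```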
    #[inline(always)]
    pub fn allows(&self, other: &Protection) -> bool {
        self.read >= other.read && self.write >= other.write
    }
}

/// See [MemoryMapping](crate::platform::MemoryMapping) for struct- and method-level
/// documentation.
#[derive(Debug)]
pub struct MemoryMapping {
    pub(crate) mapping: PlatformMmap,

    // File backed mappings on Windows need to keep the underlying file open while the mapping is
    // open.
    // This will be `None` on non-Windows platforms. The field is never read, hence the leading
    // underscore in its name.
    //
    // TODO(b:230902713) There was a concern about relying on the kernel's refcounting to keep the
    // file object's locks (e.g. exclusive read/write) in place. We need to revisit/validate that
    // concern.
    pub(crate) _file_descriptor: Option<SafeDescriptor>,
}

#[inline(always)]
unsafe fn flush_one(_addr: *const u8) -> Result<()> {
    cfg_if::cfg_if! {
        if #[cfg(target_arch = "x86_64")] {
            // As per table 11-7 of the SDM, processors are not required to
            // snoop UC mappings, so flush the target to memory.
            // SAFETY: assumes that the caller has supplied a valid address.
            unsafe { core::arch::x86_64::_mm_clflush(_addr) };
            Ok(())
        } else if #[cfg(target_arch = "aarch64")] {
            // Data cache clean by VA to PoC.
            // SAFETY: assumes that the caller has supplied a valid address.
            unsafe { std::arch::asm!("DC CVAC, {x}", x = in(reg) _addr) };
            Ok(())
        } else if #[cfg(target_arch = "arm")] {
            Err(Error::NotImplemented("Userspace cannot flush to PoC"))
        } else {
            Err(Error::NotImplemented("Cache flush not implemented"))
        }
    }
}

impl MemoryMapping {
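    /// Writes `buf` into the mapping starting at `offset`, returning the number of bytes
    /// written, which may be less than `buf.len()` if the write would run past the end of the
    /// mapping.
    ///
    /// # Examples
    /// A minimal sketch mirroring the other doctests in this file (the 1024-byte anonymous
    /// mapping is an illustrative assumption):
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # let mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    /// let written = mem_map.write_slice(&[1u8, 2, 3, 4], 0).unwrap();
    /// assert_eq!(written, 4);
    /// ```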
    pub fn write_slice(&self, buf: &[u8], offset: usize) -> Result<usize> {
        match self.mapping.size().checked_sub(offset) {
            Some(size_past_offset) => {
                let bytes_copied = min(size_past_offset, buf.len());
                // SAFETY:
                // The bytes_copied equation above ensures we don't copy bytes out of range of
                // either buf or this slice. We also know that the buffers do not overlap because
                // slices can never occupy the same memory as a volatile slice.
                unsafe {
                    copy_nonoverlapping(buf.as_ptr(), self.as_ptr().add(offset), bytes_copied);
                }
                Ok(bytes_copied)
            }
            None => Err(Error::InvalidAddress),
        }
    }

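    /// Reads from the mapping starting at `offset` into `buf`, returning the number of bytes
    /// read, which may be less than `buf.len()` if the read would run past the end of the
    /// mapping.
    ///
    /// # Examples
    /// A minimal sketch under the same assumptions as the `write_slice` example above:
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # let mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    /// mem_map.write_slice(&[1u8, 2, 3, 4], 0).unwrap();
    /// let mut buf = [0u8; 4];
    /// let read = mem_map.read_slice(&mut buf, 0).unwrap();
    /// assert_eq!(read, 4);
    /// assert_eq!(buf, [1, 2, 3, 4]);
    /// ```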
    pub fn read_slice(&self, buf: &mut [u8], offset: usize) -> Result<usize> {
        match self.size().checked_sub(offset) {
            Some(size_past_offset) => {
                let bytes_copied = min(size_past_offset, buf.len());
                // SAFETY:
                // The bytes_copied equation above ensures we don't copy bytes out of range of
                // either buf or this slice. We also know that the buffers do not overlap because
                // slices can never occupy the same memory as a volatile slice.
                unsafe {
                    copy_nonoverlapping(self.as_ptr().add(offset), buf.as_mut_ptr(), bytes_copied);
                }
                Ok(bytes_copied)
            }
            None => Err(Error::InvalidAddress),
        }
    }

    /// Writes an object to the memory region at the specified offset.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// This method is for writing to regular memory. If writing to a mapped
    /// I/O region, use [`MemoryMapping::write_obj_volatile`].
    ///
    /// # Examples
    /// * Write a u64 at offset 16.
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # use base::SharedMemory;
    /// # let shm = SharedMemory::new("test", 1024).unwrap();
    /// # let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    /// let res = mem_map.write_obj(55u64, 16);
    /// assert!(res.is_ok());
    /// ```
    pub fn write_obj<T: AsBytes>(&self, val: T, offset: usize) -> Result<()> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // SAFETY:
        // This is safe because we checked the bounds above.
        unsafe {
            write_unaligned(self.as_ptr().add(offset) as *mut T, val);
        }
        Ok(())
    }

    /// Reads an object from the memory region at the given offset.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read. However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// This method is for reading from regular memory. If reading from a
    /// mapped I/O region, use [`MemoryMapping::read_obj_volatile`].
    ///
    /// # Examples
    /// * Read a u64 written to offset 32.
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # let mut mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    /// let res = mem_map.write_obj(55u64, 32);
    /// assert!(res.is_ok());
    /// let num: u64 = mem_map.read_obj(32).unwrap();
    /// assert_eq!(55, num);
    /// ```
    pub fn read_obj<T: FromBytes>(&self, offset: usize) -> Result<T> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // SAFETY:
        // This is safe because by definition Copy types can have their bits set arbitrarily and
        // still be valid.
        unsafe {
            Ok(read_unaligned(
                self.as_ptr().add(offset) as *const u8 as *const T
            ))
        }
    }

    /// Writes an object to the memory region at the specified offset.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// The write operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When writing
    /// to regular memory, prefer [`MemoryMapping::write_obj`].
    ///
    /// # Examples
    /// * Write a u32 at offset 16.
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # use base::SharedMemory;
    /// # let shm = SharedMemory::new("test", 1024).unwrap();
    /// # let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    /// let res = mem_map.write_obj_volatile(0xf00u32, 16);
    /// assert!(res.is_ok());
    /// ```
    pub fn write_obj_volatile<T: AsBytes>(&self, val: T, offset: usize) -> Result<()> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // Make sure writes to memory have been committed before performing I/O that could
        // potentially depend on them.
        fence(Ordering::SeqCst);
        // SAFETY:
        // This is safe because we checked the bounds above.
        unsafe {
            write_volatile(self.as_ptr().add(offset) as *mut T, val);
        }
        Ok(())
    }

    /// Reads an object from the memory region at the given offset.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read. However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// The read operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When reading
    /// from regular memory, prefer [`MemoryMapping::read_obj`].
    ///
    /// # Examples
    /// * Read a u32 written to offset 16.
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # use base::SharedMemory;
    /// # let shm = SharedMemory::new("test", 1024).unwrap();
    /// # let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    /// let res = mem_map.write_obj(0xf00u32, 16);
    /// assert!(res.is_ok());
    /// let num: u32 = mem_map.read_obj_volatile(16).unwrap();
    /// assert_eq!(0xf00, num);
    /// ```
    pub fn read_obj_volatile<T: FromBytes>(&self, offset: usize) -> Result<T> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // SAFETY:
        // This is safe because by definition Copy types can have their bits set arbitrarily and
        // still be valid.
        unsafe {
            Ok(read_volatile(
                self.as_ptr().add(offset) as *const u8 as *const T
            ))
        }
    }

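    /// Flushes modified pages of the mapping back to its backing descriptor, in the spirit of
    /// msync(2).
    ///
    /// # Examples
    /// A minimal sketch (the shared-memory setup mirrors the other doctests in this file):
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # use base::SharedMemory;
    /// # let shm = SharedMemory::new("test", 1024).unwrap();
    /// # let mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    /// mem_map.write_obj(1u8, 0).unwrap();
    /// mem_map.msync().unwrap();
    /// ```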
    pub fn msync(&self) -> Result<()> {
        self.mapping.msync()
    }

    /// Flush a region of the MemoryMapping from the system's caching hierarchy.
    /// There are several uses for flushing:
    ///
    /// * Cached memory which the guest may be reading through an uncached mapping:
    ///
    ///   Guest reads via an uncached mapping can bypass the cache and directly access main
    ///   memory. This is outside the memory model of Rust, which means that even with proper
    ///   synchronization, guest reads via an uncached mapping might not see updates from the
    ///   host. As such, it is necessary to perform architectural cache maintenance to flush the
    ///   host writes to main memory.
    ///
    ///   Note that this does not support writable uncached guest mappings, as doing so
    ///   requires invalidating the cache, not flushing the cache.
    ///
    /// * Uncached memory which the guest may be writing through a cached mapping:
    ///
    ///   Guest writes via a cached mapping of a host's uncached memory may never make it to
    ///   system/device memory prior to being read. In such cases, explicit flushing of the cached
    ///   writes is necessary, since other managers of the host's uncached mapping (e.g. DRM) see
    ///   no need to flush, as they believe all writes would explicitly bypass the caches.
    ///
    /// Currently only supported on x86_64 and aarch64. Cannot be supported on 32-bit arm.
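    ///
    /// # Examples
    /// A minimal sketch (assuming an x86_64 or aarch64 host, where flushing is supported, and a
    /// 1024-byte anonymous mapping for illustration):
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # let mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    /// mem_map.write_obj(0xaau8, 0).unwrap();
    /// // Flush the first 64 bytes (at most a few cachelines) back to main memory.
    /// mem_map.flush_region(0, 64).unwrap();
    /// ```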
    pub fn flush_region(&self, offset: usize, len: usize) -> Result<()> {
        let addr: *const u8 = self.as_ptr();
        let size = self.size();

        // disallow overflow/wrapping ranges and subregion extending beyond mapped range
        if usize::MAX - size < addr as usize || offset >= size || size - offset < len {
            return Err(Error::InvalidRange(offset, len, size));
        }

        // SAFETY:
        // Safe because already validated that `next` will be an address in the mapping:
        // * mapped region is non-wrapping
        // * subregion is bounded within the mapped region
        let mut next: *const u8 = unsafe { addr.add(offset) };

        let cacheline_size = get_cacheline_size();
        let cacheline_count = len.div_ceil(cacheline_size);

        for _ in 0..cacheline_count {
            // SAFETY:
            // Safe because `next` is guaranteed to be within the mapped region (see earlier
            // validations), and flushing the cache doesn't affect any rust safety properties.
            unsafe { flush_one(next)? };

            // SAFETY:
            // Safe because we never use next if it goes out of the mapped region or overflows its
            // storage type (based on earlier validations and the loop bounds).
            next = unsafe { next.add(cacheline_size) };
        }
        Ok(())
    }

    /// Flush all backing memory for a mapping in an arch-specific manner (see `flush_region()`).
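    ///
    /// # Examples
    /// A minimal sketch under the same host-architecture assumption as `flush_region`:
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # let mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    /// mem_map.flush_all().unwrap();
    /// ```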
    pub fn flush_all(&self) -> Result<()> {
        self.flush_region(0, self.size())
    }
}

pub struct MemoryMappingBuilder<'a> {
    pub(crate) descriptor: Option<&'a dyn AsRawDescriptor>,
    pub(crate) is_file_descriptor: bool,
    #[cfg_attr(target_os = "macos", allow(unused))]
    pub(crate) size: usize,
    pub(crate) offset: Option<u64>,
    pub(crate) align: Option<u64>,
    pub(crate) protection: Option<Protection>,
    #[cfg_attr(target_os = "macos", allow(unused))]
    #[cfg_attr(windows, allow(unused))]
    pub(crate) populate: bool,
}

/// Builds a MemoryMapping object from the specified arguments.
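///
/// # Examples
/// A minimal sketch of chaining the builder methods defined below (the 4096-byte size and
/// explicit read/write protection are illustrative assumptions):
///
/// ```
/// # use base::MemoryMappingBuilder;
/// # use base::Protection;
/// let mem_map = MemoryMappingBuilder::new(4096)
///     .protection(Protection::read_write())
///     .build()
///     .unwrap();
/// ```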
impl<'a> MemoryMappingBuilder<'a> {
    /// Creates a new builder specifying size of the memory region in bytes.
    pub fn new(size: usize) -> MemoryMappingBuilder<'a> {
        MemoryMappingBuilder {
            descriptor: None,
            size,
            is_file_descriptor: false,
            offset: None,
            align: None,
            protection: None,
            populate: false,
        }
    }

    /// Build the memory mapping backed by the specified File.
    ///
    /// Default: Create a new memory mapping.
    ///
    /// Note: this is a forward looking interface to accommodate platforms that
    /// require special handling for file backed mappings.
    #[allow(clippy::wrong_self_convention, unused_mut)]
    pub fn from_file(mut self, file: &'a File) -> MemoryMappingBuilder {
        // On Windows, file-backed mappings require special handling, so record that the
        // descriptor is a file.
        self.is_file_descriptor = true;

        self.descriptor = Some(file as &dyn AsRawDescriptor);
        self
    }

    /// Build the memory mapping backed by the specified SharedMemory.
    ///
    /// Default: Create a new memory mapping.
    pub fn from_shared_memory(mut self, shm: &'a SharedMemory) -> MemoryMappingBuilder {
        self.descriptor = Some(shm as &dyn AsRawDescriptor);
        self
    }

    /// Offset in bytes from the beginning of the mapping to start the mmap.
    ///
    /// Default: No offset
    pub fn offset(mut self, offset: u64) -> MemoryMappingBuilder<'a> {
        self.offset = Some(offset);
        self
    }

    /// Protection (e.g. readable/writable) of the memory region.
    ///
    /// Default: Read/write
    pub fn protection(mut self, protection: Protection) -> MemoryMappingBuilder<'a> {
        self.protection = Some(protection);
        self
    }

    /// Alignment of the memory region mapping in bytes.
    ///
    /// Default: No alignment
    pub fn align(mut self, alignment: u64) -> MemoryMappingBuilder<'a> {
        self.align = Some(alignment);
        self
    }
}

impl VolatileMemory for MemoryMapping {
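    /// Returns a bounded `VolatileSlice` view of the mapping.
    ///
    /// # Examples
    /// A minimal sketch (assuming the `VolatileMemory` trait is re-exported at the crate root,
    /// as the `use` block at the top of this file suggests):
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # use base::VolatileMemory;
    /// # let mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    /// let slice = mem_map.get_slice(0, 16).unwrap();
    /// assert_eq!(slice.size(), 16);
    /// ```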
    fn get_slice(&self, offset: usize, count: usize) -> VolatileMemoryResult<VolatileSlice> {
        let mem_end = offset
            .checked_add(count)
            .ok_or(VolatileMemoryError::Overflow {
                base: offset,
                offset: count,
            })?;

        if mem_end > self.size() {
            return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
        }

        let new_addr =
            (self.as_ptr() as usize)
                .checked_add(offset)
                .ok_or(VolatileMemoryError::Overflow {
                    base: self.as_ptr() as usize,
                    offset,
                })?;

        // SAFETY:
        // Safe because we checked that offset + count was within our range and we only ever hand
        // out volatile accessors.
        Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
    }
}

/// A range of memory that can be msynced, for abstracting over different types of memory mappings.
///
/// # Safety
/// Safe when implementers guarantee `ptr`..`ptr+size` is an mmaped region owned by this object that
/// can't be unmapped during the `MappedRegion`'s lifetime.
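///
/// # Examples
/// A minimal sketch of a custom implementor (the `FixedRegion` type and its backing buffer are
/// hypothetical, and a real implementor must uphold the safety contract above for the region's
/// whole lifetime):
///
/// ```
/// use base::MappedRegion;
///
/// struct FixedRegion {
///     ptr: *mut u8,
///     size: usize,
/// }
///
/// // SAFETY: illustrative only; a real implementor must ensure the region is safe to share
/// // and send across threads.
/// unsafe impl Send for FixedRegion {}
/// // SAFETY: see above.
/// unsafe impl Sync for FixedRegion {}
///
/// // SAFETY: illustrative only; assumes `ptr`..`ptr+size` stays mapped while this object lives.
/// unsafe impl MappedRegion for FixedRegion {
///     fn as_ptr(&self) -> *mut u8 {
///         self.ptr
///     }
///     fn size(&self) -> usize {
///         self.size
///     }
/// }
///
/// let mut backing = vec![0u8; 4096];
/// let region = FixedRegion { ptr: backing.as_mut_ptr(), size: backing.len() };
/// assert_eq!(region.size(), 4096);
/// ```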
pub unsafe trait MappedRegion: Send + Sync {
    /// Returns a pointer to the beginning of the memory region. Should only be
    /// used for passing this region to ioctls for setting guest memory.
    fn as_ptr(&self) -> *mut u8;

    /// Returns the size of the memory region in bytes.
    fn size(&self) -> usize;

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the region with `prot` protections.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    fn add_fd_mapping(
        &mut self,
        _offset: usize,
        _size: usize,
        _fd: &dyn AsRawDescriptor,
        _fd_offset: u64,
        _prot: Protection,
    ) -> Result<()> {
        Err(Error::AddFdMappingIsUnsupported)
    }

    /// Remove `size`-byte mapping starting at `offset`.
    fn remove_mapping(&mut self, _offset: usize, _size: usize) -> Result<()> {
        Err(Error::RemoveMappingIsUnsupported)
    }
}

// SAFETY:
// Safe because it exclusively forwards calls to a safe implementation.
unsafe impl MappedRegion for MemoryMapping {
    fn as_ptr(&self) -> *mut u8 {
        self.mapping.as_ptr()
    }

    fn size(&self) -> usize {
        self.mapping.size()
    }
}

#[derive(Debug, PartialEq, Eq)]
pub struct ExternalMapping {
    pub ptr: u64,
    pub size: usize,
}

// SAFETY:
// `ptr`..`ptr+size` is an mmaped region and is owned by this object. Caller
// needs to ensure that the region is not unmapped during the `MappedRegion`'s
// lifetime.
unsafe impl MappedRegion for ExternalMapping {
    /// Returns a pointer to the beginning of the memory region. Should only be
    /// used for passing this region to ioctls for setting guest memory.
    fn as_ptr(&self) -> *mut u8 {
        self.ptr as *mut u8
    }

    /// Returns the size of the memory region in bytes.
    fn size(&self) -> usize {
        self.size
    }
}