// Copyright 2023 Linaro Ltd. All Rights Reserved.
// Viresh Kumar <[email protected]>
//
// Xen specific memory mapping implementations
//
// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause

//! Helper structure for working with mmap'ed memory regions on Xen.

use bitflags::bitflags;
use libc::{c_int, c_void, MAP_SHARED, _SC_PAGESIZE};
use std::{io, mem::size_of, os::raw::c_ulong, os::unix::io::AsRawFd, ptr::null_mut, result};

use vmm_sys_util::{
    fam::{Error as FamError, FamStruct, FamStructWrapper},
    generate_fam_struct_impl,
    ioctl::{ioctl_expr, _IOC_NONE},
};

// Use a dummy ioctl implementation for tests instead.
#[cfg(not(test))]
use vmm_sys_util::ioctl::ioctl_with_ref;

#[cfg(test)]
use tests::ioctl_with_ref;

use crate::bitmap::{Bitmap, BS};
use crate::guest_memory::{FileOffset, GuestAddress};
use crate::mmap::{check_file_offset, NewBitmap};
use crate::volatile_memory::{self, VolatileMemory, VolatileSlice};

/// Error conditions that may arise when creating a new `MmapRegion` object.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// The specified file offset and length cause overflow when added.
    #[error("The specified file offset and length cause overflow when added")]
    InvalidOffsetLength,
    /// The forbidden `MAP_FIXED` flag was specified.
    #[error("The forbidden `MAP_FIXED` flag was specified")]
    MapFixed,
    /// A mapping with offset + length > EOF was attempted.
    #[error("The specified file offset and length are greater than the file length")]
    MappingPastEof,
    /// The `mmap` call returned an error.
    #[error("{0}")]
    Mmap(io::Error),
    /// Seeking the end of the file returned an error.
    #[error("Error seeking the end of the file: {0}")]
    SeekEnd(io::Error),
    /// Seeking the start of the file returned an error.
    #[error("Error seeking the start of the file: {0}")]
    SeekStart(io::Error),
    /// Invalid file offset.
    #[error("Invalid file offset")]
    InvalidFileOffset,
    /// Memory mapped in advance.
    #[error("Memory mapped in advance")]
    MappedInAdvance,
    /// Invalid Xen mmap flags.
    #[error("Invalid Xen Mmap flags: {0:x}")]
    MmapFlags(u32),
    /// Fam error.
    #[error("Fam error: {0}")]
    Fam(FamError),
    /// Unexpected error.
    #[error("Unexpected error")]
    UnexpectedError,
}

type Result<T> = result::Result<T, Error>;

/// `MmapRange` represents the arguments required to create a Mmap region.
#[derive(Clone, Debug)]
pub struct MmapRange {
    size: usize,
    file_offset: Option<FileOffset>,
    prot: Option<i32>,
    flags: Option<i32>,
    hugetlbfs: Option<bool>,
    addr: GuestAddress,
    mmap_flags: u32,
    mmap_data: u32,
}

impl MmapRange {
    /// Creates an instance of the range from multiple arguments.
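    ///
    /// # Examples
    ///
    /// A minimal sketch (values are illustrative): describe a 16 KiB grant mapping for
    /// domain 1 that is mapped on access rather than in advance.
    ///
    /// ```
    /// use vm_memory::{GuestAddress, MmapRange, MmapXenFlags};
    ///
    /// let range = MmapRange::new(
    ///     0x4000,
    ///     None,
    ///     GuestAddress(0x1000),
    ///     MmapXenFlags::GRANT.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits(),
    ///     1, // domain id
    /// );
    /// ```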
    pub fn new(
        size: usize,
        file_offset: Option<FileOffset>,
        addr: GuestAddress,
        mmap_flags: u32,
        mmap_data: u32,
    ) -> Self {
        Self {
            size,
            file_offset,
            prot: None,
            flags: None,
            hugetlbfs: None,
            addr,
            mmap_flags,
            mmap_data,
        }
    }

    /// Creates an instance of the range for a `MmapXenFlags::UNIX` type mapping.
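    ///
    /// A minimal sketch: an anonymous 4 KiB UNIX mapping at guest address 0x1000.
    ///
    /// ```
    /// use vm_memory::{GuestAddress, MmapRange};
    ///
    /// let range = MmapRange::new_unix(0x1000, None, GuestAddress(0x1000));
    /// ```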
    pub fn new_unix(size: usize, file_offset: Option<FileOffset>, addr: GuestAddress) -> Self {
        let flags = Some(match file_offset {
            Some(_) => libc::MAP_NORESERVE | libc::MAP_SHARED,
            None => libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
        });

        Self {
            size,
            file_offset,
            prot: None,
            flags,
            hugetlbfs: None,
            addr,
            mmap_flags: MmapXenFlags::UNIX.bits(),
            mmap_data: 0,
        }
    }

    /// Sets the `prot` of the range.
    pub fn set_prot(&mut self, prot: i32) {
        self.prot = Some(prot)
    }

    /// Sets the `flags` of the range.
    pub fn set_flags(&mut self, flags: i32) {
        self.flags = Some(flags)
    }

    /// Sets the hugetlbfs hint of the range.
    pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) {
        self.hugetlbfs = Some(hugetlbfs)
    }
}

/// Helper structure for working with mmap'ed memory regions on Xen.
///
/// The structure is used for accessing the guest's physical memory by mmapping it into
/// the current process.
///
/// # Limitations
/// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's
/// physical memory may be mapped into the current process due to the limited virtual address
/// space size of the process.
#[derive(Debug)]
pub struct MmapRegion<B = ()> {
    bitmap: B,
    size: usize,
    prot: i32,
    flags: i32,
    file_offset: Option<FileOffset>,
    hugetlbfs: Option<bool>,
    mmap: MmapXen,
}

// SAFETY: Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl<B: Send> Send for MmapRegion<B> {}
// SAFETY: See comment above.
unsafe impl<B: Sync> Sync for MmapRegion<B> {}

impl<B: NewBitmap> MmapRegion<B> {
    /// Creates a memory region mapping as described by `range`.
    ///
    /// # Arguments
    /// * `range` - An instance of type `MmapRange`.
    ///
    /// # Examples
    /// * Write a slice at guest address 0x1200 with Xen's Grant mapping.
    ///
    /// ```
    /// use std::fs::File;
    /// use std::path::Path;
    /// use vm_memory::{
    ///     Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion,
    ///     MmapXenFlags,
    /// };
    /// # use vmm_sys_util::tempfile::TempFile;
    ///
    /// let addr = GuestAddress(0x1000);
    /// # if false {
    /// let file = Some(FileOffset::new(
    ///     File::open(Path::new("/dev/xen/gntdev")).expect("Could not open file"),
    ///     0,
    /// ));
    ///
    /// let range = MmapRange::new(0x400, file, addr, MmapXenFlags::GRANT.bits(), 0);
    /// # }
    /// # // We need a UNIX mapping for tests to succeed.
    /// # let range = MmapRange::new_unix(0x400, None, addr);
    ///
    /// let r = GuestRegionMmap::new(
    ///     MmapRegion::<()>::from_range(range).expect("Could not create mmap region"),
    ///     addr,
    /// )
    /// .expect("Could not create guest region");
    ///
    /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory");
    /// let res = gm
    ///     .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
    ///     .expect("Could not write to guest memory");
    /// assert_eq!(5, res);
    /// ```
    ///
    /// * Write a slice at guest address 0x1200 with Xen's Foreign mapping.
    ///
    /// ```
    /// use std::fs::File;
    /// use std::path::Path;
    /// use vm_memory::{
    ///     Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion,
    ///     MmapXenFlags,
    /// };
    /// # use vmm_sys_util::tempfile::TempFile;
    ///
    /// let addr = GuestAddress(0x1000);
    /// # if false {
    /// let file = Some(FileOffset::new(
    ///     File::open(Path::new("/dev/xen/privcmd")).expect("Could not open file"),
    ///     0,
    /// ));
    ///
    /// let range = MmapRange::new(0x400, file, addr, MmapXenFlags::FOREIGN.bits(), 0);
    /// # }
    /// # // We need a UNIX mapping for tests to succeed.
    /// # let range = MmapRange::new_unix(0x400, None, addr);
    ///
    /// let r = GuestRegionMmap::new(
    ///     MmapRegion::<()>::from_range(range).expect("Could not create mmap region"),
    ///     addr,
    /// )
    /// .expect("Could not create guest region");
    ///
    /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory");
    /// let res = gm
    ///     .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
    ///     .expect("Could not write to guest memory");
    /// assert_eq!(5, res);
    /// ```
    pub fn from_range(mut range: MmapRange) -> Result<Self> {
        if range.prot.is_none() {
            range.prot = Some(libc::PROT_READ | libc::PROT_WRITE);
        }

        match range.flags {
            Some(flags) => {
                if flags & libc::MAP_FIXED != 0 {
                    // Forbid MAP_FIXED, as it doesn't make sense in this context, and is
                    // pretty dangerous in general.
                    return Err(Error::MapFixed);
                }
            }
            None => range.flags = Some(libc::MAP_NORESERVE | libc::MAP_SHARED),
        }

        let mmap = MmapXen::new(&range)?;

        Ok(MmapRegion {
            bitmap: B::with_len(range.size),
            size: range.size,
            prot: range.prot.ok_or(Error::UnexpectedError)?,
            flags: range.flags.ok_or(Error::UnexpectedError)?,
            file_offset: range.file_offset,
            hugetlbfs: range.hugetlbfs,
            mmap,
        })
    }
}

impl<B: Bitmap> MmapRegion<B> {
    /// Returns a pointer to the beginning of the memory region. Mutable accesses performed
    /// using the resulting pointer are not automatically accounted for by the dirty bitmap
    /// tracking functionality.
    ///
    /// Should only be used for passing this region to ioctls for setting guest memory.
    pub fn as_ptr(&self) -> *mut u8 {
        self.mmap.addr()
    }

    /// Returns the size of this region.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Returns information regarding the offset into the file backing this region (if any).
    pub fn file_offset(&self) -> Option<&FileOffset> {
        self.file_offset.as_ref()
    }

    /// Returns the value of the `prot` parameter passed to `mmap` when mapping this region.
    pub fn prot(&self) -> i32 {
        self.prot
    }

    /// Returns the value of the `flags` parameter passed to `mmap` when mapping this region.
    pub fn flags(&self) -> i32 {
        self.flags
    }

    /// Checks whether this region and `other` are backed by overlapping
    /// [`FileOffset`](struct.FileOffset.html) objects.
    ///
    /// This is mostly a sanity check available for convenience, as different file descriptors
    /// can alias the same file.
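    ///
    /// A minimal sketch: anonymous regions carry no `FileOffset`, so they can never overlap.
    ///
    /// ```
    /// use vm_memory::{GuestAddress, MmapRange, MmapRegion};
    ///
    /// let r1 = MmapRegion::<()>::from_range(MmapRange::new_unix(0x1000, None, GuestAddress(0)))
    ///     .unwrap();
    /// let r2 = MmapRegion::<()>::from_range(MmapRange::new_unix(0x1000, None, GuestAddress(0x1000)))
    ///     .unwrap();
    /// assert!(!r1.fds_overlap(&r2));
    /// ```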
    pub fn fds_overlap<T: Bitmap>(&self, other: &MmapRegion<T>) -> bool {
        if let Some(f_off1) = self.file_offset() {
            if let Some(f_off2) = other.file_offset() {
                if f_off1.file().as_raw_fd() == f_off2.file().as_raw_fd() {
                    let s1 = f_off1.start();
                    let s2 = f_off2.start();
                    let l1 = self.len() as u64;
                    let l2 = other.len() as u64;

                    if s1 < s2 {
                        return s1 + l1 > s2;
                    } else {
                        return s2 + l2 > s1;
                    }
                }
            }
        }
        false
    }

    /// Sets the hugetlbfs hint of the region.
    pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) {
        self.hugetlbfs = Some(hugetlbfs)
    }

    /// Returns whether the region is backed by hugetlbfs, if known.
    pub fn is_hugetlbfs(&self) -> Option<bool> {
        self.hugetlbfs
    }

    /// Returns a reference to the inner bitmap object.
    pub fn bitmap(&self) -> &B {
        &self.bitmap
    }

    /// Returns the Xen mmap flags.
    pub fn xen_mmap_flags(&self) -> u32 {
        self.mmap.flags()
    }

    /// Returns the Xen mmap data.
    pub fn xen_mmap_data(&self) -> u32 {
        self.mmap.data()
    }
}

impl<B: Bitmap> VolatileMemory for MmapRegion<B> {
    type B = B;

    fn len(&self) -> usize {
        self.size
    }

    fn get_slice(
        &self,
        offset: usize,
        count: usize,
    ) -> volatile_memory::Result<VolatileSlice<BS<B>>> {
        let _ = self.compute_end_offset(offset, count)?;

        let mmap_info = if self.mmap.mmap_in_advance() {
            None
        } else {
            Some(&self.mmap)
        };

        Ok(
            // SAFETY: Safe because we checked that offset + count was within our range and we
            // only ever hand out volatile accessors.
            unsafe {
                VolatileSlice::with_bitmap(
                    self.as_ptr().add(offset),
                    count,
                    self.bitmap.slice_at(offset),
                    mmap_info,
                )
            },
        )
    }
}

#[derive(Clone, Debug, PartialEq)]
struct MmapUnix {
    addr: *mut u8,
    size: usize,
}

impl MmapUnix {
    fn new(size: usize, prot: i32, flags: i32, fd: i32, f_offset: u64) -> Result<Self> {
        let addr =
        // SAFETY: This is safe because we're not allowing MAP_FIXED, and invalid parameters
        // cannot break Rust safety guarantees (things may change if we're mapping /dev/mem or
        // some wacky file).
            unsafe { libc::mmap(null_mut(), size, prot, flags, fd, f_offset as libc::off_t) };

        if addr == libc::MAP_FAILED {
            return Err(Error::Mmap(io::Error::last_os_error()));
        }

        Ok(Self {
            addr: addr as *mut u8,
            size,
        })
    }

    fn addr(&self) -> *mut u8 {
        self.addr
    }
}

impl Drop for MmapUnix {
    fn drop(&mut self) {
        // SAFETY: This is safe because we mmap the area at addr ourselves, and nobody
        // else is holding a reference to it.
        unsafe {
            libc::munmap(self.addr as *mut libc::c_void, self.size);
        }
    }
}

// Bit mask for the vhost-user xen mmap message.
bitflags! {
    /// Flags for the Xen mmap message.
    pub struct MmapXenFlags: u32 {
        /// Standard Unix memory mapping.
        const UNIX = 0x0;
        /// Xen foreign memory (accessed via /dev/privcmd).
        const FOREIGN = 0x1;
        /// Xen grant memory (accessed via /dev/gntdev).
        const GRANT = 0x2;
        /// Don't map the region in advance; map slices of it on demand instead.
        const NO_ADVANCE_MAP = 0x8;
        /// All valid mappings.
        const ALL = Self::FOREIGN.bits() | Self::GRANT.bits();
    }
}

impl MmapXenFlags {
    /// Returns `true` if the flags form a valid combination.
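    ///
    /// A minimal sketch of the accepted combinations:
    ///
    /// ```
    /// use vm_memory::MmapXenFlags;
    ///
    /// assert!(MmapXenFlags::UNIX.is_valid());
    /// assert!((MmapXenFlags::GRANT | MmapXenFlags::NO_ADVANCE_MAP).is_valid());
    /// // Foreign (and Unix) mappings must be mapped in advance.
    /// assert!(!(MmapXenFlags::FOREIGN | MmapXenFlags::NO_ADVANCE_MAP).is_valid());
    /// // Foreign and grant mappings are mutually exclusive.
    /// assert!(!(MmapXenFlags::FOREIGN | MmapXenFlags::GRANT).is_valid());
    /// ```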
    pub fn is_valid(&self) -> bool {
        // Only one of UNIX, FOREIGN or GRANT should be set, and mmap_in_advance() must be
        // true for FOREIGN and UNIX mappings.
        if self.is_grant() {
            !self.is_foreign()
        } else if self.is_foreign() || self.is_unix() {
            self.mmap_in_advance()
        } else {
            false
        }
    }

    /// Is standard Unix memory.
    pub fn is_unix(&self) -> bool {
        self.bits() == Self::UNIX.bits()
    }

    /// Is Xen foreign memory.
    pub fn is_foreign(&self) -> bool {
        self.contains(Self::FOREIGN)
    }

    /// Is Xen grant memory.
    pub fn is_grant(&self) -> bool {
        self.contains(Self::GRANT)
    }

    /// Can the entire region be mapped in advance.
    pub fn mmap_in_advance(&self) -> bool {
        !self.contains(Self::NO_ADVANCE_MAP)
    }
}

fn page_size() -> u64 {
    // SAFETY: Safe because this call just returns the page size and doesn't have any side effects.
    unsafe { libc::sysconf(_SC_PAGESIZE) as u64 }
}

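// Returns the number of pages spanned by `size` bytes and that count scaled back up to a
// byte length, i.e. `size` rounded up to a page boundary. For example, assuming 4 KiB
// pages, `pages(0x1001)` returns `(2, 0x2000)`.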
fn pages(size: usize) -> (usize, usize) {
    let page_size = page_size() as usize;
    let num = (size + page_size - 1) / page_size;

    (num, page_size * num)
}

fn validate_file(file_offset: &Option<FileOffset>) -> Result<(i32, u64)> {
    let file_offset = match file_offset {
        Some(f) => f,
        None => return Err(Error::InvalidFileOffset),
    };

    let fd = file_offset.file().as_raw_fd();
    let f_offset = file_offset.start();

    // We don't allow non-zero file offsets with Xen mappings.
    if f_offset != 0 {
        return Err(Error::InvalidOffsetLength);
    }

    Ok((fd, f_offset))
}

// Xen memory mapping interface.
trait MmapXenTrait: std::fmt::Debug {
    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice>;
    fn addr(&self) -> *mut u8;
}

// Standard Unix memory mapping for testing other crates.
#[derive(Clone, Debug, PartialEq)]
struct MmapXenUnix(MmapUnix);

impl MmapXenUnix {
    fn new(range: &MmapRange) -> Result<Self> {
        let (fd, offset) = if let Some(ref f_off) = range.file_offset {
            check_file_offset(f_off, range.size)?;
            (f_off.file().as_raw_fd(), f_off.start())
        } else {
            (-1, 0)
        };

        Ok(Self(MmapUnix::new(
            range.size,
            range.prot.ok_or(Error::UnexpectedError)?,
            range.flags.ok_or(Error::UnexpectedError)?,
            fd,
            offset,
        )?))
    }
}

impl MmapXenTrait for MmapXenUnix {
    // Unix mappings are always mapped in advance, so on-demand slice mapping isn't
    // supported.
    #[allow(unused_variables)]
    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
        Err(Error::MappedInAdvance)
    }

    fn addr(&self) -> *mut u8 {
        self.0.addr()
    }
}

// Privcmd mmap batch v2 command
//
// include/uapi/xen/privcmd.h: `privcmd_mmapbatch_v2`
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct PrivCmdMmapBatchV2 {
    // number of pages to populate
    num: u32,
    // target domain
    domid: u16,
    // virtual address
    addr: *mut c_void,
    // array of mfns
    arr: *const u64,
    // array of error codes
    err: *mut c_int,
}

const XEN_PRIVCMD_TYPE: u32 = 'P' as u32;

// #define IOCTL_PRIVCMD_MMAPBATCH_V2 _IOC(_IOC_NONE, 'P', 4, sizeof(privcmd_mmapbatch_v2_t))
fn ioctl_privcmd_mmapbatch_v2() -> c_ulong {
    ioctl_expr(
        _IOC_NONE,
        XEN_PRIVCMD_TYPE,
        4,
        size_of::<PrivCmdMmapBatchV2>() as u32,
    )
}

// Xen foreign memory specific implementation.
#[derive(Clone, Debug, PartialEq)]
struct MmapXenForeign {
    domid: u32,
    guest_base: GuestAddress,
    unix_mmap: MmapUnix,
    fd: i32,
}

impl AsRawFd for MmapXenForeign {
    fn as_raw_fd(&self) -> i32 {
        self.fd
    }
}

impl MmapXenForeign {
    fn new(range: &MmapRange) -> Result<Self> {
        let (fd, f_offset) = validate_file(&range.file_offset)?;
        let (count, size) = pages(range.size);

        let unix_mmap = MmapUnix::new(
            size,
            range.prot.ok_or(Error::UnexpectedError)?,
            range.flags.ok_or(Error::UnexpectedError)? | MAP_SHARED,
            fd,
            f_offset,
        )?;

        let foreign = Self {
            domid: range.mmap_data,
            guest_base: range.addr,
            unix_mmap,
            fd,
        };

        foreign.mmap_ioctl(count)?;
        Ok(foreign)
    }

    // Ioctl to pass additional information to the mmap infrastructure of the privcmd driver.
    fn mmap_ioctl(&self, count: usize) -> Result<()> {
        let base = self.guest_base.0 / page_size();

        let mut pfn = Vec::with_capacity(count);
        for i in 0..count {
            pfn.push(base + i as u64);
        }

        let mut err: Vec<c_int> = vec![0; count];

        let map = PrivCmdMmapBatchV2 {
            num: count as u32,
            domid: self.domid as u16,
            addr: self.addr() as *mut c_void,
            arr: pfn.as_ptr(),
            err: err.as_mut_ptr(),
        };

        // SAFETY: This is safe because the ioctl guarantees to not access memory beyond `map`.
        let ret = unsafe { ioctl_with_ref(self, ioctl_privcmd_mmapbatch_v2(), &map) };

        if ret == 0 {
            Ok(())
        } else {
            Err(Error::Mmap(io::Error::last_os_error()))
        }
    }
}

impl MmapXenTrait for MmapXenForeign {
    // Foreign mappings are always mapped in advance, so on-demand slice mapping isn't
    // supported.
    #[allow(unused_variables)]
    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
        Err(Error::MappedInAdvance)
    }

    fn addr(&self) -> *mut u8 {
        self.unix_mmap.addr()
    }
}

// Xen Grant memory mapping interface.

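// Bit 63 of a guest address marks it as a Xen grant mapping; `mmap_ioctl()` below masks
// it off before converting the address into a grant reference base.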
const XEN_GRANT_ADDR_OFF: u64 = 1 << 63;

// Grant reference
//
// include/uapi/xen/gntdev.h: `ioctl_gntdev_grant_ref`
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, PartialEq)]
struct GntDevGrantRef {
    // The domain ID of the grant to be mapped.
    domid: u32,
    // The grant reference of the grant to be mapped.
    reference: u32,
}

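// Bindgen-style placeholder for a C flexible array member: zero-sized, but keeps the
// element type and alignment of the trailing array.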
#[repr(C)]
#[derive(Debug, Default, PartialEq, Eq)]
struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>, [T; 0]);
impl<T> __IncompleteArrayField<T> {
    #[inline]
    unsafe fn as_ptr(&self) -> *const T {
        self as *const __IncompleteArrayField<T> as *const T
    }
    #[inline]
    unsafe fn as_mut_ptr(&mut self) -> *mut T {
        self as *mut __IncompleteArrayField<T> as *mut T
    }
    #[inline]
    unsafe fn as_slice(&self, len: usize) -> &[T] {
        ::std::slice::from_raw_parts(self.as_ptr(), len)
    }
    #[inline]
    unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
        ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
    }
}

// Grant dev mapping reference
//
// include/uapi/xen/gntdev.h: `ioctl_gntdev_map_grant_ref`
#[repr(C)]
#[derive(Debug, Default)]
struct GntDevMapGrantRef {
    // The number of grants to be mapped.
    count: u32,
    // Unused padding
    pad: u32,
    // The offset to be used on a subsequent call to mmap().
    index: u64,
    // Array of grant references, of size @count.
    refs: __IncompleteArrayField<GntDevGrantRef>,
}

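// Implement the flexible-array-member traits for `GntDevMapGrantRef`: entries of type
// `GntDevGrantRef` live in `refs` and their number is tracked in `count`, which lets
// `FamStructWrapper` allocate the struct with a run-time number of grant references.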
generate_fam_struct_impl!(
    GntDevMapGrantRef,
    GntDevGrantRef,
    refs,
    u32,
    count,
    usize::MAX
);

type GntDevMapGrantRefWrapper = FamStructWrapper<GntDevMapGrantRef>;

impl GntDevMapGrantRef {
    fn new(domid: u32, base: u32, count: usize) -> Result<GntDevMapGrantRefWrapper> {
        let mut wrapper = GntDevMapGrantRefWrapper::new(count).map_err(Error::Fam)?;
        let refs = wrapper.as_mut_slice();

        // GntDevMapGrantRef's pad and index are initialized to 0 by the Fam layer.
        for (i, r) in refs.iter_mut().enumerate().take(count) {
            r.domid = domid;
            r.reference = base + i as u32;
        }

        Ok(wrapper)
    }
}

// Grant dev un-mapping reference
//
// include/uapi/xen/gntdev.h: `ioctl_gntdev_unmap_grant_ref`
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct GntDevUnmapGrantRef {
    // The offset returned by the map operation.
    index: u64,
    // The number of grants to be unmapped.
    count: u32,
    // Unused padding
    pad: u32,
}

impl GntDevUnmapGrantRef {
    fn new(index: u64, count: u32) -> Self {
        Self {
            index,
            count,
            pad: 0,
        }
    }
}

const XEN_GNTDEV_TYPE: u32 = 'G' as u32;

// #define IOCTL_GNTDEV_MAP_GRANT_REF _IOC(_IOC_NONE, 'G', 0, sizeof(ioctl_gntdev_map_grant_ref))
fn ioctl_gntdev_map_grant_ref() -> c_ulong {
    ioctl_expr(
        _IOC_NONE,
        XEN_GNTDEV_TYPE,
        0,
        (size_of::<GntDevMapGrantRef>() + size_of::<GntDevGrantRef>()) as u32,
    )
}

// #define IOCTL_GNTDEV_UNMAP_GRANT_REF _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
fn ioctl_gntdev_unmap_grant_ref() -> c_ulong {
    ioctl_expr(
        _IOC_NONE,
        XEN_GNTDEV_TYPE,
        1,
        size_of::<GntDevUnmapGrantRef>() as u32,
    )
}

// Xen grant memory specific implementation.
#[derive(Clone, Debug)]
struct MmapXenGrant {
    guest_base: GuestAddress,
    unix_mmap: Option<MmapUnix>,
    file_offset: FileOffset,
    flags: i32,
    size: usize,
    index: u64,
    domid: u32,
}

impl AsRawFd for MmapXenGrant {
    fn as_raw_fd(&self) -> i32 {
        self.file_offset.file().as_raw_fd()
    }
}

impl MmapXenGrant {
    fn new(range: &MmapRange, mmap_flags: MmapXenFlags) -> Result<Self> {
        validate_file(&range.file_offset)?;

        let mut grant = Self {
            guest_base: range.addr,
            unix_mmap: None,
            file_offset: range.file_offset.as_ref().unwrap().clone(),
            flags: range.flags.ok_or(Error::UnexpectedError)?,
            size: 0,
            index: 0,
            domid: range.mmap_data,
        };

        // Map the entire region in advance if allowed. Otherwise, partial mappings will be
        // created later on demand via `MmapXenSlice`.
        if mmap_flags.mmap_in_advance() {
            let (unix_mmap, index) = grant.mmap_range(
                range.addr,
                range.size,
                range.prot.ok_or(Error::UnexpectedError)?,
            )?;

            grant.unix_mmap = Some(unix_mmap);
            grant.index = index;
            grant.size = range.size;
        }

        Ok(grant)
    }

    fn mmap_range(&self, addr: GuestAddress, size: usize, prot: i32) -> Result<(MmapUnix, u64)> {
        let (count, size) = pages(size);
        let index = self.mmap_ioctl(addr, count)?;
        let unix_mmap = MmapUnix::new(size, prot, self.flags, self.as_raw_fd(), index)?;

        Ok((unix_mmap, index))
    }

    fn unmap_range(&self, unix_mmap: MmapUnix, size: usize, index: u64) {
        let (count, _) = pages(size);

        // Unmap the address first.
        drop(unix_mmap);
        self.unmap_ioctl(count as u32, index).unwrap();
    }

    fn mmap_ioctl(&self, addr: GuestAddress, count: usize) -> Result<u64> {
        let base = ((addr.0 & !XEN_GRANT_ADDR_OFF) / page_size()) as u32;
        let wrapper = GntDevMapGrantRef::new(self.domid, base, count)?;
        let reference = wrapper.as_fam_struct_ref();

        // SAFETY: This is safe because the ioctl guarantees to not access memory beyond `reference`.
        let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_map_grant_ref(), reference) };

        if ret == 0 {
            Ok(reference.index)
        } else {
            Err(Error::Mmap(io::Error::last_os_error()))
        }
    }

    fn unmap_ioctl(&self, count: u32, index: u64) -> Result<()> {
        let unmap = GntDevUnmapGrantRef::new(index, count);

        // SAFETY: This is safe because the ioctl guarantees to not access memory beyond `unmap`.
        let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_unmap_grant_ref(), &unmap) };

        if ret == 0 {
            Ok(())
        } else {
            Err(Error::Mmap(io::Error::last_os_error()))
        }
    }
}

impl MmapXenTrait for MmapXenGrant {
    // Maps a slice out of the entire region.
    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
        MmapXenSlice::new_with(self.clone(), addr as usize, prot, len)
    }

    fn addr(&self) -> *mut u8 {
        if let Some(ref unix_mmap) = self.unix_mmap {
            unix_mmap.addr()
        } else {
            null_mut()
        }
    }
}

impl Drop for MmapXenGrant {
    fn drop(&mut self) {
        if let Some(unix_mmap) = self.unix_mmap.take() {
            self.unmap_range(unix_mmap, self.size, self.index);
        }
    }
}

// A slice of a Xen grant region, mapped on demand and unmapped again on drop.
#[derive(Debug)]
pub(crate) struct MmapXenSlice {
    grant: Option<MmapXenGrant>,
    unix_mmap: Option<MmapUnix>,
    addr: *mut u8,
    size: usize,
    index: u64,
}

impl MmapXenSlice {
    fn raw(addr: *mut u8) -> Self {
        Self {
            grant: None,
            unix_mmap: None,
            addr,
            size: 0,
            index: 0,
        }
    }

    fn new_with(grant: MmapXenGrant, offset: usize, prot: i32, size: usize) -> Result<Self> {
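        // Align the mapping to a page boundary: map from the page containing `offset` and
        // keep the in-page remainder, so that `addr()` still points at the requested byte.
        // E.g. assuming 4 KiB pages, an offset of 0x1801 maps from page 0x1000 and keeps a
        // remainder of 0x801, with `size` grown to cover the requested range.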
        let page_size = page_size() as usize;
        let page_base: usize = (offset / page_size) * page_size;
        let offset = offset - page_base;
        let size = offset + size;

        let addr = grant.guest_base.0 + page_base as u64;
        let (unix_mmap, index) = grant.mmap_range(GuestAddress(addr), size, prot)?;

        // SAFETY: We have already mapped the range including offset.
        let addr = unsafe { unix_mmap.addr().add(offset) };

        Ok(Self {
            grant: Some(grant),
            unix_mmap: Some(unix_mmap),
            addr,
            size,
            index,
        })
    }

    // Mapped address for the region.
    pub(crate) fn addr(&self) -> *mut u8 {
        self.addr
    }
}

impl Drop for MmapXenSlice {
    fn drop(&mut self) {
        // Unmaps memory automatically once this instance goes out of scope.
        if let Some(unix_mmap) = self.unix_mmap.take() {
            self.grant
                .as_ref()
                .unwrap()
                .unmap_range(unix_mmap, self.size, self.index);
        }
    }
}

/// Xen memory mapping helper that dispatches to the Unix, foreign, or grant implementation
/// based on the mmap flags.
#[derive(Debug)]
pub struct MmapXen {
    xen_flags: MmapXenFlags,
    domid: u32,
    mmap: Box<dyn MmapXenTrait>,
}

impl MmapXen {
    fn new(range: &MmapRange) -> Result<Self> {
        let xen_flags = match MmapXenFlags::from_bits(range.mmap_flags) {
            Some(flags) => flags,
            None => return Err(Error::MmapFlags(range.mmap_flags)),
        };

        if !xen_flags.is_valid() {
            return Err(Error::MmapFlags(xen_flags.bits()));
        }

        Ok(Self {
            xen_flags,
            domid: range.mmap_data,
            mmap: if xen_flags.is_foreign() {
                Box::new(MmapXenForeign::new(range)?)
            } else if xen_flags.is_grant() {
                Box::new(MmapXenGrant::new(range, xen_flags)?)
            } else {
                Box::new(MmapXenUnix::new(range)?)
            },
        })
    }

    fn addr(&self) -> *mut u8 {
        self.mmap.addr()
    }

    fn flags(&self) -> u32 {
        self.xen_flags.bits()
    }

    fn data(&self) -> u32 {
        self.domid
    }

    fn mmap_in_advance(&self) -> bool {
        self.xen_flags.mmap_in_advance()
    }

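    // Maps a slice of `len` bytes at `addr` on demand. `mmap_xen` is only passed as `Some`
    // for grant mappings created with `NO_ADVANCE_MAP` (see `get_slice()`); for all other
    // mappings the region is already mapped and the address is wrapped as a raw slice.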
    pub(crate) fn mmap(
        mmap_xen: Option<&Self>,
        addr: *mut u8,
        prot: i32,
        len: usize,
    ) -> MmapXenSlice {
        match mmap_xen {
            Some(mmap_xen) => mmap_xen.mmap.mmap_slice(addr, prot, len).unwrap(),
            None => MmapXenSlice::raw(addr),
        }
    }
}

#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]

    use super::*;
    use vmm_sys_util::tempfile::TempFile;

    // Helper method to extract the errno within an Error::Mmap(e), or return a
    // distinctive value when the error is represented by another variant.
    impl Error {
        fn raw_os_error(&self) -> i32 {
            match self {
                Error::Mmap(e) => e.raw_os_error().unwrap(),
                _ => std::i32::MIN,
            }
        }
    }

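    // A dummy `ioctl_with_ref()` that always reports success, letting the Xen mapping
    // paths be exercised on hosts without the /dev/xen/* devices.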
    #[allow(unused_variables)]
    pub unsafe fn ioctl_with_ref<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: &T) -> c_int {
        0
    }

    impl MmapRange {
        fn initialized(is_file: bool) -> Self {
            let file_offset = if is_file {
                Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0))
            } else {
                None
            };

            let mut range = MmapRange::new_unix(0x1000, file_offset, GuestAddress(0x1000));
            range.prot = Some(libc::PROT_READ | libc::PROT_WRITE);
            range.mmap_data = 1;

            range
        }
    }

    impl MmapRegion {
        pub fn new(size: usize) -> Result<Self> {
            let range = MmapRange::new_unix(size, None, GuestAddress(0));
            Self::from_range(range)
        }
    }

    #[test]
    fn test_mmap_xen_failures() {
        let mut range = MmapRange::initialized(true);
        // Invalid flags
        range.mmap_flags = 16;

        let r = MmapXen::new(&range);
        assert_eq!(
            format!("{:?}", r.unwrap_err()),
            format!("MmapFlags({})", range.mmap_flags),
        );

        range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::GRANT.bits();
        let r = MmapXen::new(&range);
        assert_eq!(
            format!("{:?}", r.unwrap_err()),
            format!("MmapFlags({:x})", MmapXenFlags::ALL.bits()),
        );

        range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits();
        let r = MmapXen::new(&range);
        assert_eq!(
            format!("{:?}", r.unwrap_err()),
            format!(
                "MmapFlags({:x})",
                MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits(),
            ),
        );
    }

    #[test]
    fn test_mmap_xen_success() {
        let mut range = MmapRange::initialized(true);
        range.mmap_flags = MmapXenFlags::FOREIGN.bits();

        let r = MmapXen::new(&range).unwrap();
        assert_eq!(r.flags(), range.mmap_flags);
        assert_eq!(r.data(), range.mmap_data);
        assert_ne!(r.addr(), null_mut());
        assert!(r.mmap_in_advance());

        range.mmap_flags = MmapXenFlags::GRANT.bits();
        let r = MmapXen::new(&range).unwrap();
        assert_eq!(r.flags(), range.mmap_flags);
        assert_eq!(r.data(), range.mmap_data);
        assert_ne!(r.addr(), null_mut());
        assert!(r.mmap_in_advance());

        range.mmap_flags = MmapXenFlags::GRANT.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits();
        let r = MmapXen::new(&range).unwrap();
        assert_eq!(r.flags(), range.mmap_flags);
        assert_eq!(r.data(), range.mmap_data);
        assert_eq!(r.addr(), null_mut());
        assert!(!r.mmap_in_advance());
    }

    #[test]
    fn test_foreign_map_failure() {
        let mut range = MmapRange::initialized(true);
        range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0));
        range.prot = None;
        let r = MmapXenForeign::new(&range);
        assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");

        let mut range = MmapRange::initialized(true);
        range.flags = None;
        let r = MmapXenForeign::new(&range);
        assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");

        let mut range = MmapRange::initialized(true);
        range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1));
        let r = MmapXenForeign::new(&range);
        assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength");

        let mut range = MmapRange::initialized(true);
        range.size = 0;
        let r = MmapXenForeign::new(&range);
        assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL);
    }

    #[test]
    fn test_foreign_map_success() {
        let range = MmapRange::initialized(true);
        let r = MmapXenForeign::new(&range).unwrap();
        assert_ne!(r.addr(), null_mut());
        assert_eq!(r.domid, range.mmap_data);
        assert_eq!(r.guest_base, range.addr);
    }

    #[test]
    fn test_grant_map_failure() {
        let mut range = MmapRange::initialized(true);
        range.prot = None;
        let r = MmapXenGrant::new(&range, MmapXenFlags::empty());
        assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");

        let mut range = MmapRange::initialized(true);
        range.prot = None;
        // Protection isn't used for no-advance mappings
        MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();

        let mut range = MmapRange::initialized(true);
        range.flags = None;
        let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP);
        assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError");

        let mut range = MmapRange::initialized(true);
        range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1));
        let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP);
        assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength");

        let mut range = MmapRange::initialized(true);
        range.size = 0;
        let r = MmapXenGrant::new(&range, MmapXenFlags::empty());
        assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL);
    }

    #[test]
    fn test_grant_map_success() {
        let range = MmapRange::initialized(true);
        let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();
        assert_eq!(r.addr(), null_mut());
        assert_eq!(r.domid, range.mmap_data);
        assert_eq!(r.guest_base, range.addr);

        let mut range = MmapRange::initialized(true);
        // Size isn't used with no-advance mapping.
        range.size = 0;
        MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();

        let range = MmapRange::initialized(true);
        let r = MmapXenGrant::new(&range, MmapXenFlags::empty()).unwrap();
        assert_ne!(r.addr(), null_mut());
        assert_eq!(r.domid, range.mmap_data);
        assert_eq!(r.guest_base, range.addr);
    }

    #[test]
    fn test_grant_ref_alloc() {
        let wrapper = GntDevMapGrantRef::new(0, 0x1000, 0x100).unwrap();
        let r = wrapper.as_fam_struct_ref();
        assert_eq!(r.count, 0x100);
        assert_eq!(r.pad, 0);
        assert_eq!(r.index, 0);
    }
}