// Copyright 2023 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
mod aarch64;

mod gunyah_sys;
use std::cmp::Reverse;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::collections::HashSet;
use std::ffi::CString;
use std::fs::File;
use std::mem::size_of;
use std::os::raw::c_ulong;
use std::os::unix::prelude::OsStrExt;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;

use base::errno_result;
use base::info;
use base::ioctl;
use base::ioctl_with_ref;
use base::ioctl_with_val;
use base::pagesize;
use base::warn;
use base::Error;
use base::FromRawDescriptor;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MmapError;
use base::RawDescriptor;
use gunyah_sys::*;
use libc::open;
use libc::EFAULT;
use libc::EINVAL;
use libc::EIO;
use libc::ENOENT;
use libc::ENOSPC;
use libc::ENOTSUP;
use libc::EOVERFLOW;
use libc::O_CLOEXEC;
use libc::O_RDWR;
use sync::Mutex;
use vm_memory::MemoryRegionPurpose;

use crate::*;

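/// Wrapper around the Gunyah hypervisor device node (`/dev/gunyah` by default).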
pub struct Gunyah {
    gunyah: SafeDescriptor,
}

impl AsRawDescriptor for Gunyah {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.gunyah.as_raw_descriptor()
    }
}

impl Gunyah {
    pub fn new_with_path(device_path: &Path) -> Result<Gunyah> {
        let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap();
        // SAFETY:
        // Open calls are safe because we give a nul-terminated string and verify the result.
        let ret = unsafe { open(c_path.as_ptr(), O_RDWR | O_CLOEXEC) };
        if ret < 0 {
            return errno_result();
        }
        Ok(Gunyah {
            // SAFETY:
            // Safe because we verify that ret is valid and we own the fd.
            gunyah: unsafe { SafeDescriptor::from_raw_descriptor(ret) },
        })
    }

    pub fn new() -> Result<Gunyah> {
        Gunyah::new_with_path(&PathBuf::from("/dev/gunyah"))
    }
}

impl Hypervisor for Gunyah {
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized,
    {
        Ok(Gunyah {
            gunyah: self.gunyah.try_clone()?,
        })
    }

    fn check_capability(&self, cap: HypervisorCap) -> bool {
        match cap {
            HypervisorCap::UserMemory => true,
            HypervisorCap::ArmPmuV3 => false,
            HypervisorCap::ImmediateExit => true,
            HypervisorCap::StaticSwiotlbAllocationRequired => true,
            HypervisorCap::HypervisorInitializedBootContext => true,
            HypervisorCap::S390UserSigp | HypervisorCap::TscDeadlineTimer => false,
            #[cfg(target_arch = "x86_64")]
            HypervisorCap::Xcrs | HypervisorCap::CalibratedTscLeafRequired => false,
        }
    }
}

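// Wrapper around the GH_VM_ANDROID_LEND_USER_MEM ioctl, which lends a range of host user pages
// to the guest rather than sharing it (used when the protection type isolates guest memory).
//
// SAFETY:
// Safe when the guest regions are guaranteed not to overlap.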
unsafe fn android_lend_user_memory_region(
    vm: &SafeDescriptor,
    slot: MemSlot,
    read_only: bool,
    guest_addr: u64,
    memory_size: u64,
    userspace_addr: *mut u8,
) -> Result<()> {
    let mut flags = 0;

    flags |= GH_MEM_ALLOW_READ | GH_MEM_ALLOW_EXEC;
    if !read_only {
        flags |= GH_MEM_ALLOW_WRITE;
    }

    let region = gh_userspace_memory_region {
        label: slot,
        flags,
        guest_phys_addr: guest_addr,
        memory_size,
        userspace_addr: userspace_addr as u64,
    };

    let ret = ioctl_with_ref(vm, GH_VM_ANDROID_LEND_USER_MEM, &region);
    if ret == 0 {
        Ok(())
    } else {
        errno_result()
    }
}

// Wrapper around the GH_VM_SET_USER_MEM_REGION ioctl, which creates, modifies, or deletes a
// mapping from guest physical to host user pages.
//
// SAFETY:
// Safe when the guest regions are guaranteed not to overlap.
unsafe fn set_user_memory_region(
    vm: &SafeDescriptor,
    slot: MemSlot,
    read_only: bool,
    guest_addr: u64,
    memory_size: u64,
    userspace_addr: *mut u8,
) -> Result<()> {
    let mut flags = 0;

    flags |= GH_MEM_ALLOW_READ | GH_MEM_ALLOW_EXEC;
    if !read_only {
        flags |= GH_MEM_ALLOW_WRITE;
    }

    let region = gh_userspace_memory_region {
        label: slot,
        flags,
        guest_phys_addr: guest_addr,
        memory_size,
        userspace_addr: userspace_addr as u64,
    };

    let ret = ioctl_with_ref(vm, GH_VM_SET_USER_MEM_REGION, &region);
    if ret == 0 {
        Ok(())
    } else {
        errno_result()
    }
}

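/// An IRQ route registered through an irqfd: the IRQ label and whether it is level triggered.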
#[derive(PartialEq, Eq, Hash)]
pub struct GunyahIrqRoute {
    irq: u32,
    level: bool,
}

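/// A Gunyah virtual machine, created with the GH_CREATE_VM ioctl on the Gunyah device.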
pub struct GunyahVm {
    gh: Gunyah,
    vm: SafeDescriptor,
    guest_mem: GuestMemory,
    mem_regions: Arc<Mutex<BTreeMap<MemSlot, (Box<dyn MappedRegion>, GuestAddress)>>>,
    /// A min-heap of MemSlot numbers that were used and then removed and can now be re-used.
    mem_slot_gaps: Arc<Mutex<BinaryHeap<Reverse<MemSlot>>>>,
    routes: Arc<Mutex<HashSet<GunyahIrqRoute>>>,
    hv_cfg: crate::Config,
}

impl AsRawDescriptor for GunyahVm {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.vm.as_raw_descriptor()
    }
}

impl GunyahVm {
    pub fn new(gh: &Gunyah, guest_mem: GuestMemory, cfg: Config) -> Result<GunyahVm> {
        // SAFETY:
        // Safe because we know gunyah is a real gunyah fd as this module is the only one that can
        // make Gunyah objects.
        let ret = unsafe { ioctl_with_val(gh, GH_CREATE_VM, 0 as c_ulong) };
        if ret < 0 {
            return errno_result();
        }

        // SAFETY:
        // Safe because we verify that ret is valid and we own the fd.
        let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) };
        for region in guest_mem.regions() {
            let lend = if cfg.protection_type.isolates_memory() {
                match region.options.purpose {
                    MemoryRegionPurpose::GuestMemoryRegion => true,
                    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
                    MemoryRegionPurpose::ProtectedFirmwareRegion => true,
                    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
                    MemoryRegionPurpose::StaticSwiotlbRegion => false,
                }
            } else {
                false
            };
            if lend {
                // SAFETY:
                // Safe because the guest regions are guaranteed not to overlap.
                unsafe {
                    android_lend_user_memory_region(
                        &vm_descriptor,
                        region.index as MemSlot,
                        false,
                        region.guest_addr.offset(),
                        region.size.try_into().unwrap(),
                        region.host_addr as *mut u8,
                    )?;
                }
            } else {
                // SAFETY:
                // Safe because the guest regions are guaranteed not to overlap.
                unsafe {
                    set_user_memory_region(
                        &vm_descriptor,
                        region.index as MemSlot,
                        false,
                        region.guest_addr.offset(),
                        region.size.try_into().unwrap(),
                        region.host_addr as *mut u8,
                    )?;
                }
            }
        }

        Ok(GunyahVm {
            gh: gh.try_clone()?,
            vm: vm_descriptor,
            guest_mem,
            mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
            mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
            routes: Arc::new(Mutex::new(HashSet::new())),
            hv_cfg: cfg,
        })
    }

    fn create_vcpu(&self, id: usize) -> Result<GunyahVcpu> {
        let gh_fn_vcpu_arg = gh_fn_vcpu_arg {
            id: id.try_into().unwrap(),
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_VCPU,
            arg_size: size_of::<gh_fn_vcpu_arg>() as u32,
            // SAFETY:
            // Safe because the kernel expects a valid pointer when arg_size is non-zero.
            arg: &gh_fn_vcpu_arg as *const gh_fn_vcpu_arg as u64,
        };

        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let fd = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION, &function_desc) };
        if fd < 0 {
            return errno_result();
        }

        // SAFETY:
        // Wrap the vcpu now in case the following ? returns early. This is safe because we verified
        // the value of the fd and we own the fd.
        let vcpu = unsafe { File::from_raw_descriptor(fd) };

        // SAFETY:
        // Safe because we know this is a Gunyah VCPU.
        let res = unsafe { ioctl(&vcpu, GH_VCPU_MMAP_SIZE) };
        if res < 0 {
            return errno_result();
        }
        let run_mmap_size = res as usize;

        let run_mmap = MemoryMappingBuilder::new(run_mmap_size)
            .from_file(&vcpu)
            .build()
            .map_err(|_| Error::new(ENOSPC))?;

        Ok(GunyahVcpu {
            vm: self.vm.try_clone()?,
            vcpu,
            id,
            run_mmap: Arc::new(run_mmap),
        })
    }

    pub fn register_irqfd(&self, label: u32, evt: &Event, level: bool) -> Result<()> {
        let gh_fn_irqfd_arg = gh_fn_irqfd_arg {
            fd: evt.as_raw_descriptor() as u32,
            label,
            flags: if level { GH_IRQFD_LEVEL } else { 0 },
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IRQFD,
            arg_size: size_of::<gh_fn_irqfd_arg>() as u32,
            // SAFETY:
            // Safe because the kernel expects a valid pointer when arg_size is non-zero.
            arg: &gh_fn_irqfd_arg as *const gh_fn_irqfd_arg as u64,
        };

        // SAFETY: safe because the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION, &function_desc) };
        if ret == 0 {
            self.routes
                .lock()
                .insert(GunyahIrqRoute { irq: label, level });
            Ok(())
        } else {
            errno_result()
        }
    }

    pub fn unregister_irqfd(&self, label: u32, _evt: &Event) -> Result<()> {
        let gh_fn_irqfd_arg = gh_fn_irqfd_arg {
            label,
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IRQFD,
            arg_size: size_of::<gh_fn_irqfd_arg>() as u32,
            // SAFETY:
            // Safe because the kernel expects a valid pointer when arg_size is non-zero.
            arg: &gh_fn_irqfd_arg as *const gh_fn_irqfd_arg as u64,
        };

        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_REMOVE_FUNCTION, &function_desc) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    pub fn try_clone(&self) -> Result<Self>
    where
        Self: Sized,
    {
        Ok(GunyahVm {
            gh: self.gh.try_clone()?,
            vm: self.vm.try_clone()?,
            guest_mem: self.guest_mem.clone(),
            mem_regions: self.mem_regions.clone(),
            mem_slot_gaps: self.mem_slot_gaps.clone(),
            routes: self.routes.clone(),
            hv_cfg: self.hv_cfg,
        })
    }

    fn set_dtb_config(&self, fdt_address: GuestAddress, fdt_size: usize) -> Result<()> {
        let dtb_config = gh_vm_dtb_config {
            guest_phys_addr: fdt_address.offset(),
            size: fdt_size.try_into().unwrap(),
        };

        // SAFETY:
        // Safe because we know this is a Gunyah VM
        let ret = unsafe { ioctl_with_ref(self, GH_VM_SET_DTB_CONFIG, &dtb_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn set_protected_vm_firmware_ipa(&self, fw_addr: GuestAddress, fw_size: u64) -> Result<()> {
        let fw_config = gh_vm_firmware_config {
            guest_phys_addr: fw_addr.offset(),
            size: fw_size,
        };

        // SAFETY:
        // Safe because we know this is a Gunyah VM
        let ret = unsafe { ioctl_with_ref(self, GH_VM_ANDROID_SET_FW_CONFIG, &fw_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn start(&self) -> Result<()> {
        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl(self, GH_VM_START) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
}

impl Vm for GunyahVm {
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized,
    {
        Ok(GunyahVm {
            gh: self.gh.try_clone()?,
            vm: self.vm.try_clone()?,
            guest_mem: self.guest_mem.clone(),
            mem_regions: self.mem_regions.clone(),
            mem_slot_gaps: self.mem_slot_gaps.clone(),
            routes: self.routes.clone(),
            hv_cfg: self.hv_cfg,
        })
    }

    fn check_capability(&self, c: VmCap) -> bool {
        match c {
            VmCap::DirtyLog => false,
            // Strictly speaking, Gunyah supports pvclock, but Gunyah takes care
            // of it and crosvm doesn't need to do anything for it
            VmCap::PvClock => false,
            VmCap::Protected => true,
            VmCap::EarlyInitCpuid => false,
            #[cfg(target_arch = "x86_64")]
            VmCap::BusLockDetect => false,
            VmCap::ReadOnlyMemoryRegion => false,
            VmCap::MemNoncoherentDma => false,
        }
    }

    fn get_guest_phys_addr_bits(&self) -> u8 {
        40
    }

    fn get_memory(&self) -> &GuestMemory {
        &self.guest_mem
    }

    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem_region: Box<dyn MappedRegion>,
        read_only: bool,
        _log_dirty_pages: bool,
        _cache: MemCacheType,
    ) -> Result<MemSlot> {
        let pgsz = pagesize() as u64;
        // Gunyah requires the user memory region size to be page-size aligned. It is safe to round
        // mem_region.size() up to a page boundary because mmap rounds the mapping size up to a
        // page boundary if it is not already aligned.
        let size = (mem_region.size() as u64 + pgsz - 1) / pgsz * pgsz;
        let end_addr = guest_addr.checked_add(size).ok_or(Error::new(EOVERFLOW))?;

        if self.guest_mem.range_overlap(guest_addr, end_addr) {
            return Err(Error::new(ENOSPC));
        }

        let mut regions = self.mem_regions.lock();
        let mut gaps = self.mem_slot_gaps.lock();
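        // Reuse a slot from a previously removed region if one is available; otherwise pick the
        // next slot after the initial guest memory regions.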
        let slot = match gaps.pop() {
            Some(gap) => gap.0,
            None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot,
        };

        // SAFETY: safe because memory is not modified and the return value is checked.
        let res = unsafe {
            set_user_memory_region(
                &self.vm,
                slot,
                read_only,
                guest_addr.offset(),
                size,
                mem_region.as_ptr(),
            )
        };

        if let Err(e) = res {
            gaps.push(Reverse(slot));
            return Err(e);
        }
        regions.insert(slot, (mem_region, guest_addr));
        Ok(slot)
    }

    fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (mem, _) = regions.get_mut(&slot).ok_or_else(|| Error::new(ENOENT))?;

        mem.msync(offset, size).map_err(|err| match err {
            MmapError::InvalidAddress => Error::new(EFAULT),
            MmapError::NotPageAligned => Error::new(EINVAL),
            MmapError::SystemCallFailed(e) => e,
            _ => Error::new(EIO),
        })
    }

    fn madvise_pageout_memory_region(
        &mut self,
        _slot: MemSlot,
        _offset: usize,
        _size: usize,
    ) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    fn madvise_remove_memory_region(
        &mut self,
        _slot: MemSlot,
        _offset: usize,
        _size: usize,
    ) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    fn remove_memory_region(&mut self, _slot: MemSlot) -> Result<Box<dyn MappedRegion>> {
        unimplemented!()
    }

    fn create_device(&self, _kind: DeviceKind) -> Result<SafeDescriptor> {
        unimplemented!()
    }

    fn get_dirty_log(&self, _slot: MemSlot, _dirty_log: &mut [u8]) -> Result<()> {
        unimplemented!()
    }

    fn register_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        let (do_datamatch, datamatch_value, datamatch_len) = match datamatch {
            Datamatch::AnyLength => (false, 0, 0),
            Datamatch::U8(v) => match v {
                Some(u) => (true, u as u64, 1),
                None => (false, 0, 1),
            },
            Datamatch::U16(v) => match v {
                Some(u) => (true, u as u64, 2),
                None => (false, 0, 2),
            },
            Datamatch::U32(v) => match v {
                Some(u) => (true, u as u64, 4),
                None => (false, 0, 4),
            },
            Datamatch::U64(v) => match v {
                Some(u) => (true, u, 8),
                None => (false, 0, 8),
            },
        };

        let mut flags = 0;
        if do_datamatch {
            flags |= 1 << GH_IOEVENTFD_DATAMATCH;
        }

        let maddr = if let IoEventAddress::Mmio(maddr) = addr {
            maddr
        } else {
            todo!()
        };

        let gh_fn_ioeventfd_arg = gh_fn_ioeventfd_arg {
            fd: evt.as_raw_descriptor(),
            datamatch: datamatch_value,
            len: datamatch_len,
            addr: maddr,
            flags,
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IOEVENTFD,
            arg_size: size_of::<gh_fn_ioeventfd_arg>() as u32,
            arg: &gh_fn_ioeventfd_arg as *const gh_fn_ioeventfd_arg as u64,
        };

        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION, &function_desc) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn unregister_ioevent(
        &mut self,
        _evt: &Event,
        addr: IoEventAddress,
        _datamatch: Datamatch,
    ) -> Result<()> {
        let maddr = if let IoEventAddress::Mmio(maddr) = addr {
            maddr
        } else {
            todo!()
        };

        let gh_fn_ioeventfd_arg = gh_fn_ioeventfd_arg {
            addr: maddr,
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IOEVENTFD,
            arg_size: size_of::<gh_fn_ioeventfd_arg>() as u32,
            arg: &gh_fn_ioeventfd_arg as *const gh_fn_ioeventfd_arg as u64,
        };

        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_REMOVE_FUNCTION, &function_desc) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn handle_io_events(&self, _addr: IoEventAddress, _data: &[u8]) -> Result<()> {
        Ok(())
    }

    fn get_pvclock(&self) -> Result<ClockState> {
        unimplemented!()
    }

    fn set_pvclock(&self, _state: &ClockState) -> Result<()> {
        unimplemented!()
    }

    fn add_fd_mapping(
        &mut self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (region, _) = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;

        match region.add_fd_mapping(offset, size, fd, fd_offset, prot) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (region, _) = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;

        match region.remove_mapping(offset, size) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn handle_balloon_event(&mut self, _event: BalloonEvent) -> Result<()> {
        unimplemented!()
    }
}

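// Exit types reported by the Gunyah resource manager in `exit_info.type_` when the VM status is
// `GH_VM_STATUS_GH_VM_STATUS_EXITED`.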
const GH_RM_EXIT_TYPE_VM_EXIT: u16 = 0;
const GH_RM_EXIT_TYPE_PSCI_POWER_OFF: u16 = 1;
const GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET: u16 = 2;
const GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET2: u16 = 3;
const GH_RM_EXIT_TYPE_WDT_BITE: u16 = 4;
const GH_RM_EXIT_TYPE_HYP_ERROR: u16 = 5;
const GH_RM_EXIT_TYPE_ASYNC_EXT_ABORT: u16 = 6;
const GH_RM_EXIT_TYPE_VM_FORCE_STOPPED: u16 = 7;

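/// A single Gunyah vCPU, created by adding a `GH_FN_VCPU` function to the VM.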
pub struct GunyahVcpu {
    vm: SafeDescriptor,
    vcpu: File,
    id: usize,
    run_mmap: Arc<MemoryMapping>,
}

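/// Signal handle that requests an immediate vCPU exit by writing to the shared `gh_vcpu_run`
/// mapping.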
struct GunyahVcpuSignalHandle {
    run_mmap: Arc<MemoryMapping>,
}

impl VcpuSignalHandleInner for GunyahVcpuSignalHandle {
    fn signal_immediate_exit(&self) {
        // SAFETY: we ensure `run_mmap` is a valid mapping of `gh_vcpu_run` at creation time, and
        // the `Arc` ensures the mapping still exists while we hold a reference to it.
        unsafe {
            let run = self.run_mmap.as_ptr() as *mut gh_vcpu_run;
            (*run).immediate_exit = 1;
        }
    }
}

impl AsRawDescriptor for GunyahVcpu {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.vcpu.as_raw_descriptor()
    }
}

impl Vcpu for GunyahVcpu {
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized,
    {
        let vcpu = self.vcpu.try_clone()?;

        Ok(GunyahVcpu {
            vm: self.vm.try_clone()?,
            vcpu,
            id: self.id,
            run_mmap: self.run_mmap.clone(),
        })
    }

    fn as_vcpu(&self) -> &dyn Vcpu {
        self
    }

    fn run(&mut self) -> Result<VcpuExit> {
        // SAFETY:
        // Safe because we know our file is a VCPU fd and we verify the return result.
        let ret = unsafe { ioctl(self, GH_VCPU_RUN) };
        if ret != 0 {
            return errno_result();
        }

        // SAFETY:
        // Safe because we know we mapped enough memory to hold the gh_vcpu_run struct
        // because the kernel told us how large it is.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gh_vcpu_run) };
        match run.exit_reason {
            GH_VCPU_EXIT_MMIO => Ok(VcpuExit::Mmio),
            GH_VCPU_EXIT_STATUS => {
                // SAFETY:
                // Safe because the exit_reason (which comes from the kernel) told us which
                // union field to use.
                let status = unsafe { &mut run.__bindgen_anon_1.status };
                match status.status {
                    GH_VM_STATUS_GH_VM_STATUS_LOAD_FAILED => Ok(VcpuExit::FailEntry {
                        hardware_entry_failure_reason: 0,
                    }),
                    GH_VM_STATUS_GH_VM_STATUS_CRASHED => Ok(VcpuExit::SystemEventCrash),
                    GH_VM_STATUS_GH_VM_STATUS_EXITED => {
                        info!("exit type {}", status.exit_info.type_);
                        match status.exit_info.type_ {
                            GH_RM_EXIT_TYPE_VM_EXIT => Ok(VcpuExit::SystemEventShutdown),
                            GH_RM_EXIT_TYPE_PSCI_POWER_OFF => Ok(VcpuExit::SystemEventShutdown),
                            GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET => Ok(VcpuExit::SystemEventReset),
                            GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET2 => Ok(VcpuExit::SystemEventReset),
                            GH_RM_EXIT_TYPE_WDT_BITE => Ok(VcpuExit::SystemEventCrash),
                            GH_RM_EXIT_TYPE_HYP_ERROR => Ok(VcpuExit::SystemEventCrash),
                            GH_RM_EXIT_TYPE_ASYNC_EXT_ABORT => Ok(VcpuExit::SystemEventCrash),
                            GH_RM_EXIT_TYPE_VM_FORCE_STOPPED => Ok(VcpuExit::SystemEventShutdown),
                            r => {
                                warn!("Unknown exit type: {}", r);
                                Err(Error::new(EINVAL))
                            }
                        }
                    }
                    r => {
                        warn!("Unknown vm status: {}", r);
                        Err(Error::new(EINVAL))
                    }
                }
            }
            r => {
                warn!("unknown gh exit reason: {}", r);
                Err(Error::new(EINVAL))
            }
        }
    }

    fn id(&self) -> usize {
        self.id
    }

    fn set_immediate_exit(&self, exit: bool) {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the gh_vcpu_run struct because the
        // kernel told us how large it was. The pointer is page aligned, so casting it to a
        // different type is well defined.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gh_vcpu_run) };
        run.immediate_exit = exit.into();
    }

    fn signal_handle(&self) -> VcpuSignalHandle {
        VcpuSignalHandle {
            inner: Box::new(GunyahVcpuSignalHandle {
                run_mmap: self.run_mmap.clone(),
            }),
        }
    }

    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()> {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the gh_vcpu_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gh_vcpu_run) };
        // Verify that the handler is called in the right context.
        assert!(run.exit_reason == GH_VCPU_EXIT_MMIO);
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us which
        // union field to use.
        let mmio = unsafe { &mut run.__bindgen_anon_1.mmio };
        let address = mmio.phys_addr;
        let data = &mut mmio.data[..mmio.len as usize];
        if mmio.is_write != 0 {
            handle_fn(IoParams {
                address,
                operation: IoOperation::Write(data),
            })
        } else {
            handle_fn(IoParams {
                address,
                operation: IoOperation::Read(data),
            })
        }
    }

    fn handle_io(&self, _handle_fn: &mut dyn FnMut(IoParams)) -> Result<()> {
        unreachable!()
    }

    fn on_suspend(&self) -> Result<()> {
        Ok(())
    }

    unsafe fn enable_raw_capability(&self, _cap: u32, _args: &[u64; 4]) -> Result<()> {
        unimplemented!()
    }
}