// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use core::ffi::c_void;
use std::arch::x86_64::CpuidResult;
use std::collections::BTreeMap;
use std::mem::size_of;

use base::errno_result;
use base::ioctl;
use base::ioctl_with_mut_ref;
use base::ioctl_with_ptr_sized;
use base::ioctl_with_ref;
use base::warn;
use base::AsRawDescriptor;
use base::Error;
use base::RawDescriptor;
use base::Result;
use base::SafeDescriptor;
use data_model::vec_with_array_field;
use libc::EINVAL;
use libc::ENOENT;
use libc::ENXIO;
use libc::EOPNOTSUPP;
use vm_memory::GuestAddress;

use super::*;
use crate::CpuId;
use crate::CpuIdEntry;
use crate::DebugRegs;
use crate::DescriptorTable;
use crate::Fpu;
use crate::FpuReg;
use crate::IoOperation;
use crate::IoParams;
use crate::Regs;
use crate::Segment;
use crate::Sregs;
use crate::Vcpu;
use crate::VcpuExit;
use crate::VcpuShutdownError;
use crate::VcpuShutdownErrorKind;
use crate::VcpuX86_64;
use crate::Xsave;

// HAXM exit reasons
// IO port request
const HAX_EXIT_IO: u32 = 1;
// MMIO instruction emulation; should not happen anymore, replaced with
// HAX_EXIT_FAST_MMIO
#[allow(dead_code)]
const HAX_EXIT_MMIO: u32 = 2;
// Real mode emulation when unrestricted guest is disabled
#[allow(dead_code)]
const HAX_EXIT_REALMODE: u32 = 3;
// Interrupt window open, crosvm can inject an interrupt now.
// Also used when the vcpu thread receives a signal.
const HAX_EXIT_INTERRUPT: u32 = 4;
// Unknown vmexit; usually triggers a reboot.
#[allow(dead_code)]
const HAX_EXIT_UNKNOWN: u32 = 5;
// HALT from the guest
const HAX_EXIT_HLT: u32 = 6;
// VCPU panic, e.g. due to a triple fault in the guest
const HAX_EXIT_VCPU_PANIC: u32 = 7;
// Paused by crosvm setting _exit_reason to HAX_EXIT_PAUSED before entry
pub(crate) const HAX_EXIT_PAUSED: u32 = 8;
// MMIO instruction emulation through io_buffer
const HAX_EXIT_FAST_MMIO: u32 = 9;
// Page fault that HAXM was not able to handle
const HAX_EXIT_PAGEFAULT: u32 = 10;
// A debug exception caused a vmexit
const HAX_EXIT_DEBUG: u32 = 11;

// HAXM exit directions
const HAX_EXIT_DIRECTION_PIO_IN: u32 = 1;
const HAX_EXIT_DIRECTION_PIO_OUT: u32 = 0;
const HAX_EXIT_DIRECTION_MMIO_READ: u8 = 0;
const HAX_EXIT_DIRECTION_MMIO_WRITE: u8 = 1;

pub struct HaxmVcpu {
    pub(super) descriptor: SafeDescriptor,
    pub(super) id: usize,
    pub(super) tunnel: *mut hax_tunnel,
    pub(super) io_buffer: *mut c_void,
}

// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl Send for HaxmVcpu {}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl Sync for HaxmVcpu {}

impl AsRawDescriptor for HaxmVcpu {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.descriptor.as_raw_descriptor()
    }
}

impl HaxmVcpu {
    fn get_vcpu_state(&self) -> Result<VcpuState> {
        let mut state = vcpu_state_t::default();

        // SAFETY: trivially safe with return value checked.
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_GET_REGS, &mut state) };
        if ret != 0 {
            return errno_result();
        }

        // Also read efer MSR
        state.efer = self.get_msr(IA32_EFER)? as u32;

        Ok(VcpuState { state })
    }

    fn set_vcpu_state(&self, state: &mut VcpuState) -> Result<()> {
        // SAFETY: trivially safe with return value checked.
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_SET_REGS, &mut state.state) };
        if ret != 0 {
            return errno_result();
        }

        // Also set efer MSR
        self.set_msr(IA32_EFER, state.state.efer as u64)
    }
}

impl Vcpu for HaxmVcpu {
    /// Makes a shallow clone of this `Vcpu`.
    fn try_clone(&self) -> Result<Self> {
        Ok(HaxmVcpu {
            descriptor: self.descriptor.try_clone()?,
            id: self.id,
            tunnel: self.tunnel,
            io_buffer: self.io_buffer,
        })
    }

    fn as_vcpu(&self) -> &dyn Vcpu {
        self
    }

    /// Returns the vcpu id.
    fn id(&self) -> usize {
        self.id
    }

    /// Sets the bit that requests an immediate exit.
    fn set_immediate_exit(&self, exit: bool) {
        // SAFETY:
        // Safe because we know the tunnel is a pointer to a hax_tunnel and we know its size.
        // Crosvm's HAXM implementation does not use the _exit_reason, so it's fine if we
        // overwrite it.
        unsafe {
            (*self.tunnel).exit_reason = if exit { HAX_EXIT_PAUSED } else { 0 };
        }
    }

    /// Signals to the hypervisor that this guest is being paused by userspace.
    fn on_suspend(&self) -> Result<()> {
        Ok(())
    }

    /// Enables a hypervisor-specific extension on this Vcpu.  `cap` is a constant defined by the
    /// hypervisor API.  `args` are the arguments for enabling the feature, if any.
    unsafe fn enable_raw_capability(&self, _cap: u32, _args: &[u64; 4]) -> Result<()> {
        // Haxm does not support enable_capability
        Err(Error::new(libc::ENXIO))
    }

    /// This function should be called after `Vcpu::run` returns `VcpuExit::Mmio`.
    ///
    /// Once called, it will determine whether an mmio read or mmio write was the reason for the
    /// mmio exit, call `handle_fn` with the respective IoOperation to perform the mmio read or
    /// write, and set the return data in the vcpu so that the vcpu can resume running.
    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()> {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        // Verify that the handler is called for mmio context only.
        unsafe {
            assert!((*self.tunnel).exit_status == HAX_EXIT_FAST_MMIO);
        }
        let mmio = self.io_buffer as *mut hax_fastmmio;
        let (address, size, direction) =
            // SAFETY:
            // Safe because the exit_reason (which comes from the kernel) told us which
            // union field to use.
            unsafe { ((*mmio).gpa, (*mmio).size as usize, (*mmio).direction) };
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us which
        // union field to use. We use `addr_of_mut!()` to get a potentially unaligned u64 pointer,
        // but it is then cast via a u8 pointer to a u8 slice, which has no alignment requirements.
        let data = unsafe {
            assert!(size <= size_of::<u64>());
            std::slice::from_raw_parts_mut(
                std::ptr::addr_of_mut!((*mmio).__bindgen_anon_1.value) as *mut u8,
                size,
            )
        };

        match direction {
            HAX_EXIT_DIRECTION_MMIO_READ => {
                handle_fn(IoParams {
                    address,
                    operation: IoOperation::Read(data),
                })
                // We have to unwrap/panic here because HAXM doesn't have a
                // facility to inject a GP fault here. Once HAXM can do that, we
                // should inject a GP fault & bubble the error.
                .unwrap();
                Ok(())
            }
            HAX_EXIT_DIRECTION_MMIO_WRITE => {
                handle_fn(IoParams {
                    address,
                    operation: IoOperation::Write(data),
                })
                // Similarly to the read direction, we MUST panic here.
                .unwrap();
                Ok(())
            }
            _ => Err(Error::new(EINVAL)),
        }
    }
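
    // A minimal usage sketch for `handle_mmio` above (illustrative only; the device dispatch
    // inside the closure is hypothetical, not part of this file). After `run()` returns
    // `VcpuExit::Mmio`, the caller services the access through the closure; `data` is a
    // `&mut [u8]` of at most 8 bytes, per the assert above:
    //
    //     vcpu.handle_mmio(&mut |IoParams { address, operation }| match operation {
    //         IoOperation::Read(data) => {
    //             // Fill `data` with the bytes read from the device at `address`.
    //             Ok(())
    //         }
    //         IoOperation::Write(data) => {
    //             // Forward `data` to the device at `address`.
    //             Ok(())
    //         }
    //     })?;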

    /// This function should be called after `Vcpu::run` returns `VcpuExit::Io`.
    ///
    /// Once called, it will determine whether an io in or io out was the reason for the io exit,
    /// call `handle_fn` with the respective IoOperation to perform the io in or io out,
    /// and set the return data in the vcpu so that the vcpu can resume running.
    #[allow(clippy::cast_ptr_alignment)]
    fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams)) -> Result<()> {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        // Verify that the handler is called for io context only.
        unsafe {
            assert!((*self.tunnel).exit_status == HAX_EXIT_IO);
        }
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us which
        // union field to use.
        let io = unsafe { (*self.tunnel).__bindgen_anon_1.io };
        let address = io.port.into();
        let size = io.size as usize;
        let count = io.count as usize;
        let data_len = count * size;
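        // Illustrative example (assumed values, not taken from a real exit): a string
        // instruction such as `rep outsw` with a repeat count of 4 and a 2-byte operand size
        // gives data_len = 4 * 2 = 8, so the 8-byte io_buffer below is split into four 2-byte
        // chunks and `handle_fn` is invoked once per chunk.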
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us that this is port io,
        // where the io_buffer can be treated as a *mut u8
        let buffer: &mut [u8] =
            unsafe { std::slice::from_raw_parts_mut(self.io_buffer as *mut u8, data_len) };
        let data_chunks = buffer.chunks_mut(size);

        match io.direction as u32 {
            HAX_EXIT_DIRECTION_PIO_IN => {
                for data in data_chunks {
                    handle_fn(IoParams {
                        address,
                        operation: IoOperation::Read(data),
                    });
                }
                Ok(())
            }
            HAX_EXIT_DIRECTION_PIO_OUT => {
                for data in data_chunks {
                    handle_fn(IoParams {
                        address,
                        operation: IoOperation::Write(data),
                    });
                }
                Ok(())
            }
            _ => Err(Error::new(EINVAL)),
        }
    }

    #[allow(clippy::cast_ptr_alignment)]
    // The pointer is page aligned so casting to a different type is well defined, hence the clippy
    // allow attribute.
    fn run(&mut self) -> Result<VcpuExit> {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl(self, HAX_VCPU_IOCTL_RUN) };
        if ret != 0 {
            return errno_result();
        }

        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        let exit_status = unsafe { (*self.tunnel).exit_status };

        match exit_status {
            HAX_EXIT_IO => Ok(VcpuExit::Io),
            HAX_EXIT_INTERRUPT => Ok(VcpuExit::Intr),
            HAX_EXIT_HLT => Ok(VcpuExit::Hlt),
            HAX_EXIT_VCPU_PANIC => {
                // SAFETY:
                // 1) we mapped enough memory to hold the hax_tunnel struct because the kernel told
                //    us how large it was. That memory is still alive here.
                let panic_reason = unsafe { (*self.tunnel).vcpu_panic_reason };
                Ok(VcpuExit::Shutdown(Err(VcpuShutdownError::new(
                    VcpuShutdownErrorKind::Other,
                    panic_reason as u64,
                ))))
            }
            HAX_EXIT_FAST_MMIO => Ok(VcpuExit::Mmio),
            HAX_EXIT_PAGEFAULT => Ok(VcpuExit::Exception),
            HAX_EXIT_DEBUG => Ok(VcpuExit::Debug),
            HAX_EXIT_PAUSED => Ok(VcpuExit::Exception),
            r => panic!("unknown exit reason: {}", r),
        }
    }
}

impl VcpuX86_64 for HaxmVcpu {
    /// Sets or clears the flag that requests the VCPU to exit when it becomes possible to inject
    /// interrupts into the guest.
    fn set_interrupt_window_requested(&self, requested: bool) {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        unsafe {
            (*self.tunnel).request_interrupt_window = i32::from(requested);
        }
    }

    /// Checks if we can inject an interrupt into the VCPU.
    fn ready_for_interrupt(&self) -> bool {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        unsafe { (*self.tunnel).ready_for_interrupt_injection != 0 }
    }

    /// Injects interrupt vector `irq` into the VCPU.
    fn interrupt(&self, irq: u8) -> Result<()> {
        let irq: u32 = irq.into();
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_ref(self, HAX_VCPU_IOCTL_INTERRUPT, &irq) };
        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Injects a non-maskable interrupt into the VCPU.
    fn inject_nmi(&self) -> Result<()> {
        warn!("HAXM does not support injecting NMIs");
        Ok(())
    }

    /// Gets the VCPU general purpose registers.
    fn get_regs(&self) -> Result<Regs> {
        Ok(self.get_vcpu_state()?.get_regs())
    }

    /// Sets the VCPU general purpose registers.
    fn set_regs(&self, regs: &Regs) -> Result<()> {
        let mut state = self.get_vcpu_state()?;
        state.set_regs(regs);
        self.set_vcpu_state(&mut state)?;
        Ok(())
    }

    /// Gets the VCPU special registers.
    fn get_sregs(&self) -> Result<Sregs> {
        Ok(self.get_vcpu_state()?.get_sregs())
    }

    /// Sets the VCPU special registers.
    fn set_sregs(&self, sregs: &Sregs) -> Result<()> {
        let mut state = self.get_vcpu_state()?;
        state.set_sregs(sregs);
        self.set_vcpu_state(&mut state)?;
        Ok(())
    }

    /// Gets the VCPU FPU registers.
    fn get_fpu(&self) -> Result<Fpu> {
        let mut fpu = fx_layout::default();
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_FPU, &mut fpu) };

        if ret != 0 {
            return errno_result();
        }

        Ok(Fpu::from(&fpu))
    }

    /// Sets the VCPU FPU registers.
    fn set_fpu(&self, fpu: &Fpu) -> Result<()> {
        let mut current_fpu = fx_layout::default();
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_FPU, &mut current_fpu) };

        if ret != 0 {
            return errno_result();
        }

        let mut new_fpu = fx_layout::from(fpu);

        // the mxcsr mask is something that isn't part of the Fpu state, so we make the new
        // fpu state's mxcsr_mask match its current value
        new_fpu.mxcsr_mask = current_fpu.mxcsr_mask;

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_ref(self, HAX_VCPU_IOCTL_SET_FPU, &new_fpu) };

        if ret != 0 {
            return errno_result();
        }

        Ok(())
    }

    fn get_xsave(&self) -> Result<Xsave> {
        Err(Error::new(EOPNOTSUPP))
    }

    fn set_xsave(&self, _xsave: &Xsave) -> Result<()> {
        Err(Error::new(EOPNOTSUPP))
    }

    fn get_interrupt_state(&self) -> Result<serde_json::Value> {
        Err(Error::new(EOPNOTSUPP))
    }

    fn set_interrupt_state(&self, _data: serde_json::Value) -> Result<()> {
        Err(Error::new(EOPNOTSUPP))
    }

    /// Gets the VCPU debug registers.
    fn get_debugregs(&self) -> Result<DebugRegs> {
        Ok(self.get_vcpu_state()?.get_debugregs())
    }

    /// Sets the VCPU debug registers.
    fn set_debugregs(&self, debugregs: &DebugRegs) -> Result<()> {
        let mut state = self.get_vcpu_state()?;
        state.set_debugregs(debugregs);
        self.set_vcpu_state(&mut state)?;
        Ok(())
    }

    /// Gets the VCPU extended control registers.
    fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
        // Haxm does not support getting XCRs
        Err(Error::new(libc::ENXIO))
    }

    /// Sets a VCPU extended control register.
    fn set_xcr(&self, _xcr_index: u32, _value: u64) -> Result<()> {
        // Haxm does not support setting XCRs
        Err(Error::new(libc::ENXIO))
    }

    /// Gets the value of one model-specific register.
    fn get_msr(&self, msr_index: u32) -> Result<u64> {
        let mut msr_data = hax_msr_data {
            nr_msr: 1,
            ..Default::default()
        };
        msr_data.entries[0].entry = u64::from(msr_index);

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_MSRS, &mut msr_data) };
        if ret != 0 {
            return errno_result();
        }

        Ok(msr_data.entries[0].value)
    }

    fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
        Err(Error::new(EOPNOTSUPP))
    }

    /// Sets the value of one model-specific register.
    fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
        let mut msr_data = hax_msr_data {
            nr_msr: 1,
            ..Default::default()
        };
        msr_data.entries[0].entry = u64::from(msr_index);
        msr_data.entries[0].value = value;

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_SET_MSRS, &mut msr_data) };
        if ret != 0 {
            return errno_result();
        }

        Ok(())
    }

    /// Sets up the data returned by the CPUID instruction.
    fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> {
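        // Note on the ioctl payload layout (describing the code below, not a new API):
        // `vec_with_array_field` allocates one contiguous buffer holding a `hax_cpuid`
        // header followed by `total` trailing `hax_cpuid_entry` records, which is why the
        // size passed to the ioctl is
        // `size_of::<hax_cpuid>() + total * size_of::<hax_cpuid_entry>()`.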
        let total = cpuid.cpu_id_entries.len();
        let mut hax = vec_with_array_field::<hax_cpuid, hax_cpuid_entry>(total);
        hax[0].total = total as u32;
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let entries = unsafe { hax[0].entries.as_mut_slice(total) };
        for (i, e) in cpuid.cpu_id_entries.iter().enumerate() {
            entries[i] = hax_cpuid_entry::from(e);
        }

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe {
            ioctl_with_ptr_sized(
                self,
                HAX_VCPU_IOCTL_SET_CPUID,
                hax.as_ptr(),
                size_of::<hax_cpuid>() + total * size_of::<hax_cpuid_entry>(),
            )
        };

        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }

    /// This function should be called after `Vcpu::run` returns `VcpuExit::Cpuid`, and `entry`
    /// should represent the result of emulating the CPUID instruction. The `handle_cpuid` function
    /// will then set the appropriate registers on the vcpu.
    /// HAXM does not support the VcpuExit::Cpuid exit type.
    fn handle_cpuid(&mut self, _entry: &CpuIdEntry) -> Result<()> {
        Err(Error::new(ENXIO))
    }

    fn set_guest_debug(&self, _addrs: &[GuestAddress], _enable_singlestep: bool) -> Result<()> {
        // TODO(b/173807302): Implement this
        Err(Error::new(ENOENT))
    }

    fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, tsc_offset: u64) -> Result<()> {
        // HAXM sets TSC_OFFSET based on what we set TSC to; however, it does
        // not yet handle syncing. This means it computes
        // TSC_OFFSET = new_tsc - rdtsc(), so if we want to target the same
        // offset value, we need new_tsc = rdtsc() + target_offset. This is what
        // Self::set_tsc_offset does.
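        //
        // A small worked example with assumed numbers: if the desired offset is 0x1000 and
        // rdtsc() currently reads 0x9000, set_tsc_offset writes TSC = 0xA000, and HAXM then
        // derives TSC_OFFSET = 0xA000 - 0x9000 = 0x1000.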
        //
        // TODO(b/311793539): haxm doesn't yet support syncing TSCs across VCPUs
        // if the TSC value is non-zero. Once we have that support, we can
        // switch to calling Self::set_tsc_value here with the common host
        // reference moment. (Alternatively, we may just expose a way to set the
        // offset directly.)
        self.set_tsc_offset(tsc_offset)
    }
}

struct VcpuState {
    state: vcpu_state_t,
}

impl VcpuState {
    fn get_regs(&self) -> Regs {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        unsafe {
            Regs {
                rax: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .rax,
                rbx: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_4
                    .rbx,
                rcx: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_2
                    .rcx,
                rdx: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_3
                    .rdx,
                rsi: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_7
                    .rsi,
                rdi: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_8
                    .rdi,
                rsp: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_5
                    .rsp,
                rbp: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_6
                    .rbp,
                r8: self.state.__bindgen_anon_1.__bindgen_anon_1.r8,
                r9: self.state.__bindgen_anon_1.__bindgen_anon_1.r9,
                r10: self.state.__bindgen_anon_1.__bindgen_anon_1.r10,
                r11: self.state.__bindgen_anon_1.__bindgen_anon_1.r11,
                r12: self.state.__bindgen_anon_1.__bindgen_anon_1.r12,
                r13: self.state.__bindgen_anon_1.__bindgen_anon_1.r13,
                r14: self.state.__bindgen_anon_1.__bindgen_anon_1.r14,
                r15: self.state.__bindgen_anon_1.__bindgen_anon_1.r15,
                rip: self.state.__bindgen_anon_2.rip,
                rflags: self.state.__bindgen_anon_3.rflags,
            }
        }
    }

    fn set_regs(&mut self, regs: &Regs) {
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_1
            .rax = regs.rax;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_4
            .rbx = regs.rbx;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_2
            .rcx = regs.rcx;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_3
            .rdx = regs.rdx;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_7
            .rsi = regs.rsi;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_8
            .rdi = regs.rdi;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_5
            .rsp = regs.rsp;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_6
            .rbp = regs.rbp;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r8 = regs.r8;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r9 = regs.r9;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r10 = regs.r10;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r11 = regs.r11;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r12 = regs.r12;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r13 = regs.r13;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r14 = regs.r14;
        self.state.__bindgen_anon_1.__bindgen_anon_1.r15 = regs.r15;
        self.state.__bindgen_anon_2.rip = regs.rip;
        self.state.__bindgen_anon_3.rflags = regs.rflags;
    }

    fn get_sregs(&self) -> Sregs {
        Sregs {
            cs: Segment::from(&self.state.cs),
            ds: Segment::from(&self.state.ds),
            es: Segment::from(&self.state.es),
            fs: Segment::from(&self.state.fs),
            gs: Segment::from(&self.state.gs),
            ss: Segment::from(&self.state.ss),
            tr: Segment::from(&self.state.tr),
            ldt: Segment::from(&self.state.ldt),
            gdt: DescriptorTable::from(&self.state.gdt),
            idt: DescriptorTable::from(&self.state.idt),
            cr0: self.state.cr0,
            cr2: self.state.cr2,
            cr3: self.state.cr3,
            cr4: self.state.cr4,
            // HAXM does not support setting cr8
            cr8: 0,
            efer: self.state.efer as u64,
        }
    }

    fn set_sregs(&mut self, sregs: &Sregs) {
        self.state.cs = segment_desc_t::from(&sregs.cs);
        self.state.ds = segment_desc_t::from(&sregs.ds);
        self.state.es = segment_desc_t::from(&sregs.es);
        self.state.fs = segment_desc_t::from(&sregs.fs);
        self.state.gs = segment_desc_t::from(&sregs.gs);
        self.state.ss = segment_desc_t::from(&sregs.ss);
        self.state.tr = segment_desc_t::from(&sregs.tr);
        self.state.ldt = segment_desc_t::from(&sregs.ldt);
        self.state.gdt = segment_desc_t::from(&sregs.gdt);
        self.state.idt = segment_desc_t::from(&sregs.idt);
        self.state.cr0 = sregs.cr0;
        self.state.cr2 = sregs.cr2;
        self.state.cr3 = sregs.cr3;
        self.state.cr4 = sregs.cr4;
        self.state.efer = sregs.efer as u32;
    }

    fn get_debugregs(&self) -> DebugRegs {
        DebugRegs {
            db: [
                self.state.dr0,
                self.state.dr1,
                self.state.dr2,
                self.state.dr3,
            ],
            dr6: self.state.dr6,
            dr7: self.state.dr7,
        }
    }

    fn set_debugregs(&mut self, debugregs: &DebugRegs) {
        self.state.dr0 = debugregs.db[0];
        self.state.dr1 = debugregs.db[1];
        self.state.dr2 = debugregs.db[2];
        self.state.dr3 = debugregs.db[3];
        self.state.dr6 = debugregs.dr6;
        self.state.dr7 = debugregs.dr7;
    }
}

// HAXM's segment descriptor format matches the VMCS structure exactly. The format
// of the AR bits is described in the Intel System Programming Guide Part 3, chapter 24.4.1,
// table 24-2. The main confusing thing is that the type_ field in haxm is 4 bits, meaning
// the 3 least significant bits represent the normal type field, and the most significant
// bit represents the "descriptor type" field.

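// For illustration only (values are standard x86 segmentation conventions, not
// HAXM-specific): a flat 64-bit code segment would round-trip through the conversions
// below with type_ = 0xb (execute/read, accessed), s = 1, present = 1, dpl = 0, l = 1
// and g = 1.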
impl From<&segment_desc_t> for Segment {
    fn from(item: &segment_desc_t) -> Self {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        unsafe {
            Segment {
                base: item.base,
                limit_bytes: item.limit,
                selector: item.selector,
                type_: item.__bindgen_anon_1.__bindgen_anon_1.type_() as u8,
                present: item.__bindgen_anon_1.__bindgen_anon_1.present() as u8,
                dpl: item.__bindgen_anon_1.__bindgen_anon_1.dpl() as u8,
                db: item.__bindgen_anon_1.__bindgen_anon_1.operand_size() as u8,
                s: item.__bindgen_anon_1.__bindgen_anon_1.desc() as u8,
                l: item.__bindgen_anon_1.__bindgen_anon_1.long_mode() as u8,
                g: item.__bindgen_anon_1.__bindgen_anon_1.granularity() as u8,
                avl: item.__bindgen_anon_1.__bindgen_anon_1.available() as u8,
            }
        }
    }
}

impl From<&Segment> for segment_desc_t {
    fn from(item: &Segment) -> Self {
        let mut segment = segment_desc_t {
            base: item.base,
            limit: item.limit_bytes,
            selector: item.selector,
            ..Default::default()
        };

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        unsafe {
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_type(item.type_ as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_desc(item.s as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_present(item.present as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_dpl(item.dpl as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_operand_size(item.db as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_long_mode(item.l as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_granularity(item.g as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_available(item.avl as u32);
        }

        segment
    }
}

impl From<&segment_desc_t> for DescriptorTable {
    fn from(item: &segment_desc_t) -> Self {
        DescriptorTable {
            base: item.base,
            limit: item.limit as u16,
        }
    }
}

impl From<&DescriptorTable> for segment_desc_t {
    fn from(item: &DescriptorTable) -> Self {
        segment_desc_t {
            base: item.base,
            limit: item.limit as u32,
            ..Default::default()
        }
    }
}

impl From<&fx_layout> for Fpu {
    fn from(item: &fx_layout) -> Self {
        let mut fpu = Fpu {
            fpr: FpuReg::from_16byte_arrays(&item.st_mm),
            fcw: item.fcw,
            fsw: item.fsw,
            ftwx: item.ftw,
            last_opcode: item.fop,
            // SAFETY: trivially safe
            last_ip: unsafe { item.__bindgen_anon_1.fpu_ip },
            // SAFETY: trivially safe
            last_dp: unsafe { item.__bindgen_anon_2.fpu_dp },
            xmm: [[0; 16]; 16],
            mxcsr: item.mxcsr,
        };

        fpu.xmm[..8].copy_from_slice(&item.mmx_1[..]);
        fpu.xmm[8..].copy_from_slice(&item.mmx_2[..]);

        fpu
    }
}

impl From<&Fpu> for fx_layout {
    fn from(item: &Fpu) -> Self {
        let mut fpu = fx_layout {
            fcw: item.fcw,
            fsw: item.fsw,
            ftw: item.ftwx,
            res1: 0,
            fop: item.last_opcode,
            __bindgen_anon_1: fx_layout__bindgen_ty_1 {
                fpu_ip: item.last_ip,
            },
            __bindgen_anon_2: fx_layout__bindgen_ty_2 {
                fpu_dp: item.last_dp,
            },
            mxcsr: item.mxcsr,
            mxcsr_mask: 0,
            st_mm: FpuReg::to_16byte_arrays(&item.fpr),
            mmx_1: [[0; 16]; 8],
            mmx_2: [[0; 16]; 8],
            pad: [0; 96],
        };

        fpu.mmx_1.copy_from_slice(&item.xmm[..8]);
        fpu.mmx_2.copy_from_slice(&item.xmm[8..]);

        fpu
    }
}

impl From<&hax_cpuid_entry> for CpuIdEntry {
    fn from(item: &hax_cpuid_entry) -> Self {
        CpuIdEntry {
            function: item.function,
            index: item.index,
            flags: item.flags,
            cpuid: CpuidResult {
                eax: item.eax,
                ebx: item.ebx,
                ecx: item.ecx,
                edx: item.edx,
            },
        }
    }
}

impl From<&CpuIdEntry> for hax_cpuid_entry {
    fn from(item: &CpuIdEntry) -> Self {
        hax_cpuid_entry {
            function: item.function,
            index: item.index,
            flags: item.flags,
            eax: item.cpuid.eax,
            ebx: item.cpuid.ebx,
            ecx: item.cpuid.ecx,
            edx: item.cpuid.edx,
            pad: Default::default(),
        }
    }
}

// TODO(b:241252288): Enable tests disabled with dummy feature flag - enable_haxm_tests.
#[cfg(test)]
#[cfg(feature = "enable_haxm_tests")]
mod tests {
    use vm_memory::GuestAddress;
    use vm_memory::GuestMemory;

    use super::*;
    use crate::VmX86_64;

    // EFER Bits
    const EFER_SCE: u64 = 0x00000001;
    const EFER_LME: u64 = 0x00000100;
    const EFER_LMA: u64 = 0x00000400;
    const EFER_SVME: u64 = 1 << 12;

    // CR0 bits
    const CR0_PG: u64 = 1 << 31;

    #[test]
    fn get_regs() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        vcpu.get_regs().expect("failed to get regs");
    }

    #[test]
    fn get_fpu() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        vcpu.get_fpu().expect("failed to get fpu");
    }

    #[test]
    fn set_msr() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        vcpu.set_msr(38, 0x300).expect("failed to set MSR");
    }

    #[test]
    fn get_msr() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        let _value = vcpu.get_msr(38).expect("failed to get MSR");
    }

    #[test]
    fn set_cpuid() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        let mut cpuid = haxm
            .get_supported_cpuid()
            .expect("failed to get supported cpuids");
        for entry in &mut cpuid.cpu_id_entries {
            if entry.function == 1 {
                // Disable XSAVE and OSXSAVE
                entry.cpuid.ecx &= !(1 << 26);
                entry.cpuid.ecx &= !(1 << 27);
            }
        }

        vcpu.set_cpuid(&cpuid).expect("failed to set cpuid");
    }

    #[test]
    fn set_efer() {
        // HAXM efer setting requires some extra code, so we have this test specifically
        // checking that it's working.
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        let mut sregs = vcpu.get_sregs().expect("failed to get sregs");
        // Initial value should be 0
        assert_eq!(sregs.efer & !EFER_SVME, 0);

        // Enable and activate long mode
        sregs.efer = EFER_LMA | EFER_LME;
        // Need to enable paging or LMA will be turned off
        sregs.cr0 |= CR0_PG;
        vcpu.set_sregs(&sregs).expect("failed to set sregs");

        // Verify that setting stuck
        let sregs = vcpu.get_sregs().expect("failed to get sregs");
        assert_eq!(sregs.efer & !EFER_SVME, EFER_LMA | EFER_LME);

        // IA32_EFER register value should match
        let efer = vcpu.get_msr(IA32_EFER).expect("failed to get msr");
        assert_eq!(efer & !EFER_SVME, EFER_LMA | EFER_LME);

        // Enable SCE via set_msrs
        vcpu.set_msr(IA32_EFER, efer | EFER_SCE)
            .expect("failed to set msr");

        // Verify that setting stuck
        let sregs = vcpu.get_sregs().expect("failed to get sregs");
        assert_eq!(sregs.efer & !EFER_SVME, EFER_SCE | EFER_LME | EFER_LMA);
        let new_efer = vcpu.get_msr(IA32_EFER).expect("failed to get msrs");
        assert_eq!(new_efer & !EFER_SVME, EFER_SCE | EFER_LME | EFER_LMA);
    }
}