// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::arch::x86_64::CpuidResult;
use std::collections::BTreeMap;

use base::errno_result;
use base::error;
use base::ioctl;
use base::ioctl_with_mut_ptr;
use base::ioctl_with_mut_ref;
use base::ioctl_with_ptr;
use base::ioctl_with_ref;
use base::ioctl_with_val;
use base::AsRawDescriptor;
use base::Error;
use base::IoctlNr;
use base::MappedRegion;
use base::Result;
use data_model::vec_with_array_field;
use data_model::FlexibleArrayWrapper;
use kvm_sys::*;
use libc::E2BIG;
use libc::EAGAIN;
use libc::EIO;
use libc::ENXIO;
use serde::Deserialize;
use serde::Serialize;
use vm_memory::GuestAddress;

use super::Config;
use super::Kvm;
use super::KvmVcpu;
use super::KvmVm;
use crate::host_phys_addr_bits;
use crate::ClockState;
use crate::CpuId;
use crate::CpuIdEntry;
use crate::DebugRegs;
use crate::DescriptorTable;
use crate::DeviceKind;
use crate::Fpu;
use crate::FpuReg;
use crate::HypervisorX86_64;
use crate::IoapicRedirectionTableEntry;
use crate::IoapicState;
use crate::IrqSourceChip;
use crate::LapicState;
use crate::PicSelect;
use crate::PicState;
use crate::PitChannelState;
use crate::PitState;
use crate::ProtectionType;
use crate::Regs;
use crate::Segment;
use crate::Sregs;
use crate::VcpuExit;
use crate::VcpuX86_64;
use crate::VmCap;
use crate::VmX86_64;
use crate::Xsave;
use crate::NUM_IOAPIC_PINS;

type KvmCpuId = FlexibleArrayWrapper<kvm_cpuid2, kvm_cpuid_entry2>;
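// Size in bytes of the XSAVE area used by the legacy KVM_GET_XSAVE ioctl; with
// KVM_CAP_XSAVE2 the kernel may report a larger size.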
const KVM_XSAVE_MAX_SIZE: usize = 4096;
const MSR_IA32_APICBASE: u32 = 0x0000001b;

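/// Serializable state of a vCPU's pending events, mirroring `kvm_vcpu_events`.
///
/// `Option` fields model the KVM_VCPUEVENT_VALID_* flags: `None` means the matching
/// flag was not set when the state was captured.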
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuEvents {
    pub exception: VcpuExceptionState,
    pub interrupt: VcpuInterruptState,
    pub nmi: VcpuNmiState,
    pub sipi_vector: Option<u32>,
    pub smi: VcpuSmiState,
    pub triple_fault: VcpuTripleFaultState,
    pub exception_payload: Option<u64>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuExceptionState {
    pub injected: bool,
    pub nr: u8,
    pub has_error_code: bool,
    pub pending: Option<bool>,
    pub error_code: u32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuInterruptState {
    pub injected: bool,
    pub nr: u8,
    pub soft: bool,
    pub shadow: Option<u8>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuNmiState {
    pub injected: bool,
    pub pending: Option<bool>,
    pub masked: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuSmiState {
    pub smm: Option<bool>,
    pub pending: bool,
    pub smm_inside_nmi: bool,
    pub latched_init: u8,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuTripleFaultState {
    pub pending: Option<bool>,
}

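/// Queries CPUID-style information from `descriptor` with the given ioctl, starting with
/// room for `initial_capacity` entries and doubling the buffer whenever the kernel
/// reports `E2BIG`.
///
/// A usage sketch (illustrative only; assumes an open `Kvm` handle named `kvm`):
/// ```ignore
/// let cpuid = get_cpuid_with_initial_capacity(&kvm, KVM_GET_SUPPORTED_CPUID, 32)?;
/// ```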
pub fn get_cpuid_with_initial_capacity<T: AsRawDescriptor>(
    descriptor: &T,
    kind: IoctlNr,
    initial_capacity: usize,
) -> Result<CpuId> {
    let mut entries: usize = initial_capacity;

    loop {
        let mut kvm_cpuid = KvmCpuId::new(entries);

        let ret = {
            // SAFETY:
            // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the
            // memory allocated for the struct. The limit is read from nent within KvmCpuId,
            // which is set to the allocated size above.
            unsafe { ioctl_with_mut_ptr(descriptor, kind, kvm_cpuid.as_mut_ptr()) }
        };
        if ret < 0 {
            let err = Error::last();
            match err.errno() {
                E2BIG => {
                    // Double the available memory for cpuid entries for kvm.
                    if let Some(val) = entries.checked_mul(2) {
                        entries = val;
                    } else {
                        return Err(err);
                    }
                }
                _ => return Err(err),
            }
        } else {
            return Ok(CpuId::from(&kvm_cpuid));
        }
    }
}

impl Kvm {
    pub fn get_cpuid(&self, kind: IoctlNr) -> Result<CpuId> {
        const KVM_MAX_ENTRIES: usize = 256;
        get_cpuid_with_initial_capacity(self, kind, KVM_MAX_ENTRIES)
    }

    pub fn get_vm_type(&self, protection_type: ProtectionType) -> Result<u32> {
        if protection_type.isolates_memory() {
            Ok(KVM_X86_PKVM_PROTECTED_VM)
        } else {
            Ok(0)
        }
    }

    /// Get the size of guest physical addresses in bits.
    pub fn get_guest_phys_addr_bits(&self) -> u8 {
        // Assume the guest physical address size is the same as the host.
        host_phys_addr_bits()
    }
}

impl HypervisorX86_64 for Kvm {
    fn get_supported_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_SUPPORTED_CPUID)
    }

    fn get_msr_index_list(&self) -> Result<Vec<u32>> {
        const MAX_KVM_MSR_ENTRIES: usize = 256;

        let mut msr_list = vec_with_array_field::<kvm_msr_list, u32>(MAX_KVM_MSR_ENTRIES);
        msr_list[0].nmsrs = MAX_KVM_MSR_ENTRIES as u32;

        let ret = {
            // SAFETY:
            // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the
            // memory allocated for the struct. The limit is read from nmsrs, which is set to
            // the allocated size (MAX_KVM_MSR_ENTRIES) above.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST, &mut msr_list[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        let mut nmsrs = msr_list[0].nmsrs;

        // SAFETY:
        // Mapping the unsized array to a slice is unsafe because the length isn't known. Using
        // the length we originally allocated with eliminates the possibility of overflow.
        let indices: &[u32] = unsafe {
            if nmsrs > MAX_KVM_MSR_ENTRIES as u32 {
                nmsrs = MAX_KVM_MSR_ENTRIES as u32;
            }
            msr_list[0].indices.as_slice(nmsrs as usize)
        };

        Ok(indices.to_vec())
    }
}

impl KvmVm {
    /// Does platform specific initialization for the KvmVm.
    pub fn init_arch(&self, _cfg: &Config) -> Result<()> {
        Ok(())
    }

    /// Whether running under pKVM.
    pub fn is_pkvm(&self) -> bool {
        false
    }

    /// Checks if a particular `VmCap` is available, or returns None if the arch-independent
    /// `Vm::check_capability()` should handle the check.
    pub fn check_capability_arch(&self, c: VmCap) -> Option<bool> {
        match c {
            VmCap::PvClock => Some(true),
            _ => None,
        }
    }

    /// Returns the params to pass to KVM_CREATE_DEVICE for a `kind` device on this arch, or
    /// None to let the arch-independent `KvmVm::create_device` handle it.
    pub fn get_device_params_arch(&self, _kind: DeviceKind) -> Option<kvm_create_device> {
        None
    }

    /// Arch-specific implementation of `Vm::get_pvclock`.
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        let mut clock_data: kvm_clock_data = Default::default();
        let ret =
            // SAFETY:
            // Safe because we know that our file is a VM fd, we know the kernel will only
            // write the correct amount of memory to our pointer, and we verify the return
            // result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK, &mut clock_data) };
        if ret == 0 {
            Ok(ClockState::from(&clock_data))
        } else {
            errno_result()
        }
    }

    /// Arch-specific implementation of `Vm::set_pvclock`.
    pub fn set_pvclock_arch(&self, state: &ClockState) -> Result<()> {
        let clock_data = kvm_clock_data::from(state);
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // the correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK, &clock_data) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of the given interrupt controller by issuing the KVM_GET_IRQCHIP
    /// ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn get_pic_state(&self, id: PicSelect) -> Result<kvm_pic_state> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        let ret = {
            // SAFETY:
            // Safe because we know our file is a VM fd, we know the kernel will only write
            // the correct amount of memory to our pointer, and we verify the return result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY:
                // Safe as we know that we are retrieving data related to the
                // PIC (primary or secondary) and not the IOAPIC.
                unsafe { irqchip_state.chip.pic },
            )
        } else {
            errno_result()
        }
    }

    /// Sets the state of the given interrupt controller by issuing the KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn set_pic_state(&self, id: PicSelect, state: &kvm_pic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        irqchip_state.chip.pic = *state;
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // the correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the number of pins for the emulated IOAPIC.
    pub fn get_ioapic_num_pins(&self) -> Result<usize> {
        Ok(NUM_IOAPIC_PINS)
    }

    /// Retrieves the state of the IOAPIC by issuing the KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn get_ioapic_state(&self) -> Result<kvm_ioapic_state> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        let ret = {
            // SAFETY:
            // Safe because we know our file is a VM fd, we know the kernel will only write
            // the correct amount of memory to our pointer, and we verify the return result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY:
                // Safe as we know that we are retrieving data related to the
                // IOAPIC and not the PIC.
                unsafe { irqchip_state.chip.ioapic },
            )
        } else {
            errno_result()
        }
    }

    /// Sets the state of the IOAPIC by issuing the KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn set_ioapic_state(&self, state: &kvm_ioapic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        irqchip_state.chip.ioapic = *state;
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // the correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Creates a PIT as per the KVM_CREATE_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn create_pit(&self) -> Result<()> {
        let pit_config = kvm_pit_config::default();
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // the correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2, &pit_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of the PIT by issuing the KVM_GET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    pub fn get_pit_state(&self) -> Result<kvm_pit_state2> {
        let mut pit_state = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2, &mut pit_state) };
        if ret == 0 {
            Ok(pit_state)
        } else {
            errno_result()
        }
    }

    /// Sets the state of the PIT by issuing the KVM_SET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // the correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2, pit_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets MSR_PLATFORM_INFO read access.
    pub fn set_platform_info_read_access(&self, allow_read: bool) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_MSR_PLATFORM_INFO,
            ..Default::default()
        };
        cap.args[0] = allow_read as u64;

        // SAFETY:
        // Safe because we know that our file is a VM fd, we know that the
        // kernel will only read the correct amount of memory from our pointer, and
        // we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Enables support for split-irqchip.
    pub fn enable_split_irqchip(&self, ioapic_pins: usize) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_SPLIT_IRQCHIP,
            ..Default::default()
        };
        cap.args[0] = ioapic_pins as u64;
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }
}

impl VmX86_64 for KvmVm {
    fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
        &self.kvm
    }

    fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
        // create_vcpu is declared separately in VmAArch64 and VmX86, so it can return
        // VcpuAArch64 or VcpuX86. But both use the same implementation in KvmVm::create_vcpu.
        Ok(Box::new(KvmVm::create_kvm_vcpu(self, id)?))
    }

    /// Sets the address of the three-page region in the VM's address space.
    ///
    /// See the documentation on the KVM_SET_TSS_ADDR ioctl.
    fn set_tss_addr(&self, addr: GuestAddress) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR, addr.offset()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the address of a one-page region in the VM's address space.
    ///
    /// See the documentation on the KVM_SET_IDENTITY_MAP_ADDR ioctl.
    fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR, &addr.offset()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
}

impl KvmVcpu {
    /// Handles a `KVM_EXIT_SYSTEM_EVENT` with event type `KVM_SYSTEM_EVENT_RESET` with the given
    /// event flags and returns the appropriate `VcpuExit` value for the run loop to handle.
    pub fn system_event_reset(&self, _event_flags: u64) -> Result<VcpuExit> {
        Ok(VcpuExit::SystemEventReset)
    }

    /// Gets the XSAVE area size by querying the KVM_CAP_XSAVE2 extension.
    ///
    /// A negative value from the ioctl indicates an error. If the reported size is no larger
    /// than 4096 bytes, XSAVE2 is not supported by the CPU or the kernel, and
    /// KVM_XSAVE_MAX_SIZE (4096) is returned; otherwise the reported size is returned.
    fn xsave_size(&self) -> Result<usize> {
        let size = {
            // SAFETY:
            // Safe because we know that our file is a valid VM fd.
            unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2 as u64) }
        };
        if size < 0 {
            return errno_result();
        }
        // Safe to unwrap since we already tested for negative values.
        let size: usize = size.try_into().unwrap();
        Ok(size.max(KVM_XSAVE_MAX_SIZE))
    }

    #[inline]
    pub(crate) fn handle_vm_exit_arch(&self, run: &mut kvm_run) -> Option<VcpuExit> {
        match run.exit_reason {
            KVM_EXIT_IO => Some(VcpuExit::Io),
            KVM_EXIT_IOAPIC_EOI => {
                // SAFETY:
                // Safe because the exit_reason (which comes from the kernel) told us which
                // union field to use.
                let vector = unsafe { run.__bindgen_anon_1.eoi.vector };
                Some(VcpuExit::IoapicEoi { vector })
            }
            KVM_EXIT_HLT => Some(VcpuExit::Hlt),
            KVM_EXIT_SET_TPR => Some(VcpuExit::SetTpr),
            KVM_EXIT_TPR_ACCESS => Some(VcpuExit::TprAccess),
            KVM_EXIT_X86_BUS_LOCK => Some(VcpuExit::BusLock),
            _ => None,
        }
    }
}

impl VcpuX86_64 for KvmVcpu {
    #[allow(clippy::cast_ptr_alignment)]
    fn set_interrupt_window_requested(&self, requested: bool) {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the kvm_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined, hence the clippy allow attribute.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
        run.request_interrupt_window = requested.into();
    }

    #[allow(clippy::cast_ptr_alignment)]
    fn ready_for_interrupt(&self) -> bool {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the kvm_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined, hence the clippy allow attribute.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
        run.ready_for_interrupt_injection != 0 && run.if_flag != 0
    }

    /// Use the KVM_INTERRUPT ioctl to inject the specified interrupt vector.
    ///
    /// While this ioctl exists on PPC and MIPS as well as x86, the semantics are different and
    /// ChromeOS doesn't support PPC or MIPS.
    fn interrupt(&self, irq: u8) -> Result<()> {
        if !self.ready_for_interrupt() {
            return Err(Error::new(EAGAIN));
        }

        let interrupt = kvm_interrupt { irq: irq.into() };
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_INTERRUPT, &interrupt) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn inject_nmi(&self) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VCPU fd.
        let ret = unsafe { ioctl(self, KVM_NMI) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_regs(&self) -> Result<Regs> {
        let mut regs: kvm_regs = Default::default();
        let ret = {
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // write the correct amount of memory to our pointer, and we verify the return
            // result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS, &mut regs) }
        };
        if ret == 0 {
            Ok(Regs::from(&regs))
        } else {
            errno_result()
        }
    }

    fn set_regs(&self, regs: &Regs) -> Result<()> {
        let regs = kvm_regs::from(regs);
        let ret = {
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // read the correct amount of memory from our pointer, and we verify the return
            // result.
            unsafe { ioctl_with_ref(self, KVM_SET_REGS, &regs) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_sregs(&self) -> Result<Sregs> {
        let mut regs: kvm_sregs = Default::default();
        let ret = {
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // write the correct amount of memory to our pointer, and we verify the return
            // result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) }
        };
        if ret == 0 {
            Ok(Sregs::from(&regs))
        } else {
            errno_result()
        }
    }

    fn set_sregs(&self, sregs: &Sregs) -> Result<()> {
        // Get the current `kvm_sregs` so we can use its `apic_base` and `interrupt_bitmap`,
        // which are not present in `Sregs`.
        let mut kvm_sregs: kvm_sregs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut kvm_sregs) };
        if ret != 0 {
            return errno_result();
        }

        kvm_sregs.cs = kvm_segment::from(&sregs.cs);
        kvm_sregs.ds = kvm_segment::from(&sregs.ds);
        kvm_sregs.es = kvm_segment::from(&sregs.es);
        kvm_sregs.fs = kvm_segment::from(&sregs.fs);
        kvm_sregs.gs = kvm_segment::from(&sregs.gs);
        kvm_sregs.ss = kvm_segment::from(&sregs.ss);
        kvm_sregs.tr = kvm_segment::from(&sregs.tr);
        kvm_sregs.ldt = kvm_segment::from(&sregs.ldt);
        kvm_sregs.gdt = kvm_dtable::from(&sregs.gdt);
        kvm_sregs.idt = kvm_dtable::from(&sregs.idt);
        kvm_sregs.cr0 = sregs.cr0;
        kvm_sregs.cr2 = sregs.cr2;
        kvm_sregs.cr3 = sregs.cr3;
        kvm_sregs.cr4 = sregs.cr4;
        kvm_sregs.cr8 = sregs.cr8;
        kvm_sregs.efer = sregs.efer;

        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only read
        // the correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &kvm_sregs) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_fpu(&self) -> Result<Fpu> {
        let mut fpu: kvm_fpu = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU, &mut fpu) };
        if ret == 0 {
            Ok(Fpu::from(&fpu))
        } else {
            errno_result()
        }
    }

    fn set_fpu(&self, fpu: &Fpu) -> Result<()> {
        let fpu = kvm_fpu::from(fpu);
        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_fpu struct.
            unsafe { ioctl_with_ref(self, KVM_SET_FPU, &fpu) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// If the VM reports support for XSAVE2, uses KVM_GET_XSAVE2; otherwise falls back to
    /// KVM_GET_XSAVE.
    fn get_xsave(&self) -> Result<Xsave> {
        let size = self.xsave_size()?;
        let ioctl_nr = if size > KVM_XSAVE_MAX_SIZE {
            KVM_GET_XSAVE2
        } else {
            KVM_GET_XSAVE
        };
        let mut xsave = Xsave::new(size);

        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ptr(self, ioctl_nr, xsave.as_mut_ptr()) };
        if ret == 0 {
            Ok(xsave)
        } else {
            errno_result()
        }
    }

    fn set_xsave(&self, xsave: &Xsave) -> Result<()> {
        let size = self.xsave_size()?;
        // Ensure xsave is the same size as used in get_xsave. If the sizes don't match, a
        // different set of XSAVE extensions is enabled for the CPU, so return an error.
        if xsave.len() != size {
            return Err(Error::new(EIO));
        }

        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only read
        // the correct amount of memory from our pointer, and we verify the return result.
        // Because of the len check above, and because the layout of `struct kvm_xsave` is
        // compatible with a slice of `u32`, we can pass the pointer to `xsave` directly.
        let ret = unsafe { ioctl_with_ptr(self, KVM_SET_XSAVE, xsave.as_ptr()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_interrupt_state(&self) -> Result<serde_json::Value> {
        let mut vcpu_evts: kvm_vcpu_events = Default::default();
        let ret = {
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // write the correct amount of memory to our pointer, and we verify the return
            // result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS, &mut vcpu_evts) }
        };
        if ret == 0 {
            Ok(
                serde_json::to_value(VcpuEvents::from(&vcpu_evts)).map_err(|e| {
                    error!("failed to serialize vcpu_events: {:?}", e);
                    Error::new(EIO)
                })?,
            )
        } else {
            errno_result()
        }
    }

    fn set_interrupt_state(&self, data: serde_json::Value) -> Result<()> {
        let vcpu_events =
            kvm_vcpu_events::from(&serde_json::from_value::<VcpuEvents>(data).map_err(|e| {
                error!("failed to deserialize vcpu_events: {:?}", e);
                Error::new(EIO)
            })?);
        let ret = {
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // read the correct amount of memory from our pointer, and we verify the return
            // result.
            unsafe { ioctl_with_ref(self, KVM_SET_VCPU_EVENTS, &vcpu_events) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_debugregs(&self) -> Result<DebugRegs> {
        let mut regs: kvm_debugregs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS, &mut regs) };
        if ret == 0 {
            Ok(DebugRegs::from(&regs))
        } else {
            errno_result()
        }
    }

    fn set_debugregs(&self, dregs: &DebugRegs) -> Result<()> {
        let dregs = kvm_debugregs::from(dregs);
        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_debugregs struct.
            unsafe { ioctl_with_ref(self, KVM_SET_DEBUGREGS, &dregs) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
        let mut regs: kvm_xcrs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS, &mut regs) };
        if ret < 0 {
            return errno_result();
        }

        Ok(regs
            .xcrs
            .iter()
            .take(regs.nr_xcrs as usize)
            .map(|kvm_xcr| (kvm_xcr.xcr, kvm_xcr.value))
            .collect())
    }

    fn set_xcr(&self, xcr_index: u32, value: u64) -> Result<()> {
        let mut kvm_xcr = kvm_xcrs {
            nr_xcrs: 1,
            ..Default::default()
        };
        kvm_xcr.xcrs[0].xcr = xcr_index;
        kvm_xcr.xcrs[0].value = value;

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_xcrs struct.
            unsafe { ioctl_with_ref(self, KVM_SET_XCRS, &kvm_xcr) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_msr(&self, msr_index: u32) -> Result<u64> {
        let mut msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(1);
        msrs[0].nmsrs = 1;

        // SAFETY: We initialize a one-element array using `vec_with_array_field` above.
        unsafe {
            let msr_entries = msrs[0].entries.as_mut_slice(1);
            msr_entries[0].index = msr_index;
        }

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read or write past the end of the kvm_msrs struct.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut msrs[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        // KVM_GET_MSRS returns the number of msr entries written.
        if ret != 1 {
            return Err(base::Error::new(libc::ENOENT));
        }

        // SAFETY:
        // Safe because we trust the kernel to return the correct array length on success.
        let value = unsafe {
            let msr_entries = msrs[0].entries.as_slice(1);
            msr_entries[0].data
        };

        Ok(value)
    }

    fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
        let msr_index_list = self.kvm.get_msr_index_list()?;
        let mut kvm_msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(msr_index_list.len());
        kvm_msrs[0].nmsrs = msr_index_list.len() as u32;
        // SAFETY:
        // Mapping the unsized array to a slice is unsafe because the length isn't known.
        // Providing the length used to create the struct guarantees the entire slice is valid.
        unsafe {
            kvm_msrs[0]
                .entries
                .as_mut_slice(msr_index_list.len())
                .iter_mut()
                .zip(msr_index_list.iter())
                .for_each(|(msr_entry, msr_index)| msr_entry.index = *msr_index);
        }

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read or write past the end of the kvm_msrs struct.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut kvm_msrs[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        // KVM_GET_MSRS returns the number of msr entries written.
        let count = ret as usize;
        if count != msr_index_list.len() {
            error!(
                "failed to get all MSRs: requested {}, got {}",
                msr_index_list.len(),
                count,
            );
            return Err(base::Error::new(libc::EPERM));
        }

        // SAFETY:
        // Safe because we trust the kernel to return the correct array length on success.
        let msrs = unsafe {
            BTreeMap::from_iter(
                kvm_msrs[0]
                    .entries
                    .as_slice(count)
                    .iter()
                    .map(|kvm_msr| (kvm_msr.index, kvm_msr.data)),
            )
        };

        Ok(msrs)
    }

    fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
        let mut kvm_msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(1);
        kvm_msrs[0].nmsrs = 1;

        // SAFETY: We initialize a one-element array using `vec_with_array_field` above.
        unsafe {
            let msr_entries = kvm_msrs[0].entries.as_mut_slice(1);
            msr_entries[0].index = msr_index;
            msr_entries[0].data = value;
        }

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_msrs struct.
            unsafe { ioctl_with_ref(self, KVM_SET_MSRS, &kvm_msrs[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        // KVM_SET_MSRS returns the number of msr entries written.
        if ret != 1 {
            error!("failed to set MSR {:#x} to {:#x}", msr_index, value);
            return Err(base::Error::new(libc::EPERM));
        }

        Ok(())
    }

    fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> {
        let cpuid = KvmCpuId::from(cpuid);
        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_cpuid2 struct.
            unsafe { ioctl_with_ptr(self, KVM_SET_CPUID2, cpuid.as_ptr()) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

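    /// Sets hardware breakpoints (at most four, matching debug registers DR0-DR3) and
    /// optionally enables single-stepping via the KVM_SET_GUEST_DEBUG ioctl.
    ///
    /// A usage sketch (illustrative only; the address is hypothetical):
    /// ```ignore
    /// vcpu.set_guest_debug(&[GuestAddress(0x1000)], /* enable_singlestep= */ true)?;
    /// ```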
    fn set_guest_debug(&self, addrs: &[GuestAddress], enable_singlestep: bool) -> Result<()> {
        use kvm_sys::*;
        let mut dbg: kvm_guest_debug = Default::default();

        if addrs.len() > 4 {
            error!(
                "at most 4 breakpoints are supported, but {} addresses were passed",
                addrs.len()
            );
            return Err(base::Error::new(libc::EINVAL));
        }

        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        if enable_singlestep {
            dbg.control |= KVM_GUESTDBG_SINGLESTEP;
        }

        // Set bits 9 and 10 of DR7:
        // bit 9: GE (global exact breakpoint enable) flag.
        // bit 10: always 1.
        dbg.arch.debugreg[7] = 0x0600;

        for (i, addr) in addrs.iter().enumerate() {
            dbg.arch.debugreg[i] = addr.0;
            // Set the global breakpoint enable flag for this address (bit 2*i + 1 of DR7).
            dbg.arch.debugreg[7] |= 2 << (i * 2);
        }
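        // For example, with two breakpoint addresses the loop sets G0 (2 << 0 = 0b0010)
        // and G1 (2 << 2 = 0b1000), leaving debugreg[7] = 0x0600 | 0x2 | 0x8 = 0x060a.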

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_guest_debug struct.
            unsafe { ioctl_with_ref(self, KVM_SET_GUEST_DEBUG, &dbg) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// KVM does not support the VcpuExit::Cpuid exit type.
    fn handle_cpuid(&mut self, _entry: &CpuIdEntry) -> Result<()> {
        Err(Error::new(ENXIO))
    }

    fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, _tsc_offset: u64) -> Result<()> {
        // On KVM, the TSC MSR is restored as part of SET_MSRS, and no further action is required.
        Ok(())
    }
}

impl KvmVcpu {
    /// X86 specific call to get the state of the "Local Advanced Programmable Interrupt
    /// Controller".
    ///
    /// See the documentation for KVM_GET_LAPIC.
    pub fn get_lapic(&self) -> Result<kvm_lapic_state> {
        let mut klapic: kvm_lapic_state = Default::default();

        let ret = {
            // SAFETY:
            // The ioctl is unsafe unless you trust the kernel not to write past the end of the
            // local_apic struct.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_LAPIC, &mut klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(klapic)
    }

    /// X86 specific call to set the state of the "Local Advanced Programmable Interrupt
    /// Controller".
    ///
    /// See the documentation for KVM_SET_LAPIC.
    pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> {
        let ret = {
            // SAFETY:
            // The ioctl is safe because the kernel will only read from the klapic struct.
            unsafe { ioctl_with_ref(self, KVM_SET_LAPIC, klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// X86 specific call to get the value of the APIC_BASE MSR.
    ///
    /// See the documentation for the `kvm_run` structure and for KVM_GET_LAPIC.
    pub fn get_apic_base(&self) -> Result<u64> {
        self.get_msr(MSR_IA32_APICBASE)
    }

    /// X86 specific call to set the value of the APIC_BASE MSR.
    ///
    /// See the documentation for the `kvm_run` structure and for KVM_GET_LAPIC.
    pub fn set_apic_base(&self, apic_base: u64) -> Result<()> {
        self.set_msr(MSR_IA32_APICBASE, apic_base)
    }

    /// Call to get pending interrupts acknowledged by the APIC but not yet injected into the
    /// CPU.
    ///
    /// See the documentation for KVM_GET_SREGS.
    pub fn get_interrupt_bitmap(&self) -> Result<[u64; 4usize]> {
        let mut regs: kvm_sregs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            Ok(regs.interrupt_bitmap)
        } else {
            errno_result()
        }
    }

    /// Call to set pending interrupts acknowledged by the APIC but not yet injected into the
    /// CPU.
    ///
    /// See the documentation for KVM_GET_SREGS.
    pub fn set_interrupt_bitmap(&self, interrupt_bitmap: [u64; 4usize]) -> Result<()> {
        // Potentially racy code. Vcpu registers are set in a separate thread, so this could
        // result in Sregs being modified from both the Vcpu initialization thread and the Irq
        // restoring thread.
        let mut regs: kvm_sregs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            regs.interrupt_bitmap = interrupt_bitmap;
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // read the correct amount of memory from our pointer, and we verify the return
            // result.
            let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &regs) };
            if ret >= 0 {
                Ok(())
            } else {
                errno_result()
            }
        } else {
            errno_result()
        }
    }
}

impl<'a> From<&'a KvmCpuId> for CpuId {
    fn from(kvm_cpuid: &'a KvmCpuId) -> CpuId {
        let kvm_entries = kvm_cpuid.entries_slice();
        let mut cpu_id_entries = Vec::with_capacity(kvm_entries.len());

        for entry in kvm_entries {
            let cpu_id_entry = CpuIdEntry {
                function: entry.function,
                index: entry.index,
                flags: entry.flags,
                cpuid: CpuidResult {
                    eax: entry.eax,
                    ebx: entry.ebx,
                    ecx: entry.ecx,
                    edx: entry.edx,
                },
            };
            cpu_id_entries.push(cpu_id_entry)
        }
        CpuId { cpu_id_entries }
    }
}

impl From<&CpuId> for KvmCpuId {
    fn from(cpuid: &CpuId) -> KvmCpuId {
        let mut kvm = KvmCpuId::new(cpuid.cpu_id_entries.len());
        let entries = kvm.mut_entries_slice();
        for (i, &e) in cpuid.cpu_id_entries.iter().enumerate() {
            entries[i] = kvm_cpuid_entry2 {
                function: e.function,
                index: e.index,
                flags: e.flags,
                eax: e.cpuid.eax,
                ebx: e.cpuid.ebx,
                ecx: e.cpuid.ecx,
                edx: e.cpuid.edx,
                ..Default::default()
            };
        }
        kvm
    }
}

impl From<&ClockState> for kvm_clock_data {
    fn from(state: &ClockState) -> Self {
        kvm_clock_data {
            clock: state.clock,
            ..Default::default()
        }
    }
}

impl From<&kvm_clock_data> for ClockState {
    fn from(clock_data: &kvm_clock_data) -> Self {
        ClockState {
            clock: clock_data.clock,
        }
    }
}

impl From<&kvm_pic_state> for PicState {
    fn from(item: &kvm_pic_state) -> Self {
        PicState {
            last_irr: item.last_irr,
            irr: item.irr,
            imr: item.imr,
            isr: item.isr,
            priority_add: item.priority_add,
            irq_base: item.irq_base,
            read_reg_select: item.read_reg_select != 0,
            poll: item.poll != 0,
            special_mask: item.special_mask != 0,
            init_state: item.init_state.into(),
            auto_eoi: item.auto_eoi != 0,
            rotate_on_auto_eoi: item.rotate_on_auto_eoi != 0,
            special_fully_nested_mode: item.special_fully_nested_mode != 0,
            use_4_byte_icw: item.init4 != 0,
            elcr: item.elcr,
            elcr_mask: item.elcr_mask,
        }
    }
}

impl From<&PicState> for kvm_pic_state {
    fn from(item: &PicState) -> Self {
        kvm_pic_state {
            last_irr: item.last_irr,
            irr: item.irr,
            imr: item.imr,
            isr: item.isr,
            priority_add: item.priority_add,
            irq_base: item.irq_base,
            read_reg_select: item.read_reg_select as u8,
            poll: item.poll as u8,
            special_mask: item.special_mask as u8,
            init_state: item.init_state as u8,
            auto_eoi: item.auto_eoi as u8,
            rotate_on_auto_eoi: item.rotate_on_auto_eoi as u8,
            special_fully_nested_mode: item.special_fully_nested_mode as u8,
            init4: item.use_4_byte_icw as u8,
            elcr: item.elcr,
            elcr_mask: item.elcr_mask,
        }
    }
}

impl From<&kvm_ioapic_state> for IoapicState {
    fn from(item: &kvm_ioapic_state) -> Self {
        let mut state = IoapicState {
            base_address: item.base_address,
            ioregsel: item.ioregsel as u8,
            ioapicid: item.id,
            current_interrupt_level_bitmap: item.irr,
            redirect_table: [IoapicRedirectionTableEntry::default(); NUM_IOAPIC_PINS],
        };
        for (in_state, out_state) in item.redirtbl.iter().zip(state.redirect_table.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}

impl From<&IoapicRedirectionTableEntry> for kvm_ioapic_state__bindgen_ty_1 {
    fn from(item: &IoapicRedirectionTableEntry) -> Self {
        kvm_ioapic_state__bindgen_ty_1 {
            // IoapicRedirectionTableEntry layout matches the exact bit layout of a hardware
            // ioapic redirection table entry, so we can simply do a 64-bit copy.
            bits: item.get(0, 64),
        }
    }
}

impl From<&kvm_ioapic_state__bindgen_ty_1> for IoapicRedirectionTableEntry {
    fn from(item: &kvm_ioapic_state__bindgen_ty_1) -> Self {
        let mut entry = IoapicRedirectionTableEntry::default();
        // SAFETY:
        // Safe because the 64-bit layout of the IoapicRedirectionTableEntry matches the kvm_sys
        // table entry layout.
        entry.set(0, 64, unsafe { item.bits });
        entry
    }
}

impl From<&IoapicState> for kvm_ioapic_state {
    fn from(item: &IoapicState) -> Self {
        let mut state = kvm_ioapic_state {
            base_address: item.base_address,
            ioregsel: item.ioregsel as u32,
            id: item.ioapicid,
            irr: item.current_interrupt_level_bitmap,
            ..Default::default()
        };
        for (in_state, out_state) in item.redirect_table.iter().zip(state.redirtbl.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}

impl From<&LapicState> for kvm_lapic_state {
    fn from(item: &LapicState) -> Self {
        let mut state = kvm_lapic_state::default();
        // There are 64 lapic registers.
        for (reg, value) in item.regs.iter().enumerate() {
            // Each lapic register is 16 bytes, but only the first 4 are used.
            let reg_offset = 16 * reg;
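            // For example, reg == 2 gives reg_offset == 32, so that register's value
            // occupies state.regs[32..36].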
            let regs_slice = &mut state.regs[reg_offset..reg_offset + 4];

            // to_le_bytes() produces an array of u8, not i8 (c_char), so we can't directly use
            // copy_from_slice().
            for (i, v) in value.to_le_bytes().iter().enumerate() {
                regs_slice[i] = *v as i8;
            }
        }
        state
    }
}

impl From<&kvm_lapic_state> for LapicState {
    fn from(item: &kvm_lapic_state) -> Self {
        let mut state = LapicState { regs: [0; 64] };
        // There are 64 lapic registers.
        for reg in 0..64 {
            // Each lapic register is 16 bytes, but only the first 4 are used.
            let reg_offset = 16 * reg;

            // from_le_bytes() only works on arrays of u8, not i8 (c_char).
            let reg_slice = &item.regs[reg_offset..reg_offset + 4];
            let mut bytes = [0u8; 4];
            for i in 0..4 {
                bytes[i] = reg_slice[i] as u8;
            }
            state.regs[reg] = u32::from_le_bytes(bytes);
        }
        state
    }
}

impl From<&PitState> for kvm_pit_state2 {
    fn from(item: &PitState) -> Self {
        kvm_pit_state2 {
            channels: [
                kvm_pit_channel_state::from(&item.channels[0]),
                kvm_pit_channel_state::from(&item.channels[1]),
                kvm_pit_channel_state::from(&item.channels[2]),
            ],
            flags: item.flags,
            ..Default::default()
        }
    }
}

impl From<&kvm_pit_state2> for PitState {
    fn from(item: &kvm_pit_state2) -> Self {
        PitState {
            channels: [
                PitChannelState::from(&item.channels[0]),
                PitChannelState::from(&item.channels[1]),
                PitChannelState::from(&item.channels[2]),
            ],
            flags: item.flags,
        }
    }
}

impl From<&PitChannelState> for kvm_pit_channel_state {
    fn from(item: &PitChannelState) -> Self {
        kvm_pit_channel_state {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched as u8,
            status_latched: item.status_latched as u8,
            status: item.status,
            read_state: item.read_state as u8,
            write_state: item.write_state as u8,
            // kvm's write_latch only stores the low byte of the reload value.
            write_latch: item.reload_value as u8,
            rw_mode: item.rw_mode as u8,
            mode: item.mode,
            bcd: item.bcd as u8,
            gate: item.gate as u8,
            count_load_time: item.count_load_time as i64,
        }
    }
}

impl From<&kvm_pit_channel_state> for PitChannelState {
    fn from(item: &kvm_pit_channel_state) -> Self {
        PitChannelState {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched.into(),
            status_latched: item.status_latched != 0,
            status: item.status,
            read_state: item.read_state.into(),
            write_state: item.write_state.into(),
            // kvm's write_latch only stores the low byte of the reload value.
            reload_value: item.write_latch as u16,
            rw_mode: item.rw_mode.into(),
            mode: item.mode,
            bcd: item.bcd != 0,
            gate: item.gate != 0,
            count_load_time: item.count_load_time as u64,
        }
    }
}

// This function translates an IrqSourceChip to the kvm u32 equivalent. It has a different
// implementation between x86_64 and aarch64 because the irqchip KVM constants are not defined on
// all architectures.
pub(super) fn chip_to_kvm_chip(chip: IrqSourceChip) -> u32 {
    match chip {
        IrqSourceChip::PicPrimary => KVM_IRQCHIP_PIC_MASTER,
        IrqSourceChip::PicSecondary => KVM_IRQCHIP_PIC_SLAVE,
        IrqSourceChip::Ioapic => KVM_IRQCHIP_IOAPIC,
        _ => {
            error!("Invalid IrqSourceChip for X86 {:?}", chip);
            0
        }
    }
}

impl From<&kvm_regs> for Regs {
    fn from(r: &kvm_regs) -> Self {
        Regs {
            rax: r.rax,
            rbx: r.rbx,
            rcx: r.rcx,
            rdx: r.rdx,
            rsi: r.rsi,
            rdi: r.rdi,
            rsp: r.rsp,
            rbp: r.rbp,
            r8: r.r8,
            r9: r.r9,
            r10: r.r10,
            r11: r.r11,
            r12: r.r12,
            r13: r.r13,
            r14: r.r14,
            r15: r.r15,
            rip: r.rip,
            rflags: r.rflags,
        }
    }
}

impl From<&Regs> for kvm_regs {
    fn from(r: &Regs) -> Self {
        kvm_regs {
            rax: r.rax,
            rbx: r.rbx,
            rcx: r.rcx,
            rdx: r.rdx,
            rsi: r.rsi,
            rdi: r.rdi,
            rsp: r.rsp,
            rbp: r.rbp,
            r8: r.r8,
            r9: r.r9,
            r10: r.r10,
            r11: r.r11,
            r12: r.r12,
            r13: r.r13,
            r14: r.r14,
            r15: r.r15,
            rip: r.rip,
            rflags: r.rflags,
        }
    }
}

impl From<&VcpuEvents> for kvm_vcpu_events {
    fn from(ve: &VcpuEvents) -> Self {
        let mut kvm_ve: kvm_vcpu_events = Default::default();

        kvm_ve.exception.injected = ve.exception.injected as u8;
        kvm_ve.exception.nr = ve.exception.nr;
        kvm_ve.exception.has_error_code = ve.exception.has_error_code as u8;
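        // `exception.pending` and `exception_payload` share the KVM_VCPUEVENT_VALID_PAYLOAD
        // flag, so the payload is only written when a pending value is present.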
        if let Some(pending) = ve.exception.pending {
            kvm_ve.exception.pending = pending as u8;
            if ve.exception_payload.is_some() {
                kvm_ve.exception_has_payload = true as u8;
            }
            kvm_ve.exception_payload = ve.exception_payload.unwrap_or(0);
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
        }
        kvm_ve.exception.error_code = ve.exception.error_code;

        kvm_ve.interrupt.injected = ve.interrupt.injected as u8;
        kvm_ve.interrupt.nr = ve.interrupt.nr;
        kvm_ve.interrupt.soft = ve.interrupt.soft as u8;
        if let Some(shadow) = ve.interrupt.shadow {
            kvm_ve.interrupt.shadow = shadow;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SHADOW;
        }

        kvm_ve.nmi.injected = ve.nmi.injected as u8;
        if let Some(pending) = ve.nmi.pending {
            kvm_ve.nmi.pending = pending as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        }
        kvm_ve.nmi.masked = ve.nmi.masked as u8;

        if let Some(sipi_vector) = ve.sipi_vector {
            kvm_ve.sipi_vector = sipi_vector;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }

        if let Some(smm) = ve.smi.smm {
            kvm_ve.smi.smm = smm as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
        kvm_ve.smi.pending = ve.smi.pending as u8;
        kvm_ve.smi.smm_inside_nmi = ve.smi.smm_inside_nmi as u8;
        kvm_ve.smi.latched_init = ve.smi.latched_init;

        if let Some(pending) = ve.triple_fault.pending {
            kvm_ve.triple_fault.pending = pending as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
        }
        kvm_ve
    }
}

impl From<&kvm_vcpu_events> for VcpuEvents {
    fn from(ve: &kvm_vcpu_events) -> Self {
        let exception = VcpuExceptionState {
            injected: ve.exception.injected != 0,
            nr: ve.exception.nr,
            has_error_code: ve.exception.has_error_code != 0,
            pending: if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
                Some(ve.exception.pending != 0)
            } else {
                None
            },
            error_code: ve.exception.error_code,
        };

        let interrupt = VcpuInterruptState {
            injected: ve.interrupt.injected != 0,
            nr: ve.interrupt.nr,
            soft: ve.interrupt.soft != 0,
            shadow: if ve.flags & KVM_VCPUEVENT_VALID_SHADOW != 0 {
                Some(ve.interrupt.shadow)
            } else {
                None
            },
        };

        let nmi = VcpuNmiState {
            injected: ve.nmi.injected != 0,
            pending: if ve.flags & KVM_VCPUEVENT_VALID_NMI_PENDING != 0 {
                Some(ve.nmi.pending != 0)
            } else {
                None
            },
            masked: ve.nmi.masked != 0,
        };

        let sipi_vector = if ve.flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR != 0 {
            Some(ve.sipi_vector)
        } else {
            None
        };

        let smi = VcpuSmiState {
            smm: if ve.flags & KVM_VCPUEVENT_VALID_SMM != 0 {
                Some(ve.smi.smm != 0)
            } else {
                None
            },
            pending: ve.smi.pending != 0,
            smm_inside_nmi: ve.smi.smm_inside_nmi != 0,
            latched_init: ve.smi.latched_init,
        };

        let triple_fault = VcpuTripleFaultState {
            pending: if ve.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT != 0 {
                Some(ve.triple_fault.pending != 0)
            } else {
                None
            },
        };

        let exception_payload = if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
            Some(ve.exception_payload)
        } else {
            None
        };

        VcpuEvents {
            exception,
            interrupt,
            nmi,
            sipi_vector,
            smi,
            triple_fault,
            exception_payload,
        }
    }
}

impl From<&kvm_segment> for Segment {
    fn from(s: &kvm_segment) -> Self {
        Segment {
            base: s.base,
            limit_bytes: s.limit,
            selector: s.selector,
            type_: s.type_,
            present: s.present,
            dpl: s.dpl,
            db: s.db,
            s: s.s,
            l: s.l,
            g: s.g,
            avl: s.avl,
        }
    }
}

impl From<&Segment> for kvm_segment {
    fn from(s: &Segment) -> Self {
        kvm_segment {
            base: s.base,
            limit: s.limit_bytes,
            selector: s.selector,
            type_: s.type_,
            present: s.present,
            dpl: s.dpl,
            db: s.db,
            s: s.s,
            l: s.l,
            g: s.g,
            avl: s.avl,
            unusable: match s.present {
                0 => 1,
                _ => 0,
            },
            ..Default::default()
        }
    }
}

impl From<&kvm_dtable> for DescriptorTable {
    fn from(dt: &kvm_dtable) -> Self {
        DescriptorTable {
            base: dt.base,
            limit: dt.limit,
        }
    }
}

impl From<&DescriptorTable> for kvm_dtable {
    fn from(dt: &DescriptorTable) -> Self {
        kvm_dtable {
            base: dt.base,
            limit: dt.limit,
            ..Default::default()
        }
    }
}

impl From<&kvm_sregs> for Sregs {
    fn from(r: &kvm_sregs) -> Self {
        Sregs {
            cs: Segment::from(&r.cs),
            ds: Segment::from(&r.ds),
            es: Segment::from(&r.es),
            fs: Segment::from(&r.fs),
            gs: Segment::from(&r.gs),
            ss: Segment::from(&r.ss),
            tr: Segment::from(&r.tr),
            ldt: Segment::from(&r.ldt),
            gdt: DescriptorTable::from(&r.gdt),
            idt: DescriptorTable::from(&r.idt),
            cr0: r.cr0,
            cr2: r.cr2,
            cr3: r.cr3,
            cr4: r.cr4,
            cr8: r.cr8,
            efer: r.efer,
        }
    }
}

impl From<&kvm_fpu> for Fpu {
    fn from(r: &kvm_fpu) -> Self {
        Fpu {
            fpr: FpuReg::from_16byte_arrays(&r.fpr),
            fcw: r.fcw,
            fsw: r.fsw,
            ftwx: r.ftwx,
            last_opcode: r.last_opcode,
            last_ip: r.last_ip,
            last_dp: r.last_dp,
            xmm: r.xmm,
            mxcsr: r.mxcsr,
        }
    }
}

impl From<&Fpu> for kvm_fpu {
    fn from(r: &Fpu) -> Self {
        kvm_fpu {
            fpr: FpuReg::to_16byte_arrays(&r.fpr),
            fcw: r.fcw,
            fsw: r.fsw,
            ftwx: r.ftwx,
            last_opcode: r.last_opcode,
            last_ip: r.last_ip,
            last_dp: r.last_dp,
            xmm: r.xmm,
            mxcsr: r.mxcsr,
            ..Default::default()
        }
    }
}

impl From<&kvm_debugregs> for DebugRegs {
    fn from(r: &kvm_debugregs) -> Self {
        DebugRegs {
            db: r.db,
            dr6: r.dr6,
            dr7: r.dr7,
        }
    }
}

impl From<&DebugRegs> for kvm_debugregs {
    fn from(r: &DebugRegs) -> Self {
        kvm_debugregs {
            db: r.db,
            dr6: r.dr6,
            dr7: r.dr7,
            ..Default::default()
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn vcpu_event_to_from() {
        // All data is random.
        let mut kvm_ve: kvm_vcpu_events = Default::default();
        kvm_ve.exception.injected = 1;
        kvm_ve.exception.nr = 65;
        kvm_ve.exception.has_error_code = 1;
        kvm_ve.exception.error_code = 110;
        kvm_ve.exception.pending = 1;

        kvm_ve.interrupt.injected = 1;
        kvm_ve.interrupt.nr = 100;
        kvm_ve.interrupt.soft = 1;
        kvm_ve.interrupt.shadow = 114;

        kvm_ve.nmi.injected = 1;
        kvm_ve.nmi.pending = 1;
        kvm_ve.nmi.masked = 0;

        kvm_ve.sipi_vector = 105;

        kvm_ve.smi.smm = 1;
        kvm_ve.smi.pending = 1;
        kvm_ve.smi.smm_inside_nmi = 1;
        kvm_ve.smi.latched_init = 100;

        kvm_ve.triple_fault.pending = 0;

        kvm_ve.exception_payload = 33;
        kvm_ve.exception_has_payload = 1;

        kvm_ve.flags = 0
            | KVM_VCPUEVENT_VALID_PAYLOAD
            | KVM_VCPUEVENT_VALID_SMM
            | KVM_VCPUEVENT_VALID_NMI_PENDING
            | KVM_VCPUEVENT_VALID_SIPI_VECTOR
            | KVM_VCPUEVENT_VALID_SHADOW;

        let ve: VcpuEvents = VcpuEvents::from(&kvm_ve);
        assert_eq!(ve.exception.injected, true);
        assert_eq!(ve.exception.nr, 65);
        assert_eq!(ve.exception.has_error_code, true);
        assert_eq!(ve.exception.error_code, 110);
        assert_eq!(ve.exception.pending.unwrap(), true);

        assert_eq!(ve.interrupt.injected, true);
        assert_eq!(ve.interrupt.nr, 100);
        assert_eq!(ve.interrupt.soft, true);
        assert_eq!(ve.interrupt.shadow.unwrap(), 114);

        assert_eq!(ve.nmi.injected, true);
        assert_eq!(ve.nmi.pending.unwrap(), true);
        assert_eq!(ve.nmi.masked, false);

        assert_eq!(ve.sipi_vector.unwrap(), 105);

        assert_eq!(ve.smi.smm.unwrap(), true);
        assert_eq!(ve.smi.pending, true);
        assert_eq!(ve.smi.smm_inside_nmi, true);
        assert_eq!(ve.smi.latched_init, 100);

        assert_eq!(ve.triple_fault.pending, None);

        assert_eq!(ve.exception_payload.unwrap(), 33);

        let kvm_ve_restored: kvm_vcpu_events = kvm_vcpu_events::from(&ve);
        assert_eq!(kvm_ve_restored.exception.injected, 1);
        assert_eq!(kvm_ve_restored.exception.nr, 65);
        assert_eq!(kvm_ve_restored.exception.has_error_code, 1);
        assert_eq!(kvm_ve_restored.exception.error_code, 110);
        assert_eq!(kvm_ve_restored.exception.pending, 1);

        assert_eq!(kvm_ve_restored.interrupt.injected, 1);
        assert_eq!(kvm_ve_restored.interrupt.nr, 100);
        assert_eq!(kvm_ve_restored.interrupt.soft, 1);
        assert_eq!(kvm_ve_restored.interrupt.shadow, 114);

        assert_eq!(kvm_ve_restored.nmi.injected, 1);
        assert_eq!(kvm_ve_restored.nmi.pending, 1);
        assert_eq!(kvm_ve_restored.nmi.masked, 0);

        assert_eq!(kvm_ve_restored.sipi_vector, 105);

        assert_eq!(kvm_ve_restored.smi.smm, 1);
        assert_eq!(kvm_ve_restored.smi.pending, 1);
        assert_eq!(kvm_ve_restored.smi.smm_inside_nmi, 1);
        assert_eq!(kvm_ve_restored.smi.latched_init, 100);

        assert_eq!(kvm_ve_restored.triple_fault.pending, 0);

        assert_eq!(kvm_ve_restored.exception_payload, 33);
        assert_eq!(kvm_ve_restored.exception_has_payload, 1);
    }
}