xref: /aosp_15_r20/external/crosvm/kvm/tests/kvm_tests.rs (revision bb4ee6a4ae7042d18b07a98463b9c8b875e44b39)
1 // Copyright 2022 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #![cfg(any(target_os = "android", target_os = "linux"))]
6 
7 use base::pagesize;
8 use base::Event;
9 #[cfg(any(target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64"))]
10 use base::FromRawDescriptor;
11 use base::IntoRawDescriptor;
12 use base::MappedRegion;
13 use base::MemoryMappingBuilder;
14 use base::SIGRTMIN;
15 use kvm::dirty_log_bitmap_size;
16 use kvm::Cap;
17 use kvm::Datamatch;
18 use kvm::IoeventAddress;
19 #[cfg(target_arch = "x86_64")]
20 use kvm::IrqRoute;
21 #[cfg(target_arch = "x86_64")]
22 use kvm::IrqSource;
23 use kvm::Kvm;
24 #[cfg(target_arch = "x86_64")]
25 use kvm::PicId;
26 use kvm::Vcpu;
27 use kvm::Vm;
28 #[cfg(target_arch = "x86_64")]
29 use kvm_sys::kvm_enable_cap;
30 #[cfg(target_arch = "x86_64")]
31 use kvm_sys::kvm_msr_entry;
32 #[cfg(target_arch = "x86_64")]
33 use kvm_sys::KVM_CAP_HYPERV_SYNIC;
34 #[cfg(target_arch = "x86_64")]
35 use kvm_sys::KVM_IRQCHIP_IOAPIC;
36 #[cfg(target_arch = "x86_64")]
37 use libc::EINVAL;
38 use vm_memory::GuestAddress;
39 use vm_memory::GuestMemory;
40 
#[test]
fn dirty_log_size() {
    // The dirty-log bitmap uses one bit per guest page, rounded up to whole
    // bytes, so 8 pages fit in a single byte and 100 pages need 13 bytes.
    let page = pagesize();
    let cases: [(usize, usize); 5] = [
        (0, 0),
        (page, 1),
        (page * 8, 1),
        (page * 8 + 1, 2),
        (page * 100, 13),
    ];
    for (mem_size, expected_bytes) in cases {
        assert_eq!(dirty_log_bitmap_size(mem_size), expected_bytes);
    }
}
50 
#[test]
fn new() {
    // Constructing a Kvm handle must succeed on the test host.
    Kvm::new().unwrap();
}
55 
#[test]
fn create_vm() {
    // Creating a VM from a fresh Kvm handle and a single page of guest
    // memory must succeed.
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    Vm::new(&kvm, gm).unwrap();
}
62 
#[test]
fn check_extension() {
    let kvm = Kvm::new().unwrap();
    // A universally supported capability must be reported as present...
    assert!(kvm.check_extension(Cap::UserMemory));
    // ...and an s390-only capability as absent (assuming the test is not
    // running on an s390 machine).
    let has_sigp = kvm.check_extension(Cap::S390UserSigp);
    assert!(!has_sigp);
}
70 
#[test]
fn check_vm_extension() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    // The VM-level query should report a universally supported capability
    // as present...
    assert!(vm.check_extension(Cap::UserMemory));
    // ...and an s390-only capability as absent (assuming the test is not
    // running on an s390 machine).
    let has_sigp = vm.check_extension(Cap::S390UserSigp);
    assert!(!has_sigp);
}
80 
#[test]
#[cfg(target_arch = "x86_64")]
fn get_supported_cpuid() {
    let kvm = Kvm::new().unwrap();
    // The supported-CPUID query must return at least one entry.
    let mut supported = kvm.get_supported_cpuid().unwrap();
    let entries = supported.mut_entries_slice();
    assert!(!entries.is_empty());
}
89 
#[test]
#[cfg(target_arch = "x86_64")]
fn get_emulated_cpuid() {
    // Querying the emulated CPUID must succeed; the contents are not
    // inspected here.
    let kvm = Kvm::new().unwrap();
    kvm.get_emulated_cpuid().unwrap();
}
96 
#[test]
#[cfg(target_arch = "x86_64")]
fn get_msr_index_list() {
    let kvm = Kvm::new().unwrap();
    // The kernel should expose more than a single MSR index.
    let indices = kvm.get_msr_index_list().unwrap();
    assert!(indices.len() >= 2);
}
104 
#[test]
fn add_memory() {
    let page = pagesize() as u64;
    let kvm = Kvm::new().unwrap();
    // Guest memory with a gap: one page at 0, five pages starting at page 5.
    let guest_mem =
        GuestMemory::new(&[(GuestAddress(0), page), (GuestAddress(5 * page), 5 * page)]).unwrap();
    let mut vm = Vm::new(&kvm, guest_mem).unwrap();

    // Adding a region inside the gap must succeed.
    let first = MemoryMappingBuilder::new(pagesize()).build().unwrap();
    vm.add_memory_region(GuestAddress(page), Box::new(first), false, false)
        .unwrap();

    // As must adding a region past the end of the registered ranges.
    let second = MemoryMappingBuilder::new(pagesize()).build().unwrap();
    vm.add_memory_region(GuestAddress(0x10 * page), Box::new(second), false, false)
        .unwrap();
}
127 
#[test]
fn add_memory_ro() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let mut vm = Vm::new(&kvm, guest_mem).unwrap();
    // Registering a region with the read-only flag set must be accepted.
    let mapping = MemoryMappingBuilder::new(pagesize()).build().unwrap();
    vm.add_memory_region(
        GuestAddress(pagesize() as u64),
        Box::new(mapping),
        true,
        false,
    )
    .unwrap();
}
138 
#[test]
fn remove_memory_region() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let mut vm = Vm::new(&kvm, guest_mem).unwrap();

    let region_size = pagesize();
    let mapping = MemoryMappingBuilder::new(region_size).build().unwrap();
    // Record the host address so the returned mapping can be identified.
    let host_ptr = mapping.as_ptr();
    let slot = vm
        .add_memory_region(
            GuestAddress(pagesize() as u64),
            Box::new(mapping),
            false,
            false,
        )
        .unwrap();

    // Removing the slot must hand back the very mapping that was added.
    let reclaimed = vm.remove_memory_region(slot).unwrap();
    assert_eq!(reclaimed.size(), region_size);
    assert_eq!(reclaimed.as_ptr(), host_ptr);
}
154 
#[test]
fn remove_invalid_memory() {
    // Removing a slot that was never added must fail rather than panic.
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let mut vm = Vm::new(&kvm, guest_mem).unwrap();
    let result = vm.remove_memory_region(0);
    assert!(result.is_err());
}
162 
#[test]
fn overlap_memory() {
    let page = pagesize() as u64;
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 0x10 * page)]).unwrap();
    let mut vm = Vm::new(&kvm, guest_mem).unwrap();
    // A region overlapping already-registered guest memory must be rejected.
    let mapping = MemoryMappingBuilder::new(2 * pagesize()).build().unwrap();
    let result = vm.add_memory_region(GuestAddress(2 * page), Box::new(mapping), false, false);
    assert!(result.is_err());
}
179 
#[test]
fn get_memory() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    // A value written through the VM's memory handle must read back intact.
    let addr = GuestAddress(0xf0);
    let written = 67u8;
    vm.get_memory().write_obj_at_addr(written, addr).unwrap();
    let read_back: u8 = vm.get_memory().read_obj_from_addr(addr).unwrap();
    assert_eq!(read_back, written);
}
190 
#[test]
#[cfg(target_arch = "x86_64")]
fn clock_handling() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    // Read the VM clock, nudge it forward, and write it back.
    let mut clock = vm.get_clock().unwrap();
    clock.clock += 1000;
    vm.set_clock(&clock).unwrap();
}
201 
#[test]
#[cfg(target_arch = "x86_64")]
fn pic_handling() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    vm.create_irq_chip().unwrap();
    // Round-trip the secondary PIC's state through get/set.
    let state = vm.get_pic_state(PicId::Secondary).unwrap();
    vm.set_pic_state(PicId::Secondary, &state).unwrap();
}
212 
#[test]
#[cfg(target_arch = "x86_64")]
fn ioapic_handling() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    vm.create_irq_chip().unwrap();
    // Round-trip the IOAPIC state through get/set.
    let state = vm.get_ioapic_state().unwrap();
    vm.set_ioapic_state(&state).unwrap();
}
223 
#[test]
#[cfg(target_arch = "x86_64")]
fn pit_handling() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    // Set up the in-kernel irqchip, then the PIT.
    vm.create_irq_chip().unwrap();
    vm.create_pit().unwrap();
    // Round-trip the PIT state through get/set.
    let state = vm.get_pit_state().unwrap();
    vm.set_pit_state(&state).unwrap();
}
235 
#[test]
fn register_ioevent() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    let evt = Event::new().unwrap();

    // Length-agnostic matches, one on a PIO address and one on MMIO.
    vm.register_ioevent(&evt, IoeventAddress::Pio(0xf4), Datamatch::AnyLength)
        .unwrap();
    vm.register_ioevent(&evt, IoeventAddress::Mmio(0x1000), Datamatch::AnyLength)
        .unwrap();

    // One exact-value match for each supported datamatch width.
    let sized_matches = [
        (0xc1, Datamatch::U8(Some(0x7fu8))),
        (0xc2, Datamatch::U16(Some(0x1337u16))),
        (0xc4, Datamatch::U32(Some(0xdeadbeefu32))),
        (0xc8, Datamatch::U64(Some(0xdeadbeefdeadbeefu64))),
    ];
    for (port, datamatch) in sized_matches {
        vm.register_ioevent(&evt, IoeventAddress::Pio(port), datamatch)
            .unwrap();
    }
}
271 
#[test]
fn unregister_ioevent() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    let evt = Event::new().unwrap();

    // Register three ioevents with distinct address/datamatch pairs...
    vm.register_ioevent(&evt, IoeventAddress::Pio(0xf4), Datamatch::AnyLength)
        .unwrap();
    vm.register_ioevent(&evt, IoeventAddress::Mmio(0x1000), Datamatch::AnyLength)
        .unwrap();
    vm.register_ioevent(
        &evt,
        IoeventAddress::Mmio(0x1004),
        Datamatch::U8(Some(0x7fu8)),
    )
    .unwrap();

    // ...then unregister each one with the identical pair.
    vm.unregister_ioevent(&evt, IoeventAddress::Pio(0xf4), Datamatch::AnyLength)
        .unwrap();
    vm.unregister_ioevent(&evt, IoeventAddress::Mmio(0x1000), Datamatch::AnyLength)
        .unwrap();
    vm.unregister_ioevent(
        &evt,
        IoeventAddress::Mmio(0x1004),
        Datamatch::U8(Some(0x7fu8)),
    )
    .unwrap();
}
299 
#[test]
#[cfg(any(target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64"))]
fn irqfd_resample() {
    // Registers and then unregisters an irqfd paired with a resample event,
    // then verifies that a bogus resample descriptor is rejected.
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, gm).unwrap();
    let evtfd1 = Event::new().unwrap();
    let evtfd2 = Event::new().unwrap();
    vm.create_irq_chip().unwrap();
    vm.register_irqfd_resample(&evtfd1, &evtfd2, 4).unwrap();
    vm.unregister_irqfd(&evtfd1, 4).unwrap();

    // Ensures the ioctl is actually reading the resamplefd by providing an invalid fd and expecting
    // an error. File descriptor numbers are allocated sequentially, so this very large fd should
    // never practically be in use.
    // SAFETY: This is a bad idea! Don't try this at home! Professional driver on a closed course.
    let resample_evt = unsafe { Event::from_raw_descriptor(2147483647) };
    vm.register_irqfd_resample(&evtfd1, &resample_evt, 4)
        .unwrap_err();
    let _ = resample_evt.into_raw_descriptor(); // Don't try to close the invalid fd.
}
321 
#[test]
#[cfg(target_arch = "x86_64")]
fn set_gsi_routing() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    vm.create_irq_chip().unwrap();

    // An empty routing table is accepted.
    vm.set_gsi_routing(&[]).unwrap();

    // A single irqchip-sourced route.
    let chip_route = IrqRoute {
        gsi: 1,
        source: IrqSource::Irqchip {
            chip: KVM_IRQCHIP_IOAPIC,
            pin: 3,
        },
    };
    vm.set_gsi_routing(&[chip_route]).unwrap();

    // A single MSI-sourced route.
    let msi_route = IrqRoute {
        gsi: 1,
        source: IrqSource::Msi {
            address: 0xf000000,
            data: 0xa0,
        },
    };
    vm.set_gsi_routing(&[msi_route]).unwrap();

    // Both kinds together, on distinct GSIs.
    vm.set_gsi_routing(&[
        IrqRoute {
            gsi: 1,
            source: IrqSource::Irqchip {
                chip: KVM_IRQCHIP_IOAPIC,
                pin: 3,
            },
        },
        IrqRoute {
            gsi: 2,
            source: IrqSource::Msi {
                address: 0xf000000,
                data: 0xa0,
            },
        },
    ])
    .unwrap();
}
364 
#[test]
fn create_vcpu() {
    // Creating vcpu 0 on a freshly created VM must succeed.
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, gm).unwrap();
    Vcpu::new(0, &kvm, &vm).unwrap();
}
372 
#[test]
#[cfg(target_arch = "x86_64")]
fn debugregs() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();

    // A dr7 value written via set_debugregs should survive a get round trip.
    let mut written = vcpu.get_debugregs().unwrap();
    written.dr7 = 13;
    vcpu.set_debugregs(&written).unwrap();
    let read_back = vcpu.get_debugregs().unwrap();
    assert_eq!(read_back.dr7, written.dr7);
}
386 
#[test]
#[cfg(target_arch = "x86_64")]
fn xcrs() {
    let kvm = Kvm::new().unwrap();
    // XCR access is an optional capability; skip the test where unsupported.
    if !kvm.check_extension(Cap::Xcrs) {
        return;
    }

    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();

    // The first XCR value written via set_xcrs should read back unchanged.
    let mut written = vcpu.get_xcrs().unwrap();
    written.xcrs[0].value = 1;
    vcpu.set_xcrs(&written).unwrap();
    let read_back = vcpu.get_xcrs().unwrap();
    assert_eq!(read_back.xcrs[0].value, written.xcrs[0].value);
}
404 
#[test]
#[cfg(target_arch = "x86_64")]
fn get_msrs() {
    // get_msrs() is expected to drop entries the kernel could not read, so
    // after the call only the fetchable entry should remain in the vector.
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, gm).unwrap();
    let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
    let mut msrs = vec![
        // This one should succeed
        kvm_msr_entry {
            index: 0x0000011e,
            ..Default::default()
        },
        // This one will fail to fetch
        kvm_msr_entry {
            index: 0xffffffff,
            ..Default::default()
        },
    ];
    vcpu.get_msrs(&mut msrs).unwrap();
    // Only the valid MSR entry survives.
    assert_eq!(msrs.len(), 1);
}
427 
#[test]
#[cfg(target_arch = "x86_64")]
fn get_hyperv_cpuid() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
    // Older kernels don't support this query, so the only tolerated failure
    // is EINVAL; success is also fine.
    if let Err(e) = vcpu.get_hyperv_cpuid() {
        assert_eq!(e.errno(), EINVAL);
    }
}
444 
#[test]
#[cfg(target_arch = "x86_64")]
fn enable_feature() {
    // Enables KVM_CAP_HYPERV_SYNIC on a vcpu; the in-kernel irqchip is
    // created first, before the vcpu.
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, gm).unwrap();
    vm.create_irq_chip().unwrap();
    let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
    let cap: kvm_enable_cap = kvm_sys::kvm_enable_cap {
        cap: KVM_CAP_HYPERV_SYNIC,
        ..Default::default()
    };
    // TODO(b/315998194): Add safety comment
    #[allow(clippy::undocumented_unsafe_blocks)]
    unsafe { vcpu.kvm_enable_cap(&cap) }.unwrap();
}
461 
#[test]
#[cfg(target_arch = "x86_64")]
fn mp_state() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    // The in-kernel irqchip is created before the vcpu.
    vm.create_irq_chip().unwrap();
    let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
    // Round-trip the vcpu's multiprocessing state through get/set.
    let mp_state = vcpu.get_mp_state().unwrap();
    vcpu.set_mp_state(&mp_state).unwrap();
}
473 
#[test]
fn set_signal_mask() {
    let kvm = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, guest_mem).unwrap();
    let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
    // Installing a one-signal mask (the first real-time signal) must succeed.
    let mask = [SIGRTMIN()];
    vcpu.set_signal_mask(&mask).unwrap();
}
482 
#[test]
fn vcpu_mmap_size() {
    let kvm = Kvm::new().unwrap();
    // The reported vcpu mmap area must span at least one page and be a
    // whole multiple of the page size.
    let mmap_size = kvm.get_vcpu_mmap_size().unwrap();
    let page = pagesize();
    assert!(mmap_size >= page);
    assert_eq!(mmap_size % page, 0);
}
491 
#[test]
#[cfg(target_arch = "x86_64")]
fn set_identity_map_addr() {
    // Setting the identity map address to a page beyond the registered
    // guest memory must be accepted.
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), 10 * pagesize() as u64)]).unwrap();
    let vm = Vm::new(&kvm, gm).unwrap();
    vm.set_identity_map_addr(GuestAddress(20 * pagesize() as u64))
        .unwrap();
}
501