// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#![cfg(any(target_os = "android", target_os = "linux"))]

#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
mod aarch64;

#[cfg(target_arch = "x86_64")]
mod x86_64;

use std::thread;

use base::pagesize;
use base::Event;
use base::FromRawDescriptor;
use base::IntoRawDescriptor;
use base::MappedRegion;
use base::MemoryMappingArena;
use base::MemoryMappingBuilder;
use hypervisor::kvm::dirty_log_bitmap_size;
use hypervisor::kvm::Kvm;
use hypervisor::kvm::KvmCap;
use hypervisor::kvm::KvmVm;
use hypervisor::Datamatch;
use hypervisor::Hypervisor;
use hypervisor::HypervisorCap;
use hypervisor::IoEventAddress;
use hypervisor::MemCacheType::CacheCoherent;
use hypervisor::Vm;
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
use hypervisor::VmAArch64;
#[cfg(target_arch = "riscv64")]
use hypervisor::VmRiscv64;
#[cfg(target_arch = "x86_64")]
use hypervisor::VmX86_64;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;

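// dirty_log_bitmap_size() should allocate one bit per guest page, rounded up to whole bytes
// (e.g. 100 pages -> 13 bytes).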
#[test]
fn dirty_log_size() {
    let page_size = pagesize();
    assert_eq!(dirty_log_bitmap_size(0), 0);
    assert_eq!(dirty_log_bitmap_size(page_size), 1);
    assert_eq!(dirty_log_bitmap_size(page_size * 8), 1);
    assert_eq!(dirty_log_bitmap_size(page_size * 8 + 1), 2);
    assert_eq!(dirty_log_bitmap_size(page_size * 100), 13);
}

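// Opening the KVM device should succeed on any KVM-enabled host.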
#[test]
fn new() {
    Kvm::new().unwrap();
}

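// UserMemory should be available on any usable KVM host; S390UserSigp is s390-only, so it should
// be absent on the architectures this test builds for.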
#[test]
fn check_capability() {
    let kvm = Kvm::new().unwrap();
    assert!(kvm.check_capability(HypervisorCap::UserMemory));
    assert!(!kvm.check_capability(HypervisorCap::S390UserSigp));
}

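// A VM can be created from a single page of guest memory.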
#[test]
fn create_vm() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    KvmVm::new(&kvm, gm, Default::default()).unwrap();
}

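// The VM handle can be duplicated with try_clone().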
#[test]
fn clone_vm() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    vm.try_clone().unwrap();
}

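// KvmVm is Send: it can be moved to another thread and dropped there.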
#[test]
fn send_vm() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    thread::spawn(move || {
        let _vm = vm;
    })
    .join()
    .unwrap();
}

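// Raw capability checks go directly to KVM on the VM, mirroring the hypervisor-level checks above.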
#[test]
fn check_vm_capability() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    assert!(vm.check_raw_capability(KvmCap::UserMemory));
    // I assume nobody is testing this on s390
    assert!(!vm.check_raw_capability(KvmCap::S390UserSigp));
}

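// A vcpu can be created once the VM exists.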
#[test]
fn create_vcpu() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    vm.create_vcpu(0).unwrap();
}

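// A value written through the GuestMemory returned by get_memory() can be read back from the same
// guest address.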
#[test]
fn get_memory() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    let obj_addr = GuestAddress(0xf0);
    vm.get_memory().write_obj_at_addr(67u8, obj_addr).unwrap();
    let read_val: u8 = vm.get_memory().read_obj_from_addr(obj_addr).unwrap();
    assert_eq!(read_val, 67u8);
}

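// Extra regions can be added both in the hole between the initial guest memory regions and past
// the end of guest memory.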
#[test]
fn add_memory() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[
        (GuestAddress(0), pagesize() as u64),
        (GuestAddress(pagesize() as u64 * 5), pagesize() as u64 * 5),
    ])
    .unwrap();
    let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    let mem_size = 0x1000;
    let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
    vm.add_memory_region(
        GuestAddress(pagesize() as u64),
        Box::new(mem),
        false,
        false,
        CacheCoherent,
    )
    .unwrap();
    let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
    vm.add_memory_region(
        GuestAddress(0x10 * pagesize() as u64),
        Box::new(mem),
        false,
        false,
        CacheCoherent,
    )
    .unwrap();
}

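// Same as add_memory(), but the region is mapped into the guest read-only (the first boolean
// flag).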
#[test]
fn add_memory_ro() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    let mem_size = 0x1000;
    let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
    vm.add_memory_region(
        GuestAddress(pagesize() as u64),
        Box::new(mem),
        true,
        false,
        CacheCoherent,
    )
    .unwrap();
}

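// remove_memory_region() hands back the mapping that was added, with its size and host pointer
// intact.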
#[test]
fn remove_memory() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    let mem_size = 0x1000;
    let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
    let mem_ptr = mem.as_ptr();
    let slot = vm
        .add_memory_region(
            GuestAddress(pagesize() as u64),
            Box::new(mem),
            false,
            false,
            CacheCoherent,
        )
        .unwrap();
    let removed_mem = vm.remove_memory_region(slot).unwrap();
    assert_eq!(removed_mem.size(), mem_size);
    assert_eq!(removed_mem.as_ptr(), mem_ptr);
}

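// Removing a slot that was never added must fail.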
#[test]
fn remove_invalid_memory() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    assert!(vm.remove_memory_region(0).is_err());
}

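// Adding a region that overlaps memory the VM already has must fail.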
#[test]
fn overlap_memory() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), 0x10 * pagesize() as u64)]).unwrap();
    let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    let mem_size = 2 * pagesize();
    let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
    assert!(vm
        .add_memory_region(
            GuestAddress(2 * pagesize() as u64),
            Box::new(mem),
            false,
            false,
            CacheCoherent,
        )
        .is_err());
}

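// Syncing the whole region succeeds, while arguments past the end of the region or an unknown
// slot must fail.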
#[test]
fn sync_memory() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[
        (GuestAddress(0), pagesize() as u64),
        (GuestAddress(5 * pagesize() as u64), 5 * pagesize() as u64),
    ])
    .unwrap();
    let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    let mem_size = pagesize();
    let mem = MemoryMappingArena::new(mem_size).unwrap();
    let slot = vm
        .add_memory_region(
            GuestAddress(pagesize() as u64),
            Box::new(mem),
            false,
            false,
            CacheCoherent,
        )
        .unwrap();
    vm.msync_memory_region(slot, mem_size, 0).unwrap();
    assert!(vm.msync_memory_region(slot, mem_size + 1, 0).is_err());
    assert!(vm.msync_memory_region(slot + 1, mem_size, 0).is_err());
}

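// Several eventfds may be bound to the same GSI, but registering the same eventfd on the same GSI
// twice must fail.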
#[test]
fn register_irqfd() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    let evtfd1 = Event::new().unwrap();
    let evtfd2 = Event::new().unwrap();
    let evtfd3 = Event::new().unwrap();
    vm.create_irq_chip().unwrap();
    vm.register_irqfd(4, &evtfd1, None).unwrap();
    vm.register_irqfd(8, &evtfd2, None).unwrap();
    vm.register_irqfd(4, &evtfd3, None).unwrap();
    vm.register_irqfd(4, &evtfd3, None).unwrap_err();
}

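// Every irqfd registered here can be unregistered again.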
#[test]
fn unregister_irqfd() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    let evtfd1 = Event::new().unwrap();
    let evtfd2 = Event::new().unwrap();
    let evtfd3 = Event::new().unwrap();
    vm.create_irq_chip().unwrap();
    vm.register_irqfd(4, &evtfd1, None).unwrap();
    vm.register_irqfd(8, &evtfd2, None).unwrap();
    vm.register_irqfd(4, &evtfd3, None).unwrap();
    vm.unregister_irqfd(4, &evtfd1).unwrap();
    vm.unregister_irqfd(8, &evtfd2).unwrap();
    vm.unregister_irqfd(4, &evtfd3).unwrap();
}

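// An irqfd can be paired with a resample event at registration time (KVM uses the resamplefd for
// level-triggered interrupts).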
#[test]
fn irqfd_resample() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    let evtfd1 = Event::new().unwrap();
    let evtfd2 = Event::new().unwrap();
    vm.create_irq_chip().unwrap();
    vm.register_irqfd(4, &evtfd1, Some(&evtfd2)).unwrap();
    vm.unregister_irqfd(4, &evtfd1).unwrap();

    // Ensures the ioctl is actually reading the resamplefd by providing an invalid fd and
    // expecting an error. File descriptor numbers are allocated sequentially, so this very large
    // fd should never practically be in use.
    // SAFETY: This is a bad idea! Don't try this at home! Professional driver on a closed course.
    let resample_evt = unsafe { Event::from_raw_descriptor(2147483647) };
    vm.register_irqfd(4, &evtfd1, Some(&resample_evt))
        .unwrap_err();
    let _ = resample_evt.into_raw_descriptor(); // Don't try to close the invalid fd.
}

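// ioevents can match any access width (AnyLength) or an exact 1-, 2-, 4-, or 8-byte value, on
// both PIO and MMIO addresses.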
#[test]
fn register_ioevent() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    let evtfd = Event::new().unwrap();
    vm.register_ioevent(&evtfd, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
        .unwrap();
    vm.register_ioevent(&evtfd, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
        .unwrap();
    vm.register_ioevent(
        &evtfd,
        IoEventAddress::Pio(0xc1),
        Datamatch::U8(Some(0x7fu8)),
    )
    .unwrap();
    vm.register_ioevent(
        &evtfd,
        IoEventAddress::Pio(0xc2),
        Datamatch::U16(Some(0x1337u16)),
    )
    .unwrap();
    vm.register_ioevent(
        &evtfd,
        IoEventAddress::Pio(0xc4),
        Datamatch::U32(Some(0xdeadbeefu32)),
    )
    .unwrap();
    vm.register_ioevent(
        &evtfd,
        IoEventAddress::Pio(0xc8),
        Datamatch::U64(Some(0xdeadbeefdeadbeefu64)),
    )
    .unwrap();
}

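// Each ioevent is unregistered with the same address and datamatch it was registered with.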
#[test]
fn unregister_ioevent() {
    let kvm = Kvm::new().unwrap();
    let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
    let evtfd = Event::new().unwrap();
    vm.register_ioevent(&evtfd, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
        .unwrap();
    vm.register_ioevent(&evtfd, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
        .unwrap();
    vm.register_ioevent(
        &evtfd,
        IoEventAddress::Mmio(0x1004),
        Datamatch::U8(Some(0x7fu8)),
    )
    .unwrap();
    vm.unregister_ioevent(&evtfd, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
        .unwrap();
    vm.unregister_ioevent(&evtfd, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
        .unwrap();
    vm.unregister_ioevent(
        &evtfd,
        IoEventAddress::Mmio(0x1004),
        Datamatch::U8(Some(0x7fu8)),
    )
    .unwrap();
}