// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2021 SUSE LLC <[email protected]>
 *
 * KVM host library for setting up and running virtual machine tests.
 */

#include <stdlib.h>
#include <errno.h>

#define TST_NO_DEFAULT_MAIN
#include "tst_test.h"
#include "tst_clocks.h"
#include "tst_timer.h"
#include "kvm_host.h"

static struct tst_kvm_instance test_vm = { .vm_fd = -1 };

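/*
 * Guest bootstrap code copied to VM_RESET_BASEADDR by
 * tst_kvm_create_instance(); it jumps to the test payload entry point.
 */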
const unsigned char tst_kvm_reset_code[VM_RESET_CODE_SIZE] = {
	0xea, 0x00, 0x10, 0x00, 0x00 /* JMP 0x1000 */
};

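/* Check that the result code reported by the guest is a valid LTP result type. */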
void tst_kvm_validate_result(int value)
{
	int ttype, valid_result[] = {TPASS, TFAIL, TBROK, TWARN, TINFO, TCONF};
	size_t i;

	if (value == KVM_TNONE)
		tst_brk(TBROK, "KVM test did not return any result");

	ttype = TTYPE_RESULT(value);

	for (i = 0; i < ARRAY_SIZE(valid_result); i++) {
		if (ttype == valid_result[i])
			return;
	}

	tst_brk(TBROK, "KVM test returned invalid result value %d", value);
}

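/*
 * Translate a guest virtual address to a guest physical address using
 * KVM_TRANSLATE. Returns the address unchanged if the ioctl is not
 * implemented on this architecture, or 0 if the translation is invalid.
 */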
uint64_t tst_kvm_get_phys_address(const struct tst_kvm_instance *inst,
	uint64_t addr)
{
	struct kvm_translation trans = { .linear_address = addr };

	TEST(ioctl(inst->vcpu_fd, KVM_TRANSLATE, &trans));

	/* ioctl(KVM_TRANSLATE) is not implemented for this arch */
	if (TST_RET == -1 && TST_ERR == EINVAL)
		return addr;

	if (TST_RET == -1)
		tst_brk(TBROK | TTERRNO, "ioctl(KVM_TRANSLATE) failed");

	if (TST_RET) {
		tst_brk(TBROK | TTERRNO,
			"Invalid ioctl(KVM_TRANSLATE) return value");
	}

	return trans.valid ? trans.physical_address : 0;
}

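/* Find the memory slot that contains the given guest physical address. */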
int tst_kvm_find_phys_memslot(const struct tst_kvm_instance *inst,
	uint64_t paddr)
{
	int i;
	uint64_t base;

	for (i = 0; i < MAX_KVM_MEMSLOTS; i++) {
		if (!inst->ram[i].userspace_addr)
			continue;

		base = inst->ram[i].guest_phys_addr;

		if (paddr >= base && paddr - base < inst->ram[i].memory_size)
			return i;
	}

	return -1;
}

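/*
 * Find the memory slot that contains the given guest virtual address.
 * Returns -1 if the address cannot be translated or is not backed by
 * any memory slot.
 */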
int tst_kvm_find_memslot(const struct tst_kvm_instance *inst, uint64_t addr)
{
	addr = tst_kvm_get_phys_address(inst, addr);

	if (!addr)
		return -1;

	return tst_kvm_find_phys_memslot(inst, addr);
}

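/*
 * Convert a guest virtual address to a host pointer into the buffer
 * backing the corresponding memory slot, or NULL if it cannot be mapped.
 */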
void *tst_kvm_get_memptr(const struct tst_kvm_instance *inst, uint64_t addr)
{
	int slot;
	char *ret;

	addr = tst_kvm_get_phys_address(inst, addr);

	if (!addr)
		return NULL;

	slot = tst_kvm_find_phys_memslot(inst, addr);

	if (slot < 0)
		return NULL;

	ret = (char *)(uintptr_t)inst->ram[slot].userspace_addr;
	return ret + (addr - inst->ram[slot].guest_phys_addr);
}

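/* Report the result message sent by the guest via tst_res()/tst_brk(). */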
void tst_kvm_print_result(const struct tst_kvm_instance *inst)
{
	int ttype;
	const struct tst_kvm_result *result = inst->result;
	const char *file;

	tst_kvm_validate_result(result->result);
	ttype = TTYPE_RESULT(result->result);
	file = tst_kvm_get_memptr(inst, result->file_addr);

	if (ttype == TBROK)
		tst_brk_(file, result->lineno, ttype, "%s", result->message);
	else
		tst_res_(file, result->lineno, ttype, "%s", result->message);
}

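/*
 * Allocate page-aligned host memory for a range of guest physical
 * addresses starting at baseaddr and register it as KVM memory slot
 * number "slot". Returns a host pointer to the start of the buffer.
 */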
void *tst_kvm_alloc_memory(struct tst_kvm_instance *inst, unsigned int slot,
	uint64_t baseaddr, size_t size, unsigned int flags)
{
	size_t pagesize, offset;
	char *ret;
	struct kvm_userspace_memory_region memslot = {
		.slot = slot,
		.flags = flags
	};

	if (slot >= MAX_KVM_MEMSLOTS)
		tst_brk(TBROK, "Invalid KVM memory slot %u", slot);

	pagesize = SAFE_SYSCONF(_SC_PAGESIZE);
	offset = baseaddr % pagesize;
	size = LTP_ALIGN(size + offset, pagesize);
	ret = tst_alloc(size);

	memslot.guest_phys_addr = baseaddr - offset;
	memslot.memory_size = size;
	memslot.userspace_addr = (uintptr_t)ret;
	SAFE_IOCTL(inst->vm_fd, KVM_SET_USER_MEMORY_REGION, &memslot);
	inst->ram[slot] = memslot;
	return ret;
}

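/*
 * Query the list of CPUID entries supported by KVM, doubling the buffer
 * size until KVM_GET_SUPPORTED_CPUID stops failing with E2BIG. Returns
 * NULL if the KVM_CAP_EXT_CPUID extension is not available.
 */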
struct kvm_cpuid2 *tst_kvm_get_cpuid(int sysfd)
{
	unsigned int count;
	int result;
	struct kvm_cpuid2 *ret;

	if (!SAFE_IOCTL(sysfd, KVM_CHECK_EXTENSION, KVM_CAP_EXT_CPUID))
		return NULL;

	for (count = 8; count < 1 << 30; count *= 2) {
		ret = SAFE_MALLOC(sizeof(struct kvm_cpuid2) +
			count * sizeof(struct kvm_cpuid_entry2));
		ret->nent = count;
		errno = 0;
		result = ioctl(sysfd, KVM_GET_SUPPORTED_CPUID, ret);

		if (!result)
			return ret;

		free(ret);

		if (errno != E2BIG)
			break;
	}

	tst_brk(TBROK | TERRNO, "ioctl(KVM_GET_SUPPORTED_CPUID) failed");
	return NULL;
}

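/*
 * Create a new KVM virtual machine with a single VCPU, copy the test
 * payload to VM_KERNEL_BASEADDR and set up the reset code and the shared
 * result structure in a dedicated memory slot.
 */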
void tst_kvm_create_instance(struct tst_kvm_instance *inst, size_t ram_size)
{
	int sys_fd;
	size_t pagesize, result_pageaddr = KVM_RESULT_BASEADDR;
	char *buf, *reset_ptr;
	struct kvm_cpuid2 *cpuid_data;
	const size_t payload_size = kvm_payload_end - kvm_payload_start;

	memset(inst, 0, sizeof(struct tst_kvm_instance));
	inst->vm_fd = -1;
	inst->vcpu_fd = -1;
	inst->vcpu_info = MAP_FAILED;

	pagesize = SAFE_SYSCONF(_SC_PAGESIZE);
	result_pageaddr -= result_pageaddr % pagesize;

	if (payload_size + MIN_FREE_RAM > ram_size - VM_KERNEL_BASEADDR) {
		ram_size = payload_size + MIN_FREE_RAM + VM_KERNEL_BASEADDR;
		ram_size = LTP_ALIGN(ram_size, 1024 * 1024);
		tst_res(TWARN, "RAM size increased to %zu bytes", ram_size);
	}

	if (ram_size > result_pageaddr) {
		ram_size = result_pageaddr;
		tst_res(TWARN, "RAM size truncated to %zu bytes", ram_size);
	}

	sys_fd = SAFE_OPEN("/dev/kvm", O_RDWR);
	inst->vcpu_info_size = SAFE_IOCTL(sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	inst->vm_fd = SAFE_IOCTL(sys_fd, KVM_CREATE_VM, 0);
	cpuid_data = tst_kvm_get_cpuid(sys_fd);
	SAFE_CLOSE(sys_fd);

	inst->vcpu_fd = SAFE_IOCTL(inst->vm_fd, KVM_CREATE_VCPU, 0);

	if (cpuid_data) {
		SAFE_IOCTL(inst->vcpu_fd, KVM_SET_CPUID2, cpuid_data);
		free(cpuid_data);
	}

	inst->vcpu_info = SAFE_MMAP(NULL, inst->vcpu_info_size,
		PROT_READ | PROT_WRITE, MAP_SHARED, inst->vcpu_fd, 0);

	buf = tst_kvm_alloc_memory(inst, 0, 0, ram_size, 0);
	memcpy(buf + VM_KERNEL_BASEADDR, kvm_payload_start, payload_size);
	buf = tst_kvm_alloc_memory(inst, 1, KVM_RESULT_BASEADDR,
		KVM_RESULT_SIZE, 0);
	memset(buf, 0, KVM_RESULT_SIZE);

	reset_ptr = buf + (VM_RESET_BASEADDR % pagesize);
	memcpy(reset_ptr, tst_kvm_reset_code, sizeof(tst_kvm_reset_code));
	inst->result = (struct tst_kvm_result *)(buf +
		(KVM_RESULT_BASEADDR % pagesize));
	inst->result->result = KVM_TNONE;
	inst->result->message[0] = '\0';
}

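/*
 * Run the VCPU until the guest signals KVM_TEXIT, printing the result
 * reported by the guest after each VM exit. If ioctl(KVM_RUN) fails
 * with exp_errno, return -1 instead of aborting the test.
 */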
int tst_kvm_run_instance(struct tst_kvm_instance *inst, int exp_errno)
{
	struct kvm_regs regs;
	int ret;

	while (1) {
		inst->result->result = KVM_TNONE;
		inst->result->message[0] = '\0';
		errno = 0;
		ret = ioctl(inst->vcpu_fd, KVM_RUN, 0);

		if (ret == -1) {
			if (errno == exp_errno)
				return ret;

			tst_brk(TBROK | TERRNO, "ioctl(KVM_RUN) failed");
		}

		if (ret < 0) {
			tst_brk(TBROK | TERRNO,
				"Invalid ioctl(KVM_RUN) return value %d", ret);
		}

		if (inst->vcpu_info->exit_reason != KVM_EXIT_HLT) {
			SAFE_IOCTL(inst->vcpu_fd, KVM_GET_REGS, &regs);
			tst_brk(TBROK,
				"Unexpected VM exit, RIP=0x%llx, reason=%u",
				regs.rip, inst->vcpu_info->exit_reason);
		}

		if (inst->result->result == KVM_TEXIT)
			break;

		tst_kvm_print_result(inst);
	}

	return ret;
}

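/* Unmap the VCPU info area and close the VCPU and VM file descriptors. */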
void tst_kvm_destroy_instance(struct tst_kvm_instance *inst)
{
	if (inst->vm_fd < 0)
		return;

	if (inst->vcpu_info != MAP_FAILED)
		SAFE_MUNMAP(inst->vcpu_info, inst->vcpu_info_size);

	if (inst->vcpu_fd >= 0)
		SAFE_CLOSE(inst->vcpu_fd);

	SAFE_CLOSE(inst->vm_fd);
	memset(inst->ram, 0, sizeof(inst->ram));
}

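/*
 * Poll the shared result structure until the guest sets KVM_TSYNC.
 * Returns 0 on success, KVM_TEXIT if the guest has exited, or -1 on
 * timeout. A negative timeout_ms means wait forever.
 */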
int tst_kvm_wait_guest(struct tst_kvm_instance *inst, int timeout_ms)
{
	volatile struct tst_kvm_result *result = inst->result;
	int32_t res;
	struct timespec start, now;

	if (timeout_ms >= 0)
		tst_clock_gettime(CLOCK_MONOTONIC, &start);

	while ((res = result->result) != KVM_TSYNC) {
		if (res == KVM_TEXIT)
			return res;

		if (timeout_ms >= 0) {
			tst_clock_gettime(CLOCK_MONOTONIC, &now);

			if (tst_timespec_diff_ms(now, start) >= timeout_ms)
				return -1;
		}

		usleep(1000);
	}

	return 0;
}

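/* Clear the guest sync signal so that tst_kvm_wait_guest() can be reused. */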
void tst_kvm_clear_guest_signal(struct tst_kvm_instance *inst)
{
	inst->result->result = KVM_TNONE;
}

void tst_kvm_setup(void)
{

}

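/*
 * Default test function: create a VM with DEFAULT_RAM_SIZE, run the
 * guest payload to completion and destroy the VM again.
 */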
void tst_kvm_run(void)
{
	tst_kvm_create_instance(&test_vm, DEFAULT_RAM_SIZE);
	tst_kvm_run_instance(&test_vm, 0);
	tst_kvm_destroy_instance(&test_vm);
	tst_free_all();
}

void tst_kvm_cleanup(void)
{
	tst_kvm_destroy_instance(&test_vm);
}