// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <asm/barrier.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"

/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX		1

/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM		0xc0000000

/* How many pages to dirty for each guest loop */
#define TEST_PAGES_PER_LOOP		1024

/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N		32UL

/* Interval for each host loop (ms) */
#define TEST_HOST_LOOP_INTERVAL		10UL

/* Dirty bitmaps are always little endian, so we need to swap on big endian */
#if defined(__s390x__)
# define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
# define test_bit_le(nr, addr) \
	test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define set_bit_le(nr, addr) \
	set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define clear_bit_le(nr, addr) \
	clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_set_bit_le(nr, addr) \
	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_clear_bit_le(nr, addr) \
	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
# define test_bit_le		test_bit
# define set_bit_le		set_bit
# define clear_bit_le		clear_bit
# define test_and_set_bit_le	test_and_set_bit
# define test_and_clear_bit_le	test_and_clear_bit
#endif
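
/*
 * Illustration of the swizzle (not used by the code): with
 * BITS_PER_LONG == 64, BITOP_LE_SWIZZLE is 56, and XOR-ing the bit
 * number mirrors the byte index within each long while keeping the
 * bit-within-byte position, e.g.:
 *
 *   test_bit_le(0, addr) -> test_bit(56, addr)   (memory byte 0, bit 0)
 *   test_bit_le(9, addr) -> test_bit(49, addr)   (memory byte 1, bit 1)
 */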

#define TEST_DIRTY_RING_COUNT		65536

#define SIG_IPI SIGUSR1
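/* SIGUSR1 is repurposed as the IPI that kicks the vcpu thread out of KVM_RUN */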

/*
 * Guest/Host shared variables. Ensure addr_gva2hva() and/or
 * sync_global_to/from_guest() are used when accessing from
 * the host. READ/WRITE_ONCE() should also be used with anything
 * that may change.
 */
static uint64_t host_page_size;
static uint64_t guest_page_size;
static uint64_t guest_num_pages;
static uint64_t random_array[TEST_PAGES_PER_LOOP];
static uint64_t iteration;

/*
 * Guest physical memory offset of the testing memory slot.
 * This will be set to the topmost valid physical address minus
 * the test memory size.
 */
static uint64_t guest_test_phys_mem;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

/*
 * Continuously write to the first 8 bytes of random pages within the
 * testing memory region.
 */
static void guest_code(void)
{
	uint64_t addr;
	int i;

	/*
	 * On s390x, all pages of a 1M segment are initially marked as dirty
	 * when a page of the segment is written to for the very first time.
	 * To compensate for this peculiarity, this test touches all pages
	 * during the first iteration.
	 */
	for (i = 0; i < guest_num_pages; i++) {
		addr = guest_test_virt_mem + i * guest_page_size;
		*(uint64_t *)addr = READ_ONCE(iteration);
	}

	while (true) {
		for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
			addr = guest_test_virt_mem;
			addr += (READ_ONCE(random_array[i]) % guest_num_pages)
				* guest_page_size;
			addr = align_down(addr, host_page_size);
			*(uint64_t *)addr = READ_ONCE(iteration);
		}

		/* Tell the host that we need more random numbers */
		GUEST_SYNC(1);
	}
}

/* Host variables */
static bool host_quit;

/* Points to the test VM memory region on which we track dirty logs */
static void *host_test_mem;
static uint64_t host_num_pages;

/* For statistics only */
static uint64_t host_dirty_count;
static uint64_t host_clear_count;
static uint64_t host_track_next_count;

/* Whether dirty ring reset is requested, or finished */
static sem_t sem_vcpu_stop;
static sem_t sem_vcpu_cont;
/*
 * This is only set by the main thread, and only cleared by the vcpu
 * thread. It is used to request that the vcpu thread stop at the next
 * GUEST_SYNC, since GUEST_SYNC is the only place where we guarantee
 * that "dirty bit" and "dirty data" match. E.g., SIG_IPI provides no
 * such guarantee, because the vcpu may be interrupted after setting the
 * dirty bit but before the data is written.
 */
static atomic_t vcpu_sync_stop_requested;
/*
 * This is updated by the vcpu thread to tell the host whether it's a
 * ring-full event. It should only be read after a sem_wait() of
 * sem_vcpu_stop and before the vcpu continues to run.
 */
static bool dirty_ring_vcpu_ring_full;
/*
 * This is only used for verifying the dirty pages. The dirty ring has a
 * very tricky case when the ring just got full: kvm does a userspace
 * exit due to ring full, and at that point the very last PFN is set but
 * the data is not actually changed (the guest WRITE is not applied
 * yet), because we noticed that the dirty ring is full, refused to
 * continue the vcpu, and recorded the dirty gfn with the old contents.
 *
 * For this specific case, it's safe to skip checking this pfn for this
 * bit, because it's a redundant bit: when the write happens later the
 * bit will be set again. We use this variable to always keep track of
 * the latest dirty gfn we've collected, so that if a data mismatch is
 * found later in the verifying process, we let it pass.
 */
static uint64_t dirty_ring_last_page;

enum log_mode_t {
	/* Only use KVM_GET_DIRTY_LOG for logging */
	LOG_MODE_DIRTY_LOG = 0,

	/* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
	LOG_MODE_CLEAR_LOG = 1,

	/* Use dirty ring for logging */
	LOG_MODE_DIRTY_RING = 2,

	LOG_MODE_NUM,

	/* Run all supported modes */
	LOG_MODE_ALL = LOG_MODE_NUM,
};

/* Mode of logging to test. Default is to run all supported modes */
static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
/* Logging mode for current run */
static enum log_mode_t host_log_mode;
static pthread_t vcpu_thread;
static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;

static void vcpu_kick(void)
{
	pthread_kill(vcpu_thread, SIG_IPI);
}

/*
 * Our test plays signal tricks, so use a wrapper around sem_wait() that
 * retries when the wait is interrupted by a signal.
 */
static void sem_wait_until(sem_t *sem)
{
	int ret;

	do
		ret = sem_wait(sem);
	while (ret == -1 && errno == EINTR);
}

static bool clear_log_supported(void)
{
	return kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
}

static void clear_log_create_vm_done(struct kvm_vm *vm)
{
	u64 manual_caps;

	manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
	manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
			KVM_DIRTY_LOG_INITIALLY_SET);
	vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
}

static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					  void *bitmap, uint32_t num_pages)
{
	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
}

static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					  void *bitmap, uint32_t num_pages)
{
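	/*
	 * With KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 enabled (see
	 * clear_log_create_vm_done()), KVM_GET_DIRTY_LOG only fetches the
	 * bitmap; the pages are re-armed for dirty tracking only by the
	 * explicit KVM_CLEAR_DIRTY_LOG call below.
	 */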
	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
	kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
}

/* Should only be called after a GUEST_SYNC */
static void vcpu_handle_sync_stop(void)
{
	if (atomic_read(&vcpu_sync_stop_requested)) {
		/* The main thread is sleeping in sem_wait_until(), waiting for us */
		atomic_set(&vcpu_sync_stop_requested, false);
		sem_post(&sem_vcpu_stop);
		sem_wait_until(&sem_vcpu_cont);
	}
}

static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
	struct kvm_run *run = vcpu->run;

	TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
		    "vcpu run failed: errno=%d", err);

	TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
		    "Invalid guest sync status: exit_reason=%s\n",
		    exit_reason_str(run->exit_reason));

	vcpu_handle_sync_stop();
}

static bool dirty_ring_supported(void)
{
	return (kvm_has_cap(KVM_CAP_DIRTY_LOG_RING) ||
		kvm_has_cap(KVM_CAP_DIRTY_LOG_RING_ACQ_REL));
}

static void dirty_ring_create_vm_done(struct kvm_vm *vm)
{
	/*
	 * Switch to dirty ring mode after VM creation but before any
	 * vcpu is created.
	 */
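	/*
	 * Size math, assuming the default configuration: 65536 entries
	 * times the 16-byte struct kvm_dirty_gfn gives a 1 MiB ring per
	 * vcpu.
	 */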
	vm_enable_dirty_ring(vm, test_dirty_ring_count *
			     sizeof(struct kvm_dirty_gfn));
}

static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
	return smp_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
	smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}
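
/*
 * Note on ordering (descriptive only): the load-acquire above pairs with
 * KVM's release of the ring entry, so the slot/offset fields are only
 * read after the kernel has finished publishing them; likewise the
 * store-release of KVM_DIRTY_GFN_F_RESET ensures our reads of the entry
 * complete before the kernel can observe the reset and reuse the slot.
 */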

static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
				       int slot, void *bitmap,
				       uint32_t num_pages, uint32_t *fetch_index)
{
	struct kvm_dirty_gfn *cur;
	uint32_t count = 0;

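	/*
	 * fetch_index is a free-running counter private to this test; it
	 * is reduced modulo the ring size to locate the slot, and it
	 * advances until the first entry not (yet) published as dirty.
	 */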
	while (true) {
		cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
		if (!dirty_gfn_is_dirtied(cur))
			break;
		TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
			    "%u != %u", cur->slot, slot);
		TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
			    "0x%llx >= 0x%x", cur->offset, num_pages);
		//pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
		set_bit_le(cur->offset, bitmap);
		dirty_ring_last_page = cur->offset;
		dirty_gfn_set_collected(cur);
		(*fetch_index)++;
		count++;
	}

	return count;
}

static void dirty_ring_wait_vcpu(void)
{
	/* This makes sure that the hardware PML buffer is flushed */
	vcpu_kick();
	sem_wait_until(&sem_vcpu_stop);
}

static void dirty_ring_continue_vcpu(void)
{
	pr_info("Notifying vcpu to continue\n");
	sem_post(&sem_vcpu_cont);
}

static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					   void *bitmap, uint32_t num_pages)
{
	/* We only have one vcpu */
	static uint32_t fetch_index = 0;
	uint32_t count = 0, cleared;
	bool continued_vcpu = false;

	dirty_ring_wait_vcpu();

	if (!dirty_ring_vcpu_ring_full) {
		/*
		 * This is not a ring-full event, it's safe to allow
		 * vcpu to continue
		 */
		dirty_ring_continue_vcpu();
		continued_vcpu = true;
	}

	/* Only have one vcpu */
	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
				       slot, bitmap, num_pages, &fetch_index);

	cleared = kvm_vm_reset_dirty_ring(vcpu->vm);

	/* Cleared pages should be the same as collected */
	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
		    "with collected (%u)", cleared, count);

	if (!continued_vcpu) {
		TEST_ASSERT(dirty_ring_vcpu_ring_full,
			    "vcpu was not resumed even though the ring wasn't full");
		dirty_ring_continue_vcpu();
	}

	pr_info("Iteration %ld collected %u pages\n", iteration, count);
}

static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
	struct kvm_run *run = vcpu->run;

	/* A ucall-sync or ring-full event is allowed */
	if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
		/* We should allow this to continue */
		;
	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
		   (ret == -1 && err == EINTR)) {
		/* Update the flag before pausing */
		WRITE_ONCE(dirty_ring_vcpu_ring_full,
			   run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
		sem_post(&sem_vcpu_stop);
		pr_info("vcpu stops because %s...\n",
			dirty_ring_vcpu_ring_full ?
			"dirty ring is full" : "vcpu is kicked out");
		sem_wait_until(&sem_vcpu_cont);
		pr_info("vcpu continues now.\n");
	} else {
		TEST_ASSERT(false, "Invalid guest sync status: "
			    "exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));
	}
}

static void dirty_ring_before_vcpu_join(void)
{
	/* Post the semaphore once more so a stopped vcpu can resume and quit */
	sem_post(&sem_vcpu_cont);
}

struct log_mode {
	const char *name;
	/* Return true if this mode is supported, otherwise false */
	bool (*supported)(void);
	/* Hook when the vm creation is done (before vcpu creation) */
	void (*create_vm_done)(struct kvm_vm *vm);
	/* Hook to collect the dirty pages into the bitmap provided */
	void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
				     void *bitmap, uint32_t num_pages);
	/* Hook to call after each vcpu run */
	void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
	void (*before_vcpu_join) (void);
} log_modes[LOG_MODE_NUM] = {
	{
		.name = "dirty-log",
		.collect_dirty_pages = dirty_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "clear-log",
		.supported = clear_log_supported,
		.create_vm_done = clear_log_create_vm_done,
		.collect_dirty_pages = clear_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "dirty-ring",
		.supported = dirty_ring_supported,
		.create_vm_done = dirty_ring_create_vm_done,
		.collect_dirty_pages = dirty_ring_collect_dirty_pages,
		.before_vcpu_join = dirty_ring_before_vcpu_join,
		.after_vcpu_run = dirty_ring_after_vcpu_run,
	},
};
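
/* The array above is indexed by enum log_mode_t, so entries must stay in that order. */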

/*
 * We use this bitmap to track pages that should have their dirty bits
 * set in the _next_ iteration. For example, if we detect that a page's
 * value changed to the current iteration number but at the same time
 * the page's bit is cleared in the latest bitmap, then the system must
 * report that write in the next get-dirty-log call.
 */
static unsigned long *host_bmap_track;

static void log_modes_dump(void)
{
	int i;

	printf("all");
	for (i = 0; i < LOG_MODE_NUM; i++)
		printf(", %s", log_modes[i].name);
	printf("\n");
}

static bool log_mode_supported(void)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->supported)
		return mode->supported();

	return true;
}

static void log_mode_create_vm_done(struct kvm_vm *vm)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->create_vm_done)
		mode->create_vm_done(vm);
}

static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					 void *bitmap, uint32_t num_pages)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	TEST_ASSERT(mode->collect_dirty_pages != NULL,
		    "collect_dirty_pages() is required for any log mode!");
	mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages);
}

static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->after_vcpu_run)
		mode->after_vcpu_run(vcpu, ret, err);
}

static void log_mode_before_vcpu_join(void)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->before_vcpu_join)
		mode->before_vcpu_join();
}

static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
	uint64_t i;

	for (i = 0; i < size; i++)
		guest_array[i] = random();
}

static void *vcpu_worker(void *data)
{
	int ret;
	struct kvm_vcpu *vcpu = data;
	struct kvm_vm *vm = vcpu->vm;
	uint64_t *guest_array;
	uint64_t pages_count = 0;
	struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
						 + sizeof(sigset_t));
	sigset_t *sigset = (sigset_t *) &sigmask->sigset;

	/*
	 * SIG_IPI is unblocked atomically while in KVM_RUN. It causes the
	 * ioctl to return with -EINTR, but it is still pending and we need
	 * to accept it with the sigwait.
	 */
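	/* len is the size of the kernel's sigset_t (8 bytes on Linux), not glibc's larger one */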
	sigmask->len = 8;
	pthread_sigmask(0, NULL, sigset);
	sigdelset(sigset, SIG_IPI);
	vcpu_ioctl(vcpu, KVM_SET_SIGNAL_MASK, sigmask);

	sigemptyset(sigset);
	sigaddset(sigset, SIG_IPI);

	guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);

	while (!READ_ONCE(host_quit)) {
		/* Refill the random numbers the guest picks pages from */
		generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
		pages_count += TEST_PAGES_PER_LOOP;
		/* Let the guest dirty the random pages */
		ret = __vcpu_run(vcpu);
		if (ret == -1 && errno == EINTR) {
			/* Clear the pending kick signal */
			int sig = -1;

			sigwait(sigset, &sig);
			assert(sig == SIG_IPI);
		}
		log_mode_after_vcpu_run(vcpu, ret, errno);
	}

	pr_info("Dirtied %"PRIu64" pages\n", pages_count);

	return NULL;
}

static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
{
	uint64_t step = vm_num_host_pages(mode, 1);
	uint64_t page;
	uint64_t *value_ptr;
	uint64_t min_iter = 0;

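	/*
	 * Note (descriptive only): step is the number of host pages backing
	 * one guest page, so when guest pages are larger than host pages
	 * only the first host page of each guest page is checked (the
	 * guest's aligned write lands there).
	 */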
	for (page = 0; page < host_num_pages; page += step) {
		value_ptr = host_test_mem + page * host_page_size;

		/* If this is a special page that we were tracking... */
		if (test_and_clear_bit_le(page, host_bmap_track)) {
			host_track_next_count++;
			TEST_ASSERT(test_bit_le(page, bmap),
				    "Page %"PRIu64" should have its dirty bit "
				    "set in this iteration but it is missing",
				    page);
		}

		if (test_and_clear_bit_le(page, bmap)) {
			bool matched;

			host_dirty_count++;

			/*
			 * If the bit is set, the value written onto
			 * the corresponding page should be either the
			 * previous iteration number or the current one.
			 */
			matched = (*value_ptr == iteration ||
				   *value_ptr == iteration - 1);

			if (host_log_mode == LOG_MODE_DIRTY_RING && !matched) {
				if (*value_ptr == iteration - 2 && min_iter <= iteration - 2) {
					/*
					 * Short answer: this case is special
					 * only for the dirty ring test, where
					 * the page is the last page before a
					 * kvm dirty ring full in iteration N-2.
					 *
					 * Long answer: Assuming ring size R,
					 * one possible condition is:
					 *
					 *      main thr       vcpu thr
					 *      --------       --------
					 *    iter=1
					 *                   write 1 to page 0~(R-1)
					 *                   full, vmexit
					 *    collect 0~(R-1)
					 *    kick vcpu
					 *                   write 1 to (R-1)~(2R-2)
					 *                   full, vmexit
					 *    iter=2
					 *    collect (R-1)~(2R-2)
					 *    kick vcpu
					 *                   write 1 to (2R-2)
					 *                   (NOTE!!! "1" cached in cpu reg)
					 *                   write 2 to (2R-1)~(3R-3)
					 *                   full, vmexit
					 *    iter=3
					 *    collect (2R-2)~(3R-3)
					 *    (here if we read value on page
					 *     "2R-2" is 1, while iter=3!!!)
					 *
					 * This however can only happen once per iteration.
					 */
					min_iter = iteration - 1;
					continue;
				} else if (page == dirty_ring_last_page) {
					/*
					 * Please refer to the comments at
					 * dirty_ring_last_page.
					 */
					continue;
				}
			}

			TEST_ASSERT(matched,
				    "Set page %"PRIu64" value %"PRIu64
				    " incorrect (iteration=%"PRIu64")",
				    page, *value_ptr, iteration);
		} else {
			host_clear_count++;
			/*
			 * If cleared, the value written can be any
			 * value less than or equal to the iteration
			 * number. Note that the value can be exactly
			 * (iteration-1) if that write happens like
			 * this:
			 *
			 * (1) increase loop count to "iteration-1"
			 * (2) write to page P happens (with value
			 *     "iteration-1")
			 * (3) get dirty log for "iteration-1"; we'll
			 *     see that page P bit is set (dirtied),
			 *     and not set the bit in host_bmap_track
			 * (4) increase loop count to "iteration"
			 *     (which is current iteration)
			 * (5) get dirty log for current iteration,
			 *     we'll see that page P is cleared, with
			 *     value "iteration-1".
			 */
			TEST_ASSERT(*value_ptr <= iteration,
				    "Clear page %"PRIu64" value %"PRIu64
				    " incorrect (iteration=%"PRIu64")",
				    page, *value_ptr, iteration);
			if (*value_ptr == iteration) {
				/*
				 * This page is _just_ modified; it
				 * should report its dirtiness in the
				 * next run
				 */
				set_bit_le(page, host_bmap_track);
			}
		}
	}
}

static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
				uint64_t extra_mem_pages, void *guest_code)
{
	struct kvm_vm *vm;

	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

	vm = __vm_create(mode, 1, extra_mem_pages);

	log_mode_create_vm_done(vm);
	*vcpu = vm_vcpu_add(vm, 0, guest_code);
	return vm;
}

#define DIRTY_MEM_BITS 30 /* 1G */
#define PAGE_SHIFT_4K  12

struct test_params {
	unsigned long iterations;
	unsigned long interval;
	uint64_t phys_offset;
};

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	unsigned long *bmap;

	if (!log_mode_supported()) {
		print_skip("Log mode '%s' not supported",
			   log_modes[host_log_mode].name);
		return;
	}

	/*
	 * We reserve page tables for twice the extra dirty memory, which
	 * definitely covers the original (1G+) test range. The
	 * calculation is done with the 4K page size, the smallest
	 * possible, so the page count is enough for all archs (e.g., a
	 * 64K page size guest will need even less memory for page
	 * tables).
	 */
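	/* With DIRTY_MEM_BITS = 30 and PAGE_SHIFT_4K = 12 this is 2 << 18 = 512Ki pages. */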
	vm = create_vm(mode, &vcpu,
		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code);

	guest_page_size = vm->page_size;
	/*
	 * A little more than 1G of guest-page-sized pages. This covers
	 * the case where the size is not aligned to 64 pages.
	 */
	guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);

	host_page_size = getpagesize();
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);

	if (!p->phys_offset) {
		guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
				      guest_page_size;
		guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
	} else {
		guest_test_phys_mem = p->phys_offset;
	}

#ifdef __s390x__
	/* Align to 1M (segment size) */
	guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
#endif

	pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);

	bmap = bitmap_zalloc(host_num_pages);
	host_bmap_track = bitmap_zalloc(host_num_pages);

	/* Add an extra memory slot for testing dirty logging */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    guest_test_phys_mem,
				    TEST_MEM_SLOT_INDEX,
				    guest_num_pages,
				    KVM_MEM_LOG_DIRTY_PAGES);

	/* Do mapping for the dirty track memory slot */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);

	/* Cache the HVA pointer of the region */
	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);

	ucall_init(vm, NULL);

	/* Export the shared variables to the guest */
	sync_global_to_guest(vm, host_page_size);
	sync_global_to_guest(vm, guest_page_size);
	sync_global_to_guest(vm, guest_test_virt_mem);
	sync_global_to_guest(vm, guest_num_pages);

	/* Start the iterations */
	iteration = 1;
	sync_global_to_guest(vm, iteration);
	host_quit = false;
	host_dirty_count = 0;
	host_clear_count = 0;
	host_track_next_count = 0;

	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);

	while (iteration < p->iterations) {
		/* Give the vcpu thread some time to dirty some pages */
		usleep(p->interval * 1000);
		log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
					     bmap, host_num_pages);

		/*
		 * See the vcpu_sync_stop_requested definition for details on
		 * why we need to stop the vcpu while verifying the data.
		 */
		atomic_set(&vcpu_sync_stop_requested, true);
		sem_wait_until(&sem_vcpu_stop);
		/*
		 * NOTE: for dirty ring, it's possible that we didn't stop at
		 * GUEST_SYNC but instead we stopped because the ring is full;
		 * that's okay too, because ring full means we're only missing
		 * the flush of the last page, and since we handle the last
		 * page specially, verification will succeed anyway.
		 */
		assert(host_log_mode == LOG_MODE_DIRTY_RING ||
		       atomic_read(&vcpu_sync_stop_requested) == false);
		vm_dirty_log_verify(mode, bmap);
		sem_post(&sem_vcpu_cont);

		iteration++;
		sync_global_to_guest(vm, iteration);
	}

	/* Tell the vcpu thread to quit */
	host_quit = true;
	log_mode_before_vcpu_join();
	pthread_join(vcpu_thread, NULL);

	pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
		"track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
		host_track_next_count);

	free(bmap);
	free(host_bmap_track);
	ucall_uninit(vm);
	kvm_vm_free(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-i iterations] [-I interval] "
	       "[-p offset] [-c dirty_ring_count] [-m mode] [-M log_mode]\n", name);
	puts("");
	printf(" -c: specify dirty ring size, in number of entries\n");
	printf("     (only useful for dirty-ring test; default: %"PRIu32")\n",
	       TEST_DIRTY_RING_COUNT);
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
	       TEST_HOST_LOOP_INTERVAL);
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	printf(" -M: specify the host logging mode "
	       "(default: run all log modes). Supported modes:\n\t");
	log_modes_dump();
	guest_modes_help();
	puts("");
	exit(0);
}

int main(int argc, char *argv[])
{
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.interval = TEST_HOST_LOOP_INTERVAL,
	};
	int opt, i;
	sigset_t sigset;

	sem_init(&sem_vcpu_stop, 0, 0);
	sem_init(&sem_vcpu_cont, 0, 0);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
		switch (opt) {
		case 'c':
			test_dirty_ring_count = strtol(optarg, NULL, 10);
			break;
		case 'i':
			p.iterations = strtol(optarg, NULL, 10);
			break;
		case 'I':
			p.interval = strtol(optarg, NULL, 10);
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'M':
			if (!strcmp(optarg, "all")) {
				host_log_mode_option = LOG_MODE_ALL;
				break;
			}
			for (i = 0; i < LOG_MODE_NUM; i++) {
				if (!strcmp(optarg, log_modes[i].name)) {
					pr_info("Setting log mode to: '%s'\n",
						optarg);
					host_log_mode_option = i;
					break;
				}
			}
			if (i == LOG_MODE_NUM) {
				printf("Log mode '%s' invalid. Please choose "
				       "from: ", optarg);
				log_modes_dump();
				exit(1);
			}
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
	TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");

	pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
		p.iterations, p.interval);

	srandom(time(0));

	/* Ensure that vCPU threads start with SIG_IPI blocked. */
	sigemptyset(&sigset);
	sigaddset(&sigset, SIG_IPI);
	pthread_sigmask(SIG_BLOCK, &sigset, NULL);

	if (host_log_mode_option == LOG_MODE_ALL) {
		/* Run each log mode */
		for (i = 0; i < LOG_MODE_NUM; i++) {
			pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
			host_log_mode = i;
			for_each_guest_mode(run_test, &p);
		}
	} else {
		host_log_mode = host_log_mode_option;
		for_each_guest_mode(run_test, &p);
	}

	return 0;
}