// SPDX-License-Identifier: GPL-2.0
/*
 * vgic_irq.c - Test userspace injection of IRQs
 *
 * This test validates the injection of IRQs from userspace using various
 * methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
 * host to inject a specific intid via a GUEST_SYNC call, and then checks that
 * it received it.
 */

#include <asm/kvm.h>
#include <asm/kvm_para.h>
#include <sys/eventfd.h>
#include <linux/sizes.h>

#include "processor.h"
#include "test_util.h"
#include "kvm_util.h"
#include "gic.h"
#include "gic_v3.h"
#include "vgic.h"

#define GICD_BASE_GPA	0x08000000ULL
#define GICR_BASE_GPA	0x080A0000ULL

/*
 * Stores the user-specified args; it's passed to the guest and to every test
 * function.
 */
struct test_args {
	uint32_t nr_irqs;	/* number of KVM-supported IRQs. */
	bool eoi_split;		/* 1 is eoir+dir, 0 is eoir only */
	bool level_sensitive;	/* 1 is level, 0 is edge */
	int kvm_max_routes;	/* output of KVM_CAP_IRQ_ROUTING */
	bool kvm_supports_irqfd;	/* output of KVM_CAP_IRQFD */
};

/*
 * KVM implements 32 priority levels:
 * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
 *
 * Note that these macros will still be correct in the case that KVM implements
 * more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
 */
#define KVM_NUM_PRIOS		32
#define KVM_PRIO_SHIFT		3 /* steps of 8 = 1 << 3 */
#define KVM_PRIO_STEPS		(1 << KVM_PRIO_SHIFT) /* 8 */
#define LOWEST_PRIO		(KVM_NUM_PRIOS - 1)
#define CPU_PRIO_MASK		(LOWEST_PRIO << KVM_PRIO_SHIFT)	/* 0xf8 */
#define IRQ_DEFAULT_PRIO	(LOWEST_PRIO - 1)
#define IRQ_DEFAULT_PRIO_REG	(IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */

static void *dist = (void *)GICD_BASE_GPA;
static void *redist = (void *)GICR_BASE_GPA;

/*
 * The kvm_inject_* utilities are used by the guest to ask the host to inject
 * interrupts (e.g., using the KVM_IRQ_LINE ioctl).
 */

typedef enum {
	KVM_INJECT_EDGE_IRQ_LINE = 1,
	KVM_SET_IRQ_LINE,
	KVM_SET_IRQ_LINE_HIGH,
	KVM_SET_LEVEL_INFO_HIGH,
	KVM_INJECT_IRQFD,
	KVM_WRITE_ISPENDR,
	KVM_WRITE_ISACTIVER,
} kvm_inject_cmd;

struct kvm_inject_args {
	kvm_inject_cmd cmd;
	uint32_t first_intid;
	uint32_t num;
	int level;
	bool expect_failure;
};

/* Used on the guest side to perform the hypercall. */
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
		uint32_t num, int level, bool expect_failure);

/* Used on the host side to get the hypercall info. */
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
		struct kvm_inject_args *args);

#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure)		\
	kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)

#define KVM_INJECT_MULTI(cmd, intid, num)				\
	_KVM_INJECT_MULTI(cmd, intid, num, false)

#define _KVM_INJECT(cmd, intid, expect_failure)				\
	_KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)

#define KVM_INJECT(cmd, intid)						\
	_KVM_INJECT_MULTI(cmd, intid, 1, false)

#define KVM_ACTIVATE(cmd, intid)					\
	kvm_inject_call(cmd, intid, 1, 1, false)

struct kvm_inject_desc {
	kvm_inject_cmd cmd;
	/* can inject SGIs, PPIs, and/or SPIs. */
	bool sgi, ppi, spi;
};

static struct kvm_inject_desc inject_edge_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_INJECT_EDGE_IRQ_LINE,		false, false, true },
	{ KVM_INJECT_IRQFD,			false, false, true },
	{ KVM_WRITE_ISPENDR,			true,  false, true },
	{ 0, },
};

static struct kvm_inject_desc inject_level_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_SET_IRQ_LINE_HIGH,		false, true,  true },
	{ KVM_SET_LEVEL_INFO_HIGH,		false, true,  true },
	{ KVM_INJECT_IRQFD,			false, false, true },
	{ KVM_WRITE_ISPENDR,			false, true,  true },
	{ 0, },
};

static struct kvm_inject_desc set_active_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_WRITE_ISACTIVER,			true,  true,  true },
	{ 0, },
};

#define for_each_inject_fn(t, f)					\
	for ((f) = (t); (f)->cmd; (f)++)

#define for_each_supported_inject_fn(args, t, f)			\
	for_each_inject_fn(t, f)					\
		if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)

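/*
 * Activation via ISACTIVER never depends on irqfd support, so the inject
 * filter below is a no-op for set_active_fns (it has no KVM_INJECT_IRQFD
 * entries); the alias just keeps the iteration style uniform.
 */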
#define for_each_supported_activate_fn(args, t, f)			\
	for_each_supported_inject_fn((args), (t), (f))

/* Shared between the guest main thread and the IRQ handlers. */
volatile uint64_t irq_handled;
volatile uint32_t irqnr_received[MAX_SPI + 1];

static void reset_stats(void)
{
	int i;

	irq_handled = 0;
	for (i = 0; i <= MAX_SPI; i++)
		irqnr_received[i] = 0;
}

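/*
 * ICV_AP1R0_EL1 tracks the group-1 active priorities; it must read back as
 * zero once every acknowledged IRQ has been deactivated again.
 */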
static uint64_t gic_read_ap1r0(void)
{
	uint64_t reg = read_sysreg_s(SYS_ICV_AP1R0_EL1);

	dsb(sy);
	return reg;
}

static void gic_write_ap1r0(uint64_t val)
{
	write_sysreg_s(val, SYS_ICV_AP1R0_EL1);
	isb();
}

static void guest_set_irq_line(uint32_t intid, uint32_t level);

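/*
 * Generic IRQ handler: ack the IRQ, sanity-check its active/pending state,
 * lower the line for level-sensitive IRQs, record it in the stats, and then
 * deactivate it via EOIR (plus DIR when EOI is split into priority-drop and
 * deactivation).
 */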
static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
{
	uint32_t intid = gic_get_and_ack_irq();

	if (intid == IAR_SPURIOUS)
		return;

	GUEST_ASSERT(gic_irq_get_active(intid));

	if (!level_sensitive)
		GUEST_ASSERT(!gic_irq_get_pending(intid));

	if (level_sensitive)
		guest_set_irq_line(intid, 0);

	GUEST_ASSERT(intid < MAX_SPI);
	irqnr_received[intid] += 1;
	irq_handled += 1;

	gic_set_eoi(intid);
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	if (eoi_split)
		gic_set_dir(intid);

	GUEST_ASSERT(!gic_irq_get_active(intid));
	GUEST_ASSERT(!gic_irq_get_pending(intid));
}

static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
		uint32_t num, int level, bool expect_failure)
{
	struct kvm_inject_args args = {
		.cmd = cmd,
		.first_intid = first_intid,
		.num = num,
		.level = level,
		.expect_failure = expect_failure,
	};
	GUEST_SYNC(&args);
}

#define GUEST_ASSERT_IAR_EMPTY()					\
do {									\
	uint32_t _intid;						\
	_intid = gic_get_and_ack_irq();					\
	GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS);		\
} while (0)

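/*
 * Generate the four handler variants (eoi_split x level_sensitive) at
 * compile time, so guest_irq_handlers[][] below can be indexed with the
 * runtime test arguments.
 */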
#define CAT_HELPER(a, b) a ## b
#define CAT(a, b) CAT_HELPER(a, b)
#define PREFIX guest_irq_handler_
#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
#define GENERATE_GUEST_IRQ_HANDLER(split, lev)				\
static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs)		\
{									\
	guest_irq_generic_handler(split, lev);				\
}

GENERATE_GUEST_IRQ_HANDLER(0, 0);
GENERATE_GUEST_IRQ_HANDLER(0, 1);
GENERATE_GUEST_IRQ_HANDLER(1, 0);
GENERATE_GUEST_IRQ_HANDLER(1, 1);

static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
	{GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
	{GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
};

static void reset_priorities(struct test_args *args)
{
	int i;

	for (i = 0; i < args->nr_irqs; i++)
		gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
}

static void guest_set_irq_line(uint32_t intid, uint32_t level)
{
	kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
}

static void test_inject_fail(struct test_args *args,
		uint32_t intid, kvm_inject_cmd cmd)
{
	reset_stats();

	_KVM_INJECT(cmd, intid, true);
	/* no IRQ to handle on entry */

	GUEST_ASSERT_EQ(irq_handled, 0);
	GUEST_ASSERT_IAR_EMPTY();
}

static void guest_inject(struct test_args *args,
		uint32_t first_intid, uint32_t num,
		kvm_inject_cmd cmd)
{
	uint32_t i;

	reset_stats();

	/* Cycle over all priorities to make things more interesting. */
	for (i = first_intid; i < num + first_intid; i++)
		gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << KVM_PRIO_SHIFT);

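	/*
	 * Mask IRQs at the PSTATE level, then alternate WFI with a brief
	 * unmasked window: each pending IRQ is taken (and counted by the
	 * handler) at a well-defined point before irq_handled is re-checked.
	 */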
	asm volatile("msr daifset, #2" : : : "memory");
	KVM_INJECT_MULTI(cmd, first_intid, num);

	while (irq_handled < num) {
		asm volatile("wfi\n"
			     "msr daifclr, #2\n"
			     /* handle IRQ */
			     "msr daifset, #2\n"
			     : : : "memory");
	}
	asm volatile("msr daifclr, #2" : : : "memory");

	GUEST_ASSERT_EQ(irq_handled, num);
	for (i = first_intid; i < num + first_intid; i++)
		GUEST_ASSERT_EQ(irqnr_received[i], 1);
	GUEST_ASSERT_IAR_EMPTY();

	reset_priorities(args);
}

/*
 * Restore the active state of multiple concurrent IRQs (given by num). This
 * does what a live-migration would do on the destination side, assuming there
 * are some active IRQs that were not deactivated yet.
 */
static void guest_restore_active(struct test_args *args,
		uint32_t first_intid, uint32_t num,
		kvm_inject_cmd cmd)
{
	uint32_t prio, intid, ap1r;
	int i;

	/*
	 * Set the priorities of the num IRQs (first_intid to first_intid +
	 * num - 1) in descending order, so intid+1 can preempt intid.
	 */
	for (i = 0, prio = (num - 1) * KVM_PRIO_STEPS; i < num;
			i++, prio -= KVM_PRIO_STEPS) {
		GUEST_ASSERT((int)prio >= 0);
		intid = i + first_intid;
		gic_set_priority(intid, prio);
	}

	/*
	 * In a real migration, KVM would restore all GIC state before running
	 * guest code.
	 */
	for (i = 0; i < num; i++) {
		intid = i + first_intid;
		KVM_ACTIVATE(cmd, intid);
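		/*
		 * Mark one more priority level as active in AP1R0, mimicking
		 * the state the GIC accumulates while nested IRQs are being
		 * handled.
		 */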
		ap1r = gic_read_ap1r0();
		ap1r |= 1U << i;
		gic_write_ap1r0(ap1r);
	}

	/* This is where the "migration" would occur. */

	/* Finish handling the IRQs starting with the highest priority one. */
	for (i = 0; i < num; i++) {
		intid = num - i - 1 + first_intid;
		gic_set_eoi(intid);
		if (args->eoi_split)
			gic_set_dir(intid);
	}

	for (i = 0; i < num; i++)
		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	GUEST_ASSERT_IAR_EMPTY();
}


/*
 * Polls the IAR until it's not a spurious interrupt.
 *
 * This function should only be used in test_inject_preemption (with IRQs
 * masked).
 */
static uint32_t wait_for_and_activate_irq(void)
{
	uint32_t intid;

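	/*
	 * WFI is only a hint: with IRQs masked at the PSTATE level, a pending
	 * interrupt still wakes the core without being taken, so it can be
	 * acknowledged by hand below.
	 */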
	do {
		asm volatile("wfi" : : : "memory");
		intid = gic_get_and_ack_irq();
	} while (intid == IAR_SPURIOUS);

	return intid;
}

/*
 * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
 * handle them without handling the actual exceptions. This is done by masking
 * interrupts for the whole test.
 */
static void test_inject_preemption(struct test_args *args,
		uint32_t first_intid, int num,
		kvm_inject_cmd cmd)
{
	uint32_t intid, prio, step = KVM_PRIO_STEPS;
	int i;

	/*
	 * Set the priorities of the num IRQs (first_intid to first_intid +
	 * num - 1) in descending order, so intid+1 can preempt intid.
	 */
	for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
		GUEST_ASSERT((int)prio >= 0);
		intid = i + first_intid;
		gic_set_priority(intid, prio);
	}

	local_irq_disable();

	for (i = 0; i < num; i++) {
		uint32_t tmp;
		intid = i + first_intid;
		KVM_INJECT(cmd, intid);
		/* Each successive IRQ will preempt the previous one. */
		tmp = wait_for_and_activate_irq();
		GUEST_ASSERT_EQ(tmp, intid);
		if (args->level_sensitive)
			guest_set_irq_line(intid, 0);
	}

	/* Finish handling the IRQs starting with the highest priority one. */
	for (i = 0; i < num; i++) {
		intid = num - i - 1 + first_intid;
		gic_set_eoi(intid);
		if (args->eoi_split)
			gic_set_dir(intid);
	}

	local_irq_enable();

	for (i = 0; i < num; i++)
		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	GUEST_ASSERT_IAR_EMPTY();

	reset_priorities(args);
}

static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
{
	uint32_t nr_irqs = args->nr_irqs;

	if (f->sgi) {
		guest_inject(args, MIN_SGI, 1, f->cmd);
		guest_inject(args, 0, 16, f->cmd);
	}

	if (f->ppi)
		guest_inject(args, MIN_PPI, 1, f->cmd);

	if (f->spi) {
		guest_inject(args, MIN_SPI, 1, f->cmd);
		guest_inject(args, nr_irqs - 1, 1, f->cmd);
		guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
	}
}

static void test_injection_failure(struct test_args *args,
		struct kvm_inject_desc *f)
{
	uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
	int i;

	for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
		test_inject_fail(args, bad_intid[i], f->cmd);
}

static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
{
	/*
	 * Test up to 4 levels of preemption. The reason is that KVM doesn't
	 * currently allow more concurrently active IRQs than the number of
	 * list registers (LRs). The number of LRs implemented is
	 * IMPLEMENTATION DEFINED; however, most implementations seem to have 4.
	 */
	if (f->sgi)
		test_inject_preemption(args, MIN_SGI, 4, f->cmd);

	if (f->ppi)
		test_inject_preemption(args, MIN_PPI, 4, f->cmd);

	if (f->spi)
		test_inject_preemption(args, MIN_SPI, 4, f->cmd);
}

static void test_restore_active(struct test_args *args,
		struct kvm_inject_desc *f)
{
	/* Test up to 4 active IRQs. Same reason as in test_preemption. */
	if (f->sgi)
		guest_restore_active(args, MIN_SGI, 4, f->cmd);

	if (f->ppi)
		guest_restore_active(args, MIN_PPI, 4, f->cmd);

	if (f->spi)
		guest_restore_active(args, MIN_SPI, 4, f->cmd);
}

static void guest_code(struct test_args *args)
{
	uint32_t i, nr_irqs = args->nr_irqs;
	bool level_sensitive = args->level_sensitive;
	struct kvm_inject_desc *f, *inject_fns;

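	/*
	 * Configure the GIC: enable every IRQ, set the SPIs to the requested
	 * edge/level config, pick the EOI mode, reset the priorities, and set
	 * the priority mask to the lowest priority so all configured IRQs can
	 * fire.
	 */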
	gic_init(GIC_V3, 1, dist, redist);

	for (i = 0; i < nr_irqs; i++)
		gic_irq_enable(i);

	for (i = MIN_SPI; i < nr_irqs; i++)
		gic_irq_set_config(i, !level_sensitive);

	gic_set_eoi_split(args->eoi_split);

	reset_priorities(args);
	gic_set_priority_mask(CPU_PRIO_MASK);

	inject_fns = level_sensitive ? inject_level_fns
				     : inject_edge_fns;

	local_irq_enable();

	/* Start the tests. */
	for_each_supported_inject_fn(args, inject_fns, f) {
		test_injection(args, f);
		test_preemption(args, f);
		test_injection_failure(args, f);
	}

	/*
	 * Restore the active state of IRQs. This mimics what would happen
	 * when live-migrating while some IRQs are in the middle of being
	 * handled.
	 */
	for_each_supported_activate_fn(args, set_active_fns, f)
		test_restore_active(args, f);

	GUEST_DONE();
}

static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
			struct test_args *test_args, bool expect_failure)
{
	int ret;

	if (!expect_failure) {
		kvm_arm_irq_line(vm, intid, level);
	} else {
		/* The interface doesn't allow larger intids. */
		if (intid > KVM_ARM_IRQ_NUM_MASK)
			return;

		ret = _kvm_arm_irq_line(vm, intid, level);
		TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %i did not cause KVM_IRQ_LINE "
				"error: rc: %i errno: %i", intid, ret, errno);
	}
}

void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
			bool expect_failure)
{
	if (!expect_failure) {
		kvm_irq_set_level_info(gic_fd, intid, level);
	} else {
		int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
		/*
		 * The kernel silently fails for invalid SPIs and SGIs (which
		 * are not level-sensitive). It only checks that the intid
		 * doesn't spill over 1U << 10 (just past VGIC_MAX_RESERVED).
		 * Also, callers are supposed to mask the intid with 0x3ff
		 * (1023).
		 */
		if (intid > VGIC_MAX_RESERVED)
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
				"error: rc: %i errno: %i", intid, ret, errno);
		else
			TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
				"for intid %i failed, rc: %i errno: %i",
				intid, ret, errno);
	}
}

static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
		bool expect_failure)
{
	struct kvm_irq_routing *routing;
	int ret;
	uint64_t i;

	assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);

	routing = kvm_gsi_routing_create();
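	/* GSI routing numbers pins from the first SPI, i.e. GSI 0 == intid MIN_SPI. */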
	for (i = intid; i < (uint64_t)intid + num; i++)
		kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);

	if (!expect_failure) {
		kvm_gsi_routing_write(vm, routing);
	} else {
		ret = _kvm_gsi_routing_write(vm, routing);
		/* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
		if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %u did not cause KVM_SET_GSI_ROUTING "
				"error: rc: %i errno: %i", intid, ret, errno);
		else
			TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
				"for intid %i failed, rc: %i errno: %i",
				intid, ret, errno);
	}
}

static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
					struct kvm_vcpu *vcpu,
					bool expect_failure)
{
	/*
	 * Ignore this when expecting failure, as invalid intids would lead
	 * either to trying to inject SGIs when we configured the test to be
	 * level_sensitive (or the reverse), or to injecting large intids,
	 * which would write past the ISPENDR register space (and we don't
	 * want to do that either).
	 */
	if (!expect_failure)
		kvm_irq_write_ispendr(gic_fd, intid, vcpu);
}

static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
		bool expect_failure)
{
	int fd[MAX_SPI];
	uint64_t val;
	int ret, f;
	uint64_t i;

	/*
	 * There is no way to try injecting an SGI or PPI as the interface
	 * starts counting from the first SPI (above the private ones), so just
	 * exit.
	 */
	if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
		return;

	kvm_set_gsi_routing_irqchip_check(vm, intid, num,
			kvm_max_routes, expect_failure);

	/*
	 * If expect_failure, then just inject anyway. These writes will
	 * silently fail, and in any case the guest will verify that no actual
	 * interrupt was injected for those cases.
	 */

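	/*
	 * Three phases: create one eventfd per intid, bind each one to its
	 * GSI with KVM_IRQFD, and then signal the eventfds to trigger the
	 * injections.
	 */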
	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		fd[f] = eventfd(0, 0);
		TEST_ASSERT(fd[f] != -1, __KVM_SYSCALL_ERROR("eventfd()", fd[f]));
	}

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		struct kvm_irqfd irqfd = {
			.fd  = fd[f],
			.gsi = i - MIN_SPI,
		};
		assert(i <= (uint64_t)UINT_MAX);
		vm_ioctl(vm, KVM_IRQFD, &irqfd);
	}

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		val = 1;
		ret = write(fd[f], &val, sizeof(uint64_t));
		TEST_ASSERT(ret == sizeof(uint64_t),
			__KVM_SYSCALL_ERROR("write()", ret));
	}

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
		close(fd[f]);
}

/* Handles the valid case: intid=0xffffffff num=1. */
#define for_each_intid(first, num, tmp, i)				\
	for ((tmp) = (i) = (first);					\
		(tmp) < (uint64_t)(first) + (uint64_t)(num);		\
		(tmp)++, (i)++)

static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
			struct kvm_inject_args *inject_args,
			struct test_args *test_args)
{
	kvm_inject_cmd cmd = inject_args->cmd;
	uint32_t intid = inject_args->first_intid;
	uint32_t num = inject_args->num;
	int level = inject_args->level;
	bool expect_failure = inject_args->expect_failure;
	struct kvm_vm *vm = vcpu->vm;
	uint64_t tmp;
	uint32_t i;

	/* Handles the valid case: intid=0xffffffff num=1. */
	assert(intid < UINT_MAX - num || num == 1);

	switch (cmd) {
	case KVM_INJECT_EDGE_IRQ_LINE:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 1, test_args,
					expect_failure);
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 0, test_args,
					expect_failure);
		break;
	case KVM_SET_IRQ_LINE:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, level, test_args,
					expect_failure);
		break;
	case KVM_SET_IRQ_LINE_HIGH:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 1, test_args,
					expect_failure);
		break;
	case KVM_SET_LEVEL_INFO_HIGH:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_set_level_info_check(gic_fd, i, 1,
					expect_failure);
		break;
	case KVM_INJECT_IRQFD:
		kvm_routing_and_irqfd_check(vm, intid, num,
					test_args->kvm_max_routes,
					expect_failure);
		break;
	case KVM_WRITE_ISPENDR:
		for (i = intid; i < intid + num; i++)
			kvm_irq_write_ispendr_check(gic_fd, i, vcpu,
					expect_failure);
		break;
	case KVM_WRITE_ISACTIVER:
		for (i = intid; i < intid + num; i++)
			kvm_irq_write_isactiver(gic_fd, i, vcpu);
		break;
	default:
		break;
	}
}

static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
		struct kvm_inject_args *args)
{
	struct kvm_inject_args *kvm_args_hva;
	vm_vaddr_t kvm_args_gva;

	kvm_args_gva = uc->args[1];
	kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
	memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
}

static void print_args(struct test_args *args)
{
	printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
			args->nr_irqs, args->level_sensitive,
			args->eoi_split);
}

static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
{
	struct ucall uc;
	int gic_fd;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_inject_args inject_args;
	vm_vaddr_t args_gva;

	struct test_args args = {
		.nr_irqs = nr_irqs,
		.level_sensitive = level_sensitive,
		.eoi_split = eoi_split,
		.kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
		.kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
	};

	print_args(&args);

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	ucall_init(vm, NULL);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	/* Set up the guest args page (so the guest gets the args). */
	args_gva = vm_vaddr_alloc_page(vm);
	memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
	vcpu_args_set(vcpu, 1, args_gva);

	gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
			GICD_BASE_GPA, GICR_BASE_GPA);
	__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping");

	vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
		guest_irq_handlers[args.eoi_split][args.level_sensitive]);

	while (1) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			kvm_inject_get_call(vm, &uc, &inject_args);
			run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	close(gic_fd);
	kvm_vm_free(vm);
}

static void help(const char *name)
{
	printf(
	"\n"
	"usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name);
	printf(" -n: specify the number of IRQs to set up the vgic with. "
		"It has to be a multiple of 32 and between 64 and 1024.\n");
	printf(" -e: if 1 then EOI is split into a write to DIR on top "
		"of writing EOI.\n");
	printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
	puts("");
	exit(1);
}

int main(int argc, char **argv)
{
	uint32_t nr_irqs = 64;
	bool default_args = true;
	bool level_sensitive = false;
	int opt;
	bool eoi_split = false;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
		switch (opt) {
		case 'n':
			nr_irqs = atoi(optarg);
			if (nr_irqs > 1024 || nr_irqs % 32)
				help(argv[0]);
			break;
		case 'e':
			eoi_split = (bool)atoi(optarg);
			default_args = false;
			break;
		case 'l':
			level_sensitive = (bool)atoi(optarg);
			default_args = false;
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	/*
	 * If the user just specified nr_irqs (or nothing), then run all
	 * combinations of eoi_split and level_sensitive.
	 */
	if (default_args) {
		test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
		test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
		test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
		test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
	} else {
		test_vgic(nr_irqs, level_sensitive, eoi_split);
	}

	return 0;
}