// SPDX-License-Identifier: GPL-2.0
/*
 * Check for KVM_GET_REG_LIST regressions.
 *
 * Copyright (C) 2020, Red Hat, Inc.
 *
 * When attempting to migrate from a host with an older kernel to a host
 * with a newer kernel we allow the newer kernel on the destination to
 * list new registers with get-reg-list. We assume they'll be unused, at
 * least until the guest reboots, and so they're relatively harmless.
 * However, if the destination host with the newer kernel is missing
 * registers which the source host with the older kernel has, then that's
 * a regression in get-reg-list. This test checks for that regression by
 * checking the current list against a blessed list. We should never have
 * missing registers, but if new ones appear then they can probably be
 * added to the blessed list. A completely new blessed list can be created
 * by running the test with the --list command line argument.
 *
 * Note, the blessed list should be created from the oldest possible
 * kernel. We can't go older than v4.15, though, because that's the first
 * release to expose the ID system registers in KVM_GET_REG_LIST, see
 * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features
 * from guests"). Also, one must use the --core-reg-fixup command line
 * option when running on an older kernel that doesn't include df205b5c6328
 * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST")
 */
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <sys/types.h>
32 #include <sys/wait.h>
33 #include "kvm_util.h"
34 #include "test_util.h"
35 #include "processor.h"
36
/* Register list returned by KVM_GET_REG_LIST for the vcpu under test. */
static struct kvm_reg_list *reg_list;
/* Blessed list flattened from the selected config's sublists (see run_test()). */
static __u64 *blessed_reg, blessed_n;
39
/*
 * One named group of blessed registers that a vcpu configuration may
 * include, together with the KVM capability/feature needed to expose it.
 */
struct reg_sublist {
	const char *name;	/* sublist name, e.g. "base" (used in config_name()) */
	long capability;	/* KVM_CAP_* required for this sublist, 0 if none */
	int feature;		/* KVM_ARM_VCPU_* init feature bit index */
	bool finalize;		/* whether KVM_ARM_VCPU_FINALIZE must be called */
	__u64 *regs;		/* expected (blessed) register ids */
	__u64 regs_n;		/* number of entries in @regs */
	__u64 *rejects_set;	/* ids whose set must fail with EPERM after finalize */
	__u64 rejects_set_n;	/* number of entries in @rejects_set */
};
50
/* A vcpu configuration: a set of sublists tested together in one child. */
struct vcpu_config {
	char *name;				/* lazily built by config_name() */
	struct reg_sublist sublists[];		/* terminated by an entry with regs == NULL */
};
55
/* Defined later in the file, after the blessed register lists. */
static struct vcpu_config *vcpu_configs[];
static int vcpu_configs_n;

/* Iterate a config's sublists; the array is terminated by regs == NULL. */
#define for_each_sublist(c, s)						\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

/* Iterate over every index of the current reg_list. */
#define for_each_reg(i)							\
	for ((i) = 0; (i) < reg_list->n; ++(i))

/* As for_each_reg(), but skipping host-dependent (filtered) registers. */
#define for_each_reg_filtered(i)					\
	for_each_reg(i)							\
		if (!filter_reg(reg_list->reg[i]))

/* Iterate blessed-list indices whose register is absent from reg_list. */
#define for_each_missing_reg(i)						\
	for ((i) = 0; (i) < blessed_n; ++(i))				\
		if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))

/* Iterate (filtered) reg_list indices absent from the blessed list. */
#define for_each_new_reg(i)						\
	for_each_reg_filtered(i)					\
		if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
76
config_name(struct vcpu_config * c)77 static const char *config_name(struct vcpu_config *c)
78 {
79 struct reg_sublist *s;
80 int len = 0;
81
82 if (c->name)
83 return c->name;
84
85 for_each_sublist(c, s)
86 len += strlen(s->name) + 1;
87
88 c->name = malloc(len);
89
90 len = 0;
91 for_each_sublist(c, s) {
92 if (!strcmp(s->name, "base"))
93 continue;
94 strcat(c->name + len, s->name);
95 len += strlen(s->name) + 1;
96 c->name[len - 1] = '+';
97 }
98 c->name[len - 1] = '\0';
99
100 return c->name;
101 }
102
has_cap(struct vcpu_config * c,long capability)103 static bool has_cap(struct vcpu_config *c, long capability)
104 {
105 struct reg_sublist *s;
106
107 for_each_sublist(c, s)
108 if (s->capability == capability)
109 return true;
110 return false;
111 }
112
filter_reg(__u64 reg)113 static bool filter_reg(__u64 reg)
114 {
115 /*
116 * DEMUX register presence depends on the host's CLIDR_EL1.
117 * This means there's no set of them that we can bless.
118 */
119 if ((reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
120 return true;
121
122 return false;
123 }
124
/*
 * Return true iff @reg appears in the first @nr_regs entries of @regs.
 *
 * The index is __u64 to match @nr_regs; the old 'int i' compared a
 * signed index against an unsigned 64-bit bound.
 */
static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
{
	__u64 i;

	for (i = 0; i < nr_regs; ++i) {
		if (reg == regs[i])
			return true;
	}

	return false;
}
134
/*
 * Return a heap-allocated copy of @template with the first "##"
 * replaced by the decimal value of @index.  The caller owns (and, in
 * this test, deliberately leaks) the returned string.  @template must
 * contain "##".
 *
 * The previous implementation sprintf()'d the index over the "##" in a
 * strdup()'d buffer and then strcat()'d the tail: any index wider than
 * two digits overflowed the allocation.  Size for the worst case and
 * format in one bounded call instead.
 */
static const char *str_with_index(const char *template, __u64 index)
{
	const char *hash = strstr(template, "##");
	size_t size;
	char *str;

	/* 20 = max decimal digits of a 64-bit value */
	size = strlen(template) + 20 + 1;
	str = malloc(size);
	snprintf(str, size, "%.*s%lld%s",
		 (int)(hash - template), template, index, hash + 2);

	return str;
}
147
/* Bits of a reg id that identify its class/size rather than the register. */
#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)

/* Number of 32-bit words each core register type occupies in struct kvm_regs. */
#define CORE_REGS_XX_NR_WORDS 2
#define CORE_SPSR_XX_NR_WORDS 2
#define CORE_FPREGS_XX_NR_WORDS 4
153
/*
 * Decode a KVM_REG_ARM_CORE register id into the source expression that
 * names it, for printing blessed-list entries.  The offset indexes
 * struct kvm_regs in 32-bit words; dividing by the per-register word
 * count recovers the array index of multi-word registers.  Fails the
 * test on ids that match no known field.
 */
static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
{
	__u64 core_off = id & ~REG_MASK, idx;

	/*
	 * core_off is the offset into struct kvm_regs
	 */
	switch (core_off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", config_name(c), idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx);
	case KVM_REG_ARM_CORE_REG(regs.sp):
		return "KVM_REG_ARM_CORE_REG(regs.sp)";
	case KVM_REG_ARM_CORE_REG(regs.pc):
		return "KVM_REG_ARM_CORE_REG(regs.pc)";
	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return "KVM_REG_ARM_CORE_REG(regs.pstate)";
	case KVM_REG_ARM_CORE_REG(sp_el1):
		return "KVM_REG_ARM_CORE_REG(sp_el1)";
	case KVM_REG_ARM_CORE_REG(elr_el1):
		return "KVM_REG_ARM_CORE_REG(elr_el1)";
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
		TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", config_name(c), idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", config_name(c), idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
	}

	TEST_FAIL("%s: Unknown core reg id: 0x%llx", config_name(c), id);
	return NULL;
}
196
/*
 * Decode a KVM_REG_ARM64_SVE register id into its source expression.
 *
 * The low 5 bits of an SVE reg id select the slice and the bits above
 * them the register number (see the shifts/masks below); only slice 0
 * is expected.  Returns NULL for ids outside the known SVE ranges —
 * NOTE(review): the print_reg() caller passes that straight to printf,
 * so an unknown SVE id would print a null string; confirm intended.
 */
static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
{
	__u64 sve_off, n, i;

	if (id == KVM_REG_ARM64_SVE_VLS)
		return "KVM_REG_ARM64_SVE_VLS";

	/* Strip the class bits and the 5-bit slice field. */
	sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
	i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);

	TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", config_name(c), id);

	switch (sve_off) {
	case KVM_REG_ARM64_SVE_ZREG_BASE ...
	     KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
		/* Round-trip check: no stray bits besides the reg number. */
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
			    "%s: Unexpected bits set in SVE ZREG id: 0x%llx", config_name(c), id);
		return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n);
	case KVM_REG_ARM64_SVE_PREG_BASE ...
	     KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
			    "%s: Unexpected bits set in SVE PREG id: 0x%llx", config_name(c), id);
		return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n);
	case KVM_REG_ARM64_SVE_FFR_BASE:
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
			    "%s: Unexpected bits set in SVE FFR id: 0x%llx", config_name(c), id);
		return "KVM_REG_ARM64_SVE_FFR(0)";
	}

	return NULL;
}
230
/*
 * Print register id @id as a line suitable for pasting into a blessed
 * register list (e.g. base_regs[] below), validating along the way that
 * the id is a well-formed arm64 register id.  Fails the test on any
 * malformed or unexpected id.
 */
static void print_reg(struct vcpu_config *c, __u64 id)
{
	unsigned op0, op1, crn, crm, op2;
	const char *reg_size = NULL;

	TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
		    "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", config_name(c), id);

	/* Map the size field to its symbolic name for printing. */
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U8:
		reg_size = "KVM_REG_SIZE_U8";
		break;
	case KVM_REG_SIZE_U16:
		reg_size = "KVM_REG_SIZE_U16";
		break;
	case KVM_REG_SIZE_U32:
		reg_size = "KVM_REG_SIZE_U32";
		break;
	case KVM_REG_SIZE_U64:
		reg_size = "KVM_REG_SIZE_U64";
		break;
	case KVM_REG_SIZE_U128:
		reg_size = "KVM_REG_SIZE_U128";
		break;
	case KVM_REG_SIZE_U256:
		reg_size = "KVM_REG_SIZE_U256";
		break;
	case KVM_REG_SIZE_U512:
		reg_size = "KVM_REG_SIZE_U512";
		break;
	case KVM_REG_SIZE_U1024:
		reg_size = "KVM_REG_SIZE_U1024";
		break;
	case KVM_REG_SIZE_U2048:
		reg_size = "KVM_REG_SIZE_U2048";
		break;
	default:
		TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
			  config_name(c), (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
	}

	/* Dispatch on the coprocessor (register class) field. */
	switch (id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(c, id));
		break;
	case KVM_REG_ARM_DEMUX:
		TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
			    "%s: Unexpected bits set in DEMUX reg id: 0x%llx", config_name(c), id);
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
		       reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
		break;
	case KVM_REG_ARM64_SYSREG:
		op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
		op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
		crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
		crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
		op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
		/* Round-trip check: the id carries only the op/cr fields. */
		TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
			    "%s: Unexpected bits set in SYSREG reg id: 0x%llx", config_name(c), id);
		printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
		break;
	case KVM_REG_ARM_FW:
		TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
			    "%s: Unexpected bits set in FW reg id: 0x%llx", config_name(c), id);
		printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM_FW_FEAT_BMAP:
		TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
			    "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", config_name(c), id);
		printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM64_SVE:
		/* SVE ids are only legal when the config enables SVE. */
		if (has_cap(c, KVM_CAP_ARM_SVE))
			printf("\t%s,\n", sve_id_to_str(c, id));
		else
			TEST_FAIL("%s: KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", config_name(c), id);
		break;
	default:
		TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
			  config_name(c), (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
	}
}
313
/*
 * Older kernels listed each 32-bit word of CORE registers separately.
 * For 64 and 128-bit registers we need to ignore the extra words. We
 * also need to fixup the sizes, because the older kernels stated all
 * registers were 64-bit, even when they weren't.
 */
static void core_reg_fixup(void)
{
	struct kvm_reg_list *tmp;
	__u64 id, core_off;
	int i;

	/* Build a corrected copy of reg_list, then swap it in below. */
	tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64));

	for (i = 0; i < reg_list->n; ++i) {
		id = reg_list->reg[i];

		/* Only CORE registers need fixing up. */
		if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) {
			tmp->reg[tmp->n++] = id;
			continue;
		}

		/* Offset into struct kvm_regs, in 32-bit words. */
		core_off = id & ~REG_MASK;

		switch (core_off) {
		case 0x52: case 0xd2: case 0xd6:
			/*
			 * These offsets are pointing at padding.
			 * We need to ignore them too.
			 */
			continue;
		case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
		     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
			/*
			 * vregs are 128-bit (4 words): keep only the first
			 * word of each and restate the size as U128.
			 */
			if (core_off & 3)
				continue;
			id &= ~KVM_REG_SIZE_MASK;
			id |= KVM_REG_SIZE_U128;
			tmp->reg[tmp->n++] = id;
			continue;
		case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
			/* fpsr/fpcr are really 32-bit registers. */
			id &= ~KVM_REG_SIZE_MASK;
			id |= KVM_REG_SIZE_U32;
			tmp->reg[tmp->n++] = id;
			continue;
		default:
			/* 64-bit (2-word) regs: keep only the even word. */
			if (core_off & 1)
				continue;
			tmp->reg[tmp->n++] = id;
			break;
		}
	}

	free(reg_list);
	reg_list = tmp;
}
370
prepare_vcpu_init(struct vcpu_config * c,struct kvm_vcpu_init * init)371 static void prepare_vcpu_init(struct vcpu_config *c, struct kvm_vcpu_init *init)
372 {
373 struct reg_sublist *s;
374
375 for_each_sublist(c, s)
376 if (s->capability)
377 init->features[s->feature / 32] |= 1 << (s->feature % 32);
378 }
379
finalize_vcpu(struct kvm_vcpu * vcpu,struct vcpu_config * c)380 static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c)
381 {
382 struct reg_sublist *s;
383 int feature;
384
385 for_each_sublist(c, s) {
386 if (s->finalize) {
387 feature = s->feature;
388 vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
389 }
390 }
391 }
392
check_supported(struct vcpu_config * c)393 static void check_supported(struct vcpu_config *c)
394 {
395 struct reg_sublist *s;
396
397 for_each_sublist(c, s) {
398 if (!s->capability)
399 continue;
400
401 __TEST_REQUIRE(kvm_has_cap(s->capability),
402 "%s: %s not available, skipping tests\n",
403 config_name(c), s->name);
404 }
405 }
406
/* Command-line flags (see help()). */
static bool print_list;		/* --list: print the register list instead of testing */
static bool print_filtered;	/* --list-filtered: print only filtered-out registers */
static bool fixup_core_regs;	/* --core-reg-fixup: repair old-kernel core reg lists */
410
/*
 * Run the get-reg-list checks for one vcpu configuration:
 *
 *  1. create a barebones VM/vcpu with the config's init features,
 *  2. fetch KVM_GET_REG_LIST (optionally fixed up for old kernels),
 *  3. if --list/--list-filtered was given, just print and return,
 *  4. get then set every listed register, expecting EPERM on set for
 *     ids in a sublist's rejects_set,
 *  5. diff the list against the blessed list and report new/missing
 *     registers.
 *
 * Fails (TEST_ASSERT) on missing registers or any get/set/reject
 * failure; new registers alone only produce a report.
 */
static void run_test(struct vcpu_config *c)
{
	struct kvm_vcpu_init init = { .target = -1, };
	int new_regs = 0, missing_regs = 0, i, n;
	int failed_get = 0, failed_set = 0, failed_reject = 0;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct reg_sublist *s;

	check_supported(c);

	vm = vm_create_barebones();
	prepare_vcpu_init(c, &init);
	vcpu = __vm_vcpu_add(vm, 0);
	aarch64_vcpu_setup(vcpu, &init);
	finalize_vcpu(vcpu, c);

	reg_list = vcpu_get_reg_list(vcpu);

	if (fixup_core_regs)
		core_reg_fixup();

	/* Listing mode: print and return without testing. */
	if (print_list || print_filtered) {
		putchar('\n');
		for_each_reg(i) {
			__u64 id = reg_list->reg[i];
			if ((print_list && !filter_reg(id)) ||
			    (print_filtered && filter_reg(id)))
				print_reg(c, id);
		}
		putchar('\n');
		return;
	}

	/*
	 * We only test that we can get the register and then write back the
	 * same value. Some registers may allow other values to be written
	 * back, but others only allow some bits to be changed, and at least
	 * for ID registers set will fail if the value does not exactly match
	 * what was returned by get. If registers that allow other values to
	 * be written need to have the other values tested, then we should
	 * create a new set of tests for those in a new independent test
	 * executable.
	 */
	for_each_reg(i) {
		uint8_t addr[2048 / 8];	/* large enough for the biggest reg size */
		struct kvm_one_reg reg = {
			.id = reg_list->reg[i],
			.addr = (__u64)&addr,
		};
		bool reject_reg = false;
		int ret;

		ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
		if (ret) {
			printf("%s: Failed to get ", config_name(c));
			print_reg(c, reg.id);
			putchar('\n');
			++failed_get;
		}

		/* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
		for_each_sublist(c, s) {
			if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
				reject_reg = true;
				ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
				if (ret != -1 || errno != EPERM) {
					printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
					print_reg(c, reg.id);
					putchar('\n');
					++failed_reject;
				}
				break;
			}
		}

		if (!reject_reg) {
			ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
			if (ret) {
				printf("%s: Failed to set ", config_name(c));
				print_reg(c, reg.id);
				putchar('\n');
				++failed_set;
			}
		}
	}

	/* Flatten all sublists into the blessed list for comparison. */
	for_each_sublist(c, s)
		blessed_n += s->regs_n;
	blessed_reg = calloc(blessed_n, sizeof(__u64));

	n = 0;
	for_each_sublist(c, s) {
		for (i = 0; i < s->regs_n; ++i)
			blessed_reg[n++] = s->regs[i];
	}

	for_each_new_reg(i)
		++new_regs;

	for_each_missing_reg(i)
		++missing_regs;

	if (new_regs || missing_regs) {
		/* n = number of registers that survive filtering */
		n = 0;
		for_each_reg_filtered(i)
			++n;

		printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
		printf("%s: Number registers: %5lld (includes %lld filtered registers)\n",
		       config_name(c), reg_list->n, reg_list->n - n);
	}

	if (new_regs) {
		printf("\n%s: There are %d new registers.\n"
		       "Consider adding them to the blessed reg "
		       "list with the following lines:\n\n", config_name(c), new_regs);
		for_each_new_reg(i)
			print_reg(c, reg_list->reg[i]);
		putchar('\n');
	}

	if (missing_regs) {
		printf("\n%s: There are %d missing registers.\n"
		       "The following lines are missing registers:\n\n", config_name(c), missing_regs);
		for_each_missing_reg(i)
			print_reg(c, blessed_reg[i]);
		putchar('\n');
	}

	TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
		    "%s: There are %d missing registers; "
		    "%d registers failed get; %d registers failed set; %d registers failed reject",
		    config_name(c), missing_regs, failed_get, failed_set, failed_reject);

	pr_info("%s: PASS\n", config_name(c));
	/* Reset globals so the next config starts clean. */
	blessed_n = 0;
	free(blessed_reg);
	free(reg_list);
	kvm_vm_free(vm);
}
552
help(void)553 static void help(void)
554 {
555 struct vcpu_config *c;
556 int i;
557
558 printf(
559 "\n"
560 "usage: get-reg-list [--config=<selection>] [--list] [--list-filtered] [--core-reg-fixup]\n\n"
561 " --config=<selection> Used to select a specific vcpu configuration for the test/listing\n"
562 " '<selection>' may be\n");
563
564 for (i = 0; i < vcpu_configs_n; ++i) {
565 c = vcpu_configs[i];
566 printf(
567 " '%s'\n", config_name(c));
568 }
569
570 printf(
571 "\n"
572 " --list Print the register list rather than test it (requires --config)\n"
573 " --list-filtered Print registers that would normally be filtered out (requires --config)\n"
574 " --core-reg-fixup Needed when running on old kernels with broken core reg listings\n"
575 "\n"
576 );
577 }
578
parse_config(const char * config)579 static struct vcpu_config *parse_config(const char *config)
580 {
581 struct vcpu_config *c;
582 int i;
583
584 if (config[8] != '=')
585 help(), exit(1);
586
587 for (i = 0; i < vcpu_configs_n; ++i) {
588 c = vcpu_configs[i];
589 if (strcmp(config_name(c), &config[9]) == 0)
590 break;
591 }
592
593 if (i == vcpu_configs_n)
594 help(), exit(1);
595
596 return c;
597 }
598
main(int ac,char ** av)599 int main(int ac, char **av)
600 {
601 struct vcpu_config *c, *sel = NULL;
602 int i, ret = 0;
603 pid_t pid;
604
605 for (i = 1; i < ac; ++i) {
606 if (strcmp(av[i], "--core-reg-fixup") == 0)
607 fixup_core_regs = true;
608 else if (strncmp(av[i], "--config", 8) == 0)
609 sel = parse_config(av[i]);
610 else if (strcmp(av[i], "--list") == 0)
611 print_list = true;
612 else if (strcmp(av[i], "--list-filtered") == 0)
613 print_filtered = true;
614 else if (strcmp(av[i], "--help") == 0 || strcmp(av[1], "-h") == 0)
615 help(), exit(0);
616 else
617 help(), exit(1);
618 }
619
620 if (print_list || print_filtered) {
621 /*
622 * We only want to print the register list of a single config.
623 */
624 if (!sel)
625 help(), exit(1);
626 }
627
628 for (i = 0; i < vcpu_configs_n; ++i) {
629 c = vcpu_configs[i];
630 if (sel && c != sel)
631 continue;
632
633 pid = fork();
634
635 if (!pid) {
636 run_test(c);
637 exit(0);
638 } else {
639 int wstatus;
640 pid_t wpid = wait(&wstatus);
641 TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
642 if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
643 ret = KSFT_FAIL;
644 }
645 }
646
647 return ret;
648 }
649
/*
 * The current blessed list was primed with the output of kernel version
 * v4.15 with --core-reg-fixup and then later updated with new registers.
 *
 * The blessed list is up to date with kernel version v5.13-rc3
 */
static __u64 base_regs[] = {
	/* Core (struct kvm_regs) registers */
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
	/* Firmware pseudo-registers */
	KVM_REG_ARM_FW_REG(0),		/* KVM_REG_ARM_PSCI_VERSION */
	KVM_REG_ARM_FW_REG(1),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
	KVM_REG_ARM_FW_REG(2),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
	KVM_REG_ARM_FW_REG(3),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(0),	/* KVM_REG_ARM_STD_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(1),	/* KVM_REG_ARM_STD_HYP_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(2),	/* KVM_REG_ARM_VENDOR_HYP_BMAP */
	/* System registers */
	ARM64_SYS_REG(3, 3, 14, 3, 1),	/* CNTV_CTL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 3, 2),	/* CNTV_CVAL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 0, 2),
	ARM64_SYS_REG(3, 0, 0, 0, 0),	/* MIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 0, 6),	/* REVIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 1),	/* CLIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 7),	/* AIDR_EL1 */
	ARM64_SYS_REG(3, 3, 0, 0, 1),	/* CTR_EL0 */
	ARM64_SYS_REG(2, 0, 0, 0, 4),
	ARM64_SYS_REG(2, 0, 0, 0, 5),
	ARM64_SYS_REG(2, 0, 0, 0, 6),
	ARM64_SYS_REG(2, 0, 0, 0, 7),
	ARM64_SYS_REG(2, 0, 0, 1, 4),
	ARM64_SYS_REG(2, 0, 0, 1, 5),
	ARM64_SYS_REG(2, 0, 0, 1, 6),
	ARM64_SYS_REG(2, 0, 0, 1, 7),
	ARM64_SYS_REG(2, 0, 0, 2, 0),	/* MDCCINT_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 2),	/* MDSCR_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 4),
	ARM64_SYS_REG(2, 0, 0, 2, 5),
	ARM64_SYS_REG(2, 0, 0, 2, 6),
	ARM64_SYS_REG(2, 0, 0, 2, 7),
	ARM64_SYS_REG(2, 0, 0, 3, 4),
	ARM64_SYS_REG(2, 0, 0, 3, 5),
	ARM64_SYS_REG(2, 0, 0, 3, 6),
	ARM64_SYS_REG(2, 0, 0, 3, 7),
	ARM64_SYS_REG(2, 0, 0, 4, 4),
	ARM64_SYS_REG(2, 0, 0, 4, 5),
	ARM64_SYS_REG(2, 0, 0, 4, 6),
	ARM64_SYS_REG(2, 0, 0, 4, 7),
	ARM64_SYS_REG(2, 0, 0, 5, 4),
	ARM64_SYS_REG(2, 0, 0, 5, 5),
	ARM64_SYS_REG(2, 0, 0, 5, 6),
	ARM64_SYS_REG(2, 0, 0, 5, 7),
	ARM64_SYS_REG(2, 0, 0, 6, 4),
	ARM64_SYS_REG(2, 0, 0, 6, 5),
	ARM64_SYS_REG(2, 0, 0, 6, 6),
	ARM64_SYS_REG(2, 0, 0, 6, 7),
	ARM64_SYS_REG(2, 0, 0, 7, 4),
	ARM64_SYS_REG(2, 0, 0, 7, 5),
	ARM64_SYS_REG(2, 0, 0, 7, 6),
	ARM64_SYS_REG(2, 0, 0, 7, 7),
	ARM64_SYS_REG(2, 0, 0, 8, 4),
	ARM64_SYS_REG(2, 0, 0, 8, 5),
	ARM64_SYS_REG(2, 0, 0, 8, 6),
	ARM64_SYS_REG(2, 0, 0, 8, 7),
	ARM64_SYS_REG(2, 0, 0, 9, 4),
	ARM64_SYS_REG(2, 0, 0, 9, 5),
	ARM64_SYS_REG(2, 0, 0, 9, 6),
	ARM64_SYS_REG(2, 0, 0, 9, 7),
	ARM64_SYS_REG(2, 0, 0, 10, 4),
	ARM64_SYS_REG(2, 0, 0, 10, 5),
	ARM64_SYS_REG(2, 0, 0, 10, 6),
	ARM64_SYS_REG(2, 0, 0, 10, 7),
	ARM64_SYS_REG(2, 0, 0, 11, 4),
	ARM64_SYS_REG(2, 0, 0, 11, 5),
	ARM64_SYS_REG(2, 0, 0, 11, 6),
	ARM64_SYS_REG(2, 0, 0, 11, 7),
	ARM64_SYS_REG(2, 0, 0, 12, 4),
	ARM64_SYS_REG(2, 0, 0, 12, 5),
	ARM64_SYS_REG(2, 0, 0, 12, 6),
	ARM64_SYS_REG(2, 0, 0, 12, 7),
	ARM64_SYS_REG(2, 0, 0, 13, 4),
	ARM64_SYS_REG(2, 0, 0, 13, 5),
	ARM64_SYS_REG(2, 0, 0, 13, 6),
	ARM64_SYS_REG(2, 0, 0, 13, 7),
	ARM64_SYS_REG(2, 0, 0, 14, 4),
	ARM64_SYS_REG(2, 0, 0, 14, 5),
	ARM64_SYS_REG(2, 0, 0, 14, 6),
	ARM64_SYS_REG(2, 0, 0, 14, 7),
	ARM64_SYS_REG(2, 0, 0, 15, 4),
	ARM64_SYS_REG(2, 0, 0, 15, 5),
	ARM64_SYS_REG(2, 0, 0, 15, 6),
	ARM64_SYS_REG(2, 0, 0, 15, 7),
	ARM64_SYS_REG(2, 0, 1, 1, 4),	/* OSLSR_EL1 */
	ARM64_SYS_REG(2, 4, 0, 7, 0),	/* DBGVCR32_EL2 */
	ARM64_SYS_REG(3, 0, 0, 0, 5),	/* MPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 0),	/* ID_PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 1),	/* ID_PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 2),	/* ID_DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 3),	/* ID_AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 4),	/* ID_MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 5),	/* ID_MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 6),	/* ID_MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 7),	/* ID_MMFR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 0),	/* ID_ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 1),	/* ID_ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 2),	/* ID_ISAR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 3),	/* ID_ISAR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 4),	/* ID_ISAR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 5),	/* ID_ISAR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 6),	/* ID_MMFR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 7),	/* ID_ISAR6_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 0),	/* MVFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 1),	/* MVFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 2),	/* MVFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 3),
	ARM64_SYS_REG(3, 0, 0, 3, 4),	/* ID_PFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 5),	/* ID_DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 6),	/* ID_MMFR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 7),
	ARM64_SYS_REG(3, 0, 0, 4, 0),	/* ID_AA64PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 1),	/* ID_AA64PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 2),
	ARM64_SYS_REG(3, 0, 0, 4, 3),
	ARM64_SYS_REG(3, 0, 0, 4, 4),	/* ID_AA64ZFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 5),
	ARM64_SYS_REG(3, 0, 0, 4, 6),
	ARM64_SYS_REG(3, 0, 0, 4, 7),
	ARM64_SYS_REG(3, 0, 0, 5, 0),	/* ID_AA64DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 1),	/* ID_AA64DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 2),
	ARM64_SYS_REG(3, 0, 0, 5, 3),
	ARM64_SYS_REG(3, 0, 0, 5, 4),	/* ID_AA64AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 5),	/* ID_AA64AFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 6),
	ARM64_SYS_REG(3, 0, 0, 5, 7),
	ARM64_SYS_REG(3, 0, 0, 6, 0),	/* ID_AA64ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 1),	/* ID_AA64ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 2),
	ARM64_SYS_REG(3, 0, 0, 6, 3),
	ARM64_SYS_REG(3, 0, 0, 6, 4),
	ARM64_SYS_REG(3, 0, 0, 6, 5),
	ARM64_SYS_REG(3, 0, 0, 6, 6),
	ARM64_SYS_REG(3, 0, 0, 6, 7),
	ARM64_SYS_REG(3, 0, 0, 7, 0),	/* ID_AA64MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 1),	/* ID_AA64MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 2),	/* ID_AA64MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 3),
	ARM64_SYS_REG(3, 0, 0, 7, 4),
	ARM64_SYS_REG(3, 0, 0, 7, 5),
	ARM64_SYS_REG(3, 0, 0, 7, 6),
	ARM64_SYS_REG(3, 0, 0, 7, 7),
	ARM64_SYS_REG(3, 0, 1, 0, 0),	/* SCTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 1),	/* ACTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 2),	/* CPACR_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 0),	/* TTBR0_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 1),	/* TTBR1_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 2),	/* TCR_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 0),	/* AFSR0_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 1),	/* AFSR1_EL1 */
	ARM64_SYS_REG(3, 0, 5, 2, 0),	/* ESR_EL1 */
	ARM64_SYS_REG(3, 0, 6, 0, 0),	/* FAR_EL1 */
	ARM64_SYS_REG(3, 0, 7, 4, 0),	/* PAR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 0),	/* MAIR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 3, 0),	/* AMAIR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 0, 0),	/* VBAR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 1, 1),	/* DISR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 1),	/* CONTEXTIDR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 4),	/* TPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 14, 1, 0),	/* CNTKCTL_EL1 */
	ARM64_SYS_REG(3, 2, 0, 0, 0),	/* CSSELR_EL1 */
	ARM64_SYS_REG(3, 3, 13, 0, 2),	/* TPIDR_EL0 */
	ARM64_SYS_REG(3, 3, 13, 0, 3),	/* TPIDRRO_EL0 */
	ARM64_SYS_REG(3, 4, 3, 0, 0),	/* DACR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 0, 1),	/* IFSR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 3, 0),	/* FPEXC32_EL2 */
};
865
/*
 * PMUv3 registers, present when the vcpu is created with the
 * KVM_ARM_VCPU_PMU_V3 feature (see PMU_SUBLIST below). The previously
 * uncommented encodings are the per-event counter/type registers:
 * CRn=14, CRm=8..11 encode PMEVCNTR<n>_EL0 and CRm=12..15 encode
 * PMEVTYPER<n>_EL0, with n = CRm[1:0]*8 + op2 (n = 0..30).
 */
static __u64 pmu_regs[] = {
	ARM64_SYS_REG(3, 0, 9, 14, 1),	/* PMINTENSET_EL1 */
	ARM64_SYS_REG(3, 0, 9, 14, 2),	/* PMINTENCLR_EL1 */
	ARM64_SYS_REG(3, 3, 9, 12, 0),	/* PMCR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 1),	/* PMCNTENSET_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 2),	/* PMCNTENCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 3),	/* PMOVSCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 4),	/* PMSWINC_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 5),	/* PMSELR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 13, 0),	/* PMCCNTR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 0),	/* PMUSERENR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 3),	/* PMOVSSET_EL0 */
	ARM64_SYS_REG(3, 3, 14, 8, 0),	/* PMEVCNTR0_EL0 */
	ARM64_SYS_REG(3, 3, 14, 8, 1),	/* PMEVCNTR1_EL0 */
	ARM64_SYS_REG(3, 3, 14, 8, 2),	/* PMEVCNTR2_EL0 */
	ARM64_SYS_REG(3, 3, 14, 8, 3),	/* PMEVCNTR3_EL0 */
	ARM64_SYS_REG(3, 3, 14, 8, 4),	/* PMEVCNTR4_EL0 */
	ARM64_SYS_REG(3, 3, 14, 8, 5),	/* PMEVCNTR5_EL0 */
	ARM64_SYS_REG(3, 3, 14, 8, 6),	/* PMEVCNTR6_EL0 */
	ARM64_SYS_REG(3, 3, 14, 8, 7),	/* PMEVCNTR7_EL0 */
	ARM64_SYS_REG(3, 3, 14, 9, 0),	/* PMEVCNTR8_EL0 */
	ARM64_SYS_REG(3, 3, 14, 9, 1),	/* PMEVCNTR9_EL0 */
	ARM64_SYS_REG(3, 3, 14, 9, 2),	/* PMEVCNTR10_EL0 */
	ARM64_SYS_REG(3, 3, 14, 9, 3),	/* PMEVCNTR11_EL0 */
	ARM64_SYS_REG(3, 3, 14, 9, 4),	/* PMEVCNTR12_EL0 */
	ARM64_SYS_REG(3, 3, 14, 9, 5),	/* PMEVCNTR13_EL0 */
	ARM64_SYS_REG(3, 3, 14, 9, 6),	/* PMEVCNTR14_EL0 */
	ARM64_SYS_REG(3, 3, 14, 9, 7),	/* PMEVCNTR15_EL0 */
	ARM64_SYS_REG(3, 3, 14, 10, 0),	/* PMEVCNTR16_EL0 */
	ARM64_SYS_REG(3, 3, 14, 10, 1),	/* PMEVCNTR17_EL0 */
	ARM64_SYS_REG(3, 3, 14, 10, 2),	/* PMEVCNTR18_EL0 */
	ARM64_SYS_REG(3, 3, 14, 10, 3),	/* PMEVCNTR19_EL0 */
	ARM64_SYS_REG(3, 3, 14, 10, 4),	/* PMEVCNTR20_EL0 */
	ARM64_SYS_REG(3, 3, 14, 10, 5),	/* PMEVCNTR21_EL0 */
	ARM64_SYS_REG(3, 3, 14, 10, 6),	/* PMEVCNTR22_EL0 */
	ARM64_SYS_REG(3, 3, 14, 10, 7),	/* PMEVCNTR23_EL0 */
	ARM64_SYS_REG(3, 3, 14, 11, 0),	/* PMEVCNTR24_EL0 */
	ARM64_SYS_REG(3, 3, 14, 11, 1),	/* PMEVCNTR25_EL0 */
	ARM64_SYS_REG(3, 3, 14, 11, 2),	/* PMEVCNTR26_EL0 */
	ARM64_SYS_REG(3, 3, 14, 11, 3),	/* PMEVCNTR27_EL0 */
	ARM64_SYS_REG(3, 3, 14, 11, 4),	/* PMEVCNTR28_EL0 */
	ARM64_SYS_REG(3, 3, 14, 11, 5),	/* PMEVCNTR29_EL0 */
	ARM64_SYS_REG(3, 3, 14, 11, 6),	/* PMEVCNTR30_EL0 */
	ARM64_SYS_REG(3, 3, 14, 12, 0),	/* PMEVTYPER0_EL0 */
	ARM64_SYS_REG(3, 3, 14, 12, 1),	/* PMEVTYPER1_EL0 */
	ARM64_SYS_REG(3, 3, 14, 12, 2),	/* PMEVTYPER2_EL0 */
	ARM64_SYS_REG(3, 3, 14, 12, 3),	/* PMEVTYPER3_EL0 */
	ARM64_SYS_REG(3, 3, 14, 12, 4),	/* PMEVTYPER4_EL0 */
	ARM64_SYS_REG(3, 3, 14, 12, 5),	/* PMEVTYPER5_EL0 */
	ARM64_SYS_REG(3, 3, 14, 12, 6),	/* PMEVTYPER6_EL0 */
	ARM64_SYS_REG(3, 3, 14, 12, 7),	/* PMEVTYPER7_EL0 */
	ARM64_SYS_REG(3, 3, 14, 13, 0),	/* PMEVTYPER8_EL0 */
	ARM64_SYS_REG(3, 3, 14, 13, 1),	/* PMEVTYPER9_EL0 */
	ARM64_SYS_REG(3, 3, 14, 13, 2),	/* PMEVTYPER10_EL0 */
	ARM64_SYS_REG(3, 3, 14, 13, 3),	/* PMEVTYPER11_EL0 */
	ARM64_SYS_REG(3, 3, 14, 13, 4),	/* PMEVTYPER12_EL0 */
	ARM64_SYS_REG(3, 3, 14, 13, 5),	/* PMEVTYPER13_EL0 */
	ARM64_SYS_REG(3, 3, 14, 13, 6),	/* PMEVTYPER14_EL0 */
	ARM64_SYS_REG(3, 3, 14, 13, 7),	/* PMEVTYPER15_EL0 */
	ARM64_SYS_REG(3, 3, 14, 14, 0),	/* PMEVTYPER16_EL0 */
	ARM64_SYS_REG(3, 3, 14, 14, 1),	/* PMEVTYPER17_EL0 */
	ARM64_SYS_REG(3, 3, 14, 14, 2),	/* PMEVTYPER18_EL0 */
	ARM64_SYS_REG(3, 3, 14, 14, 3),	/* PMEVTYPER19_EL0 */
	ARM64_SYS_REG(3, 3, 14, 14, 4),	/* PMEVTYPER20_EL0 */
	ARM64_SYS_REG(3, 3, 14, 14, 5),	/* PMEVTYPER21_EL0 */
	ARM64_SYS_REG(3, 3, 14, 14, 6),	/* PMEVTYPER22_EL0 */
	ARM64_SYS_REG(3, 3, 14, 14, 7),	/* PMEVTYPER23_EL0 */
	ARM64_SYS_REG(3, 3, 14, 15, 0),	/* PMEVTYPER24_EL0 */
	ARM64_SYS_REG(3, 3, 14, 15, 1),	/* PMEVTYPER25_EL0 */
	ARM64_SYS_REG(3, 3, 14, 15, 2),	/* PMEVTYPER26_EL0 */
	ARM64_SYS_REG(3, 3, 14, 15, 3),	/* PMEVTYPER27_EL0 */
	ARM64_SYS_REG(3, 3, 14, 15, 4),	/* PMEVTYPER28_EL0 */
	ARM64_SYS_REG(3, 3, 14, 15, 5),	/* PMEVTYPER29_EL0 */
	ARM64_SYS_REG(3, 3, 14, 15, 6),	/* PMEVTYPER30_EL0 */
	ARM64_SYS_REG(3, 3, 14, 15, 7),	/* PMCCFILTR_EL0 */
};
942
/*
 * Core FP/SIMD vector registers v0-v31, exposed as 128-bit KVM "core"
 * registers backed by fp_regs.vregs[] of struct kvm_user_pt_regs. Note
 * the SVE configs below list sve_regs instead of this sublist.
 */
static __u64 vregs[] = {
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
};
977
/*
 * SVE registers, present when the vcpu is created with the
 * KVM_ARM_VCPU_SVE feature: the vector-lengths pseudo-register (VLS),
 * slice 0 of Z0-Z31, P0-P15 and FFR, and ZCR_EL1. Only slice index 0 is
 * listed here; the KVM SVE register API reserves further slices for
 * possible larger vector lengths.
 */
static __u64 sve_regs[] = {
	KVM_REG_ARM64_SVE_VLS,
	KVM_REG_ARM64_SVE_ZREG(0, 0),
	KVM_REG_ARM64_SVE_ZREG(1, 0),
	KVM_REG_ARM64_SVE_ZREG(2, 0),
	KVM_REG_ARM64_SVE_ZREG(3, 0),
	KVM_REG_ARM64_SVE_ZREG(4, 0),
	KVM_REG_ARM64_SVE_ZREG(5, 0),
	KVM_REG_ARM64_SVE_ZREG(6, 0),
	KVM_REG_ARM64_SVE_ZREG(7, 0),
	KVM_REG_ARM64_SVE_ZREG(8, 0),
	KVM_REG_ARM64_SVE_ZREG(9, 0),
	KVM_REG_ARM64_SVE_ZREG(10, 0),
	KVM_REG_ARM64_SVE_ZREG(11, 0),
	KVM_REG_ARM64_SVE_ZREG(12, 0),
	KVM_REG_ARM64_SVE_ZREG(13, 0),
	KVM_REG_ARM64_SVE_ZREG(14, 0),
	KVM_REG_ARM64_SVE_ZREG(15, 0),
	KVM_REG_ARM64_SVE_ZREG(16, 0),
	KVM_REG_ARM64_SVE_ZREG(17, 0),
	KVM_REG_ARM64_SVE_ZREG(18, 0),
	KVM_REG_ARM64_SVE_ZREG(19, 0),
	KVM_REG_ARM64_SVE_ZREG(20, 0),
	KVM_REG_ARM64_SVE_ZREG(21, 0),
	KVM_REG_ARM64_SVE_ZREG(22, 0),
	KVM_REG_ARM64_SVE_ZREG(23, 0),
	KVM_REG_ARM64_SVE_ZREG(24, 0),
	KVM_REG_ARM64_SVE_ZREG(25, 0),
	KVM_REG_ARM64_SVE_ZREG(26, 0),
	KVM_REG_ARM64_SVE_ZREG(27, 0),
	KVM_REG_ARM64_SVE_ZREG(28, 0),
	KVM_REG_ARM64_SVE_ZREG(29, 0),
	KVM_REG_ARM64_SVE_ZREG(30, 0),
	KVM_REG_ARM64_SVE_ZREG(31, 0),
	KVM_REG_ARM64_SVE_PREG(0, 0),
	KVM_REG_ARM64_SVE_PREG(1, 0),
	KVM_REG_ARM64_SVE_PREG(2, 0),
	KVM_REG_ARM64_SVE_PREG(3, 0),
	KVM_REG_ARM64_SVE_PREG(4, 0),
	KVM_REG_ARM64_SVE_PREG(5, 0),
	KVM_REG_ARM64_SVE_PREG(6, 0),
	KVM_REG_ARM64_SVE_PREG(7, 0),
	KVM_REG_ARM64_SVE_PREG(8, 0),
	KVM_REG_ARM64_SVE_PREG(9, 0),
	KVM_REG_ARM64_SVE_PREG(10, 0),
	KVM_REG_ARM64_SVE_PREG(11, 0),
	KVM_REG_ARM64_SVE_PREG(12, 0),
	KVM_REG_ARM64_SVE_PREG(13, 0),
	KVM_REG_ARM64_SVE_PREG(14, 0),
	KVM_REG_ARM64_SVE_PREG(15, 0),
	KVM_REG_ARM64_SVE_FFR(0),
	ARM64_SYS_REG(3, 0, 1, 2, 0),	/* ZCR_EL1 */
};
1031
/*
 * Registers for which KVM_SET_ONE_REG is expected to be rejected in the
 * SVE configs. NOTE(review): presumably because VLS cannot be written
 * once the vcpu has been finalized (SVE_SUBLIST sets .finalize) — confirm
 * against the test's rejects_set handling.
 */
static __u64 sve_rejects_set[] = {
	KVM_REG_ARM64_SVE_VLS,
};
1035
1036 static __u64 pauth_addr_regs[] = {
1037 ARM64_SYS_REG(3, 0, 2, 1, 0), /* APIAKEYLO_EL1 */
1038 ARM64_SYS_REG(3, 0, 2, 1, 1), /* APIAKEYHI_EL1 */
1039 ARM64_SYS_REG(3, 0, 2, 1, 2), /* APIBKEYLO_EL1 */
1040 ARM64_SYS_REG(3, 0, 2, 1, 3), /* APIBKEYHI_EL1 */
1041 ARM64_SYS_REG(3, 0, 2, 2, 0), /* APDAKEYLO_EL1 */
1042 ARM64_SYS_REG(3, 0, 2, 2, 1), /* APDAKEYHI_EL1 */
1043 ARM64_SYS_REG(3, 0, 2, 2, 2), /* APDBKEYLO_EL1 */
1044 ARM64_SYS_REG(3, 0, 2, 2, 3) /* APDBKEYHI_EL1 */
1045 };
1046
/*
 * Pointer Authentication generic-key registers, present when the vcpu is
 * created with the KVM_ARM_VCPU_PTRAUTH_GENERIC feature.
 */
static __u64 pauth_generic_regs[] = {
	ARM64_SYS_REG(3, 0, 2, 3, 0),	/* APGAKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 3, 1),	/* APGAKEYHI_EL1 */
};
1051
/*
 * struct reg_sublist building blocks for the vcpu_config tables below.
 * Consistency fix: the first four sublists mixed a positional first field
 * ({ "base", ... }) with designated initializers while PAUTH_SUBLIST was
 * fully designated; all now use designated initializers (unspecified
 * fields are zero). Expansion semantics are unchanged.
 */
#define BASE_SUBLIST \
	{ .name = "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \
	{ .name = "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
#define PMU_SUBLIST \
	{ .name = "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
	  .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
#define SVE_SUBLIST \
	{ .name = "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, \
	  .finalize = true, .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
	  .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
#define PAUTH_SUBLIST \
	{ \
		.name = "pauth_address", \
		.capability = KVM_CAP_ARM_PTRAUTH_ADDRESS, \
		.feature = KVM_ARM_VCPU_PTRAUTH_ADDRESS, \
		.regs = pauth_addr_regs, \
		.regs_n = ARRAY_SIZE(pauth_addr_regs), \
	}, \
	{ \
		.name = "pauth_generic", \
		.capability = KVM_CAP_ARM_PTRAUTH_GENERIC, \
		.feature = KVM_ARM_VCPU_PTRAUTH_GENERIC, \
		.regs = pauth_generic_regs, \
		.regs_n = ARRAY_SIZE(pauth_generic_regs), \
	}
1078
/* Default vcpu: base regs + core FP/SIMD vregs, no optional features. */
static struct vcpu_config vregs_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	{0},
	},
};
/* Default vcpu with the PMUv3 feature enabled. */
static struct vcpu_config vregs_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};
/* SVE vcpu: sve_regs replaces the core vregs sublist. */
static struct vcpu_config sve_config = {
	.sublists = {
	BASE_SUBLIST,
	SVE_SUBLIST,
	{0},
	},
};
/* SVE vcpu with the PMUv3 feature enabled. */
static struct vcpu_config sve_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	SVE_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};
/* Pointer-auth vcpu (address + generic keys) with core vregs. */
static struct vcpu_config pauth_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	{0},
	},
};
/* Pointer-auth vcpu with the PMUv3 feature enabled. */
static struct vcpu_config pauth_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};
1126
/*
 * All vcpu configurations the test iterates over. NOTE(review): configs
 * requiring an unsupported capability are presumably skipped at runtime —
 * confirm against the capability checks in the test driver.
 */
static struct vcpu_config *vcpu_configs[] = {
	&vregs_config,
	&vregs_pmu_config,
	&sve_config,
	&sve_pmu_config,
	&pauth_config,
	&pauth_pmu_config,
};
static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
1136