/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <[email protected]>
 */

#ifndef __ARM64_KVM_NVHE_PKVM_H__
#define __ARM64_KVM_NVHE_PKVM_H__

#include <asm/kvm_pkvm.h>

#include <nvhe/gfp.h>
#include <nvhe/spinlock.h>

/*
 * Holds the relevant data for maintaining the vCPU state completely at hyp.
 */
struct pkvm_hyp_vcpu {
	struct kvm_vcpu vcpu;

	/* Backpointer to the host's (untrusted) vCPU instance. */
	struct kvm_vcpu *host_vcpu;

	/*
	 * If this hyp vCPU is loaded, then this is a backpointer to the
	 * per-cpu pointer tracking us. Otherwise, NULL.
	 */
	struct pkvm_hyp_vcpu **loaded_hyp_vcpu;
};
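
/*
 * Illustrative sketch, not part of this header's API: when a hyp vCPU is
 * loaded, 'loaded_hyp_vcpu' and the per-cpu slot it points at are expected
 * to reference each other, i.e.:
 *
 *	if (hyp_vcpu->loaded_hyp_vcpu)
 *		WARN_ON(*hyp_vcpu->loaded_hyp_vcpu != hyp_vcpu);
 */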

/*
 * Holds the relevant data for running a protected VM.
 */
struct pkvm_hyp_vm {
	struct kvm kvm;

	/* Backpointer to the host's (untrusted) KVM instance. */
	struct kvm *host_kvm;

	/* The guest's stage-2 page-table managed by the hypervisor. */
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	struct hyp_pool pool;
	hyp_spinlock_t lock;

	/*
	 * The number of vCPUs initialized and ready to run.
	 * Modifying this is protected by 'vm_table_lock'.
	 */
	unsigned int nr_vcpus;

	/* Array of the hyp vCPU structures for this VM. */
	struct pkvm_hyp_vcpu *vcpus[];
};
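
/*
 * Illustrative sketch, not part of this header's API: 'vcpus' is a
 * flexible array member, so a VM with N vCPUs is expected to be allocated
 * with room for all of its vCPU pointers and walked up to 'nr_vcpus',
 * roughly (handle_vcpu() is a placeholder):
 *
 *	size_t sz = struct_size(hyp_vm, vcpus, nr_vcpus);
 *	...
 *	for (i = 0; i < hyp_vm->nr_vcpus; i++)
 *		handle_vcpu(hyp_vm->vcpus[i]);
 */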

extern hyp_spinlock_t vm_table_lock;

static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
}

static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return vcpu_is_protected(&hyp_vcpu->vcpu);
}

static inline bool pkvm_hyp_vm_is_protected(struct pkvm_hyp_vm *hyp_vm)
{
	return kvm_vm_is_protected(&hyp_vm->kvm);
}

void pkvm_hyp_vm_table_init(void *tbl);

int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva);
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva);
int __pkvm_teardown_vm(pkvm_handle_t handle);
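
/*
 * Illustrative lifecycle sketch, assuming these are invoked on behalf of
 * the host: the host donates memory for the hyp VM and its stage-2 PGD,
 * initializes each vCPU against the handle returned by __pkvm_init_vm(),
 * and finally tears the VM down by handle, roughly:
 *
 *	handle = __pkvm_init_vm(host_kvm, vm_hva, pgd_hva);
 *	for each vCPU:
 *		err = __pkvm_init_vcpu(handle, host_vcpu, vcpu_hva);
 *	...
 *	err = __pkvm_teardown_vm(handle);
 */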

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx);
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);
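
/*
 * Illustrative sketch, not part of this header's API: a successful load is
 * expected to be balanced by a put once work on the hyp vCPU is done:
 *
 *	hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
 *	if (hyp_vcpu) {
 *		... operate on hyp_vcpu ...
 *		pkvm_put_hyp_vcpu(hyp_vcpu);
 *	}
 */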

struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle);
void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);
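
/*
 * Illustrative sketch, not part of this header's API: handle-based VM
 * lookups are likewise expected to be balanced with a put:
 *
 *	hyp_vm = get_pkvm_hyp_vm(handle);
 *	if (hyp_vm) {
 *		... operate on hyp_vm ...
 *		put_pkvm_hyp_vm(hyp_vm);
 *	}
 */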

bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu);
int kvm_check_pvm_sysreg_table(void);

#endif /* __ARM64_KVM_NVHE_PKVM_H__ */