// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <[email protected]>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128

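/* Host-side setup, creation and teardown of a VM's hypervisor state. */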
int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);

/*
 * This functions as an allow-list of protected VM capabilities.
 * Features not explicitly allowed by this function are denied.
 */
static inline bool kvm_pvm_ext_allowed(long ext)
{
	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
	case KVM_CAP_MSI_DEVID:
	case KVM_CAP_ARM_VM_IPA_SIZE:
	case KVM_CAP_ARM_PMU_V3:
	case KVM_CAP_ARM_SVE:
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		return true;
	default:
		return false;
	}
}

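/*
 * The memblock regions covered by the hypervisor, and the number of
 * entries in use.
 */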
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

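/*
 * Bytes of hyp vmemmap needed to cover one memblock region: one
 * vmemmap_entry_size-byte entry per page of the region, rounded out to
 * page boundaries at both ends.
 */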
static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}

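/* Pages needed to back the hyp vmemmap for all memblock regions. */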
static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

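/* Pages needed for the VM table: one pointer per supported pVM. */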
static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}

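/*
 * Worst-case number of page-table pages needed to map nr_pages at page
 * granularity, accumulated across all page-table levels.
 */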
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	/* Provision the worst case scenario */
	for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}

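/*
 * Worst-case page-table pages needed to map every memblock region at
 * page granularity.
 */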
static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

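/* Pages to reserve for the hypervisor's stage-1 page-table. */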
static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

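/* Pages to reserve for the host's stage-2 page-table. */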
static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

#define KVM_FFA_MBOX_NR_PAGES	1

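/* Pages needed by the hypervisor's FF-A proxy. */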
static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}

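/*
 * Size of the buffer needed to save the host's SVE state at the
 * maximum vector length the host supports; 0 when SVE is absent.
 */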
static inline size_t pkvm_host_sve_state_size(void)
{
	if (!system_supports_sve())
		return 0;

	return size_add(sizeof(struct cpu_sve_state),
			SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}

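/*
 * Host-side record of a single gfn -> pfn mapping for a protected VM,
 * linked into an rb-tree via its rb_node.
 */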
struct pkvm_mapping {
	struct rb_node node;
	u64 gfn;
	u64 pfn;
};

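/* pKVM counterparts to the kvm_pgtable stage-2 operations. */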
int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			     struct kvm_pgtable_mm_ops *mm_ops);
void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			    enum kvm_pgtable_prot prot, void *mc,
			    enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
				    enum kvm_pgtable_walk_flags flags);
void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				 enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			      struct kvm_mmu_memory_cache *mc);
void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);
kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
					       enum kvm_pgtable_prot prot, void *mc,
					       bool force_pte);
#endif	/* __ARM64_KVM_PKVM_H__ */