// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <[email protected]>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
                         (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

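/*
 * Carve the host-donated memory region up into the contiguous areas
 * needed later on: the hyp vmemmap, the VM table, the hypervisor
 * stage-1 and host stage-2 page-tables, and the FF-A proxy buffers.
 * Allocations are served by the early (bump) allocator, so this must
 * run before the buddy allocator is up.
 */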
static int divide_memory_pool(void *virt, unsigned long size)
{
        unsigned long nr_pages;

        hyp_early_alloc_init(virt, size);

        nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
        vmemmap_base = hyp_early_alloc_contig(nr_pages);
        if (!vmemmap_base)
                return -ENOMEM;

        nr_pages = hyp_vm_table_pages();
        vm_table_base = hyp_early_alloc_contig(nr_pages);
        if (!vm_table_base)
                return -ENOMEM;

        nr_pages = hyp_s1_pgtable_pages();
        hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
        if (!hyp_pgt_base)
                return -ENOMEM;

        nr_pages = host_s2_pgtable_pages();
        host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
        if (!host_s2_pgt_base)
                return -ENOMEM;

        nr_pages = hyp_ffa_proxy_pages();
        ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
        if (!ffa_proxy_pages)
                return -ENOMEM;

        return 0;
}

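/*
 * Map each CPU's host SVE state buffer into the hypervisor stage-1 so
 * that EL2 can save/restore the host's SVE registers. A no-op on
 * systems without SVE.
 */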
static int pkvm_create_host_sve_mappings(void)
{
        void *start, *end;
        int ret, i;

        if (!system_supports_sve())
                return 0;

        for (i = 0; i < hyp_nr_cpus; i++) {
                struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
                struct cpu_sve_state *sve_state = host_data->sve_state;

                start = kern_hyp_va(sve_state);
                end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
                ret = pkvm_create_mappings(start, end, PAGE_HYP);
                if (ret)
                        return ret;
        }

        return 0;
}

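/*
 * Rebuild the hypervisor stage-1 page-table from scratch: the idmap,
 * the vectors, the hyp text/rodata/bss sections, the donated memory
 * pool, and the per-CPU areas and stacks. The page-table pages come
 * from hyp_pgt_base, handed out by the early allocator.
 */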
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
                                 unsigned long *per_cpu_base,
                                 u32 hyp_va_bits)
{
        void *start, *end, *virt = hyp_phys_to_virt(phys);
        unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
        int ret, i;

        /* Recreate the hyp page-table using the early page allocator */
        hyp_early_alloc_init(hyp_pgt_base, pgt_size);
        ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
                                   &hyp_early_alloc_mm_ops);
        if (ret)
                return ret;

        ret = hyp_create_idmap(hyp_va_bits);
        if (ret)
                return ret;

        ret = hyp_map_vectors();
        if (ret)
                return ret;

        ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
        if (ret)
                return ret;

        for (i = 0; i < hyp_nr_cpus; i++) {
                struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);

                start = (void *)kern_hyp_va(per_cpu_base[i]);
                end = start + PAGE_ALIGN(hyp_percpu_size);
                ret = pkvm_create_mappings(start, end, PAGE_HYP);
                if (ret)
                        return ret;

                ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
                if (ret)
                        return ret;
        }

        return pkvm_create_host_sve_mappings();
}

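/*
 * Point each CPU's init parameters at the new hyp PGD, then clean and
 * invalidate the corresponding cache lines to the PoC so that the
 * update is visible to the MMU-off idmap code which consumes them.
 */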
static void update_nvhe_init_params(void)
{
        struct kvm_nvhe_init_params *params;
        unsigned long i;

        for (i = 0; i < hyp_nr_cpus; i++) {
                params = per_cpu_ptr(&kvm_init_params, i);
                params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
                dcache_clean_inval_poc((unsigned long)params,
                                       (unsigned long)params + sizeof(*params));
        }
}

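/*
 * Thin wrappers around the hyp buddy allocator ('hpool'), matching the
 * callback signatures expected by struct kvm_pgtable_mm_ops below.
 */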
static void *hyp_zalloc_hyp_page(void *arg)
{
        return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
        hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
        hyp_put_page(&hpool, addr);
}

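/*
 * Leaf walker run over the hyp stage-1: for each valid mapping of a
 * memory page, derive the page's ownership state from the hyp PTE and
 * reflect it in the host's view, either by yanking the page from the
 * host stage-2 (hyp-owned) or by recording the shared state in the
 * hyp vmemmap.
 */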
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
                                     enum kvm_pgtable_walk_flags visit)
{
        enum pkvm_page_state state;
        phys_addr_t phys;

        if (!kvm_pte_valid(ctx->old))
                return 0;

        if (ctx->level != KVM_PGTABLE_LAST_LEVEL)
                return -EINVAL;

        phys = kvm_pte_to_phys(ctx->old);
        if (!addr_is_memory(phys))
                return -EINVAL;

        /*
         * Adjust the host stage-2 mappings to match the ownership attributes
         * configured in the hypervisor stage-1.
         */
        state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
        switch (state) {
        case PKVM_PAGE_OWNED:
                return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
        case PKVM_PAGE_SHARED_OWNED:
                hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_BORROWED;
                break;
        case PKVM_PAGE_SHARED_BORROWED:
                hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_OWNED;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

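/*
 * Take one reference per valid entry, so that each page-table page's
 * refcount ends up tracking the number of valid PTEs it contains (the
 * buddy allocator has already initialised it to '1').
 */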
static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
                                         enum kvm_pgtable_walk_flags visit)
{
        /*
         * Fix-up the refcount for the page-table pages as the early allocator
         * was unable to access the hyp_vmemmap and so the buddy allocator has
         * initialised the refcount to '1'.
         */
        if (kvm_pte_valid(ctx->old))
                ctx->mm_ops->get_page(ctx->ptep);

        return 0;
}

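/*
 * Walk the hyp stage-1 over every hyp memblock and fix up the host's
 * stage-2 view of each mapped page (see fix_host_ownership_walker()).
 */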
static int fix_host_ownership(void)
{
        struct kvm_pgtable_walker walker = {
                .cb = fix_host_ownership_walker,
                .flags = KVM_PGTABLE_WALK_LEAF,
        };
        int i, ret;

        for (i = 0; i < hyp_memblock_nr; i++) {
                struct memblock_region *reg = &hyp_memory[i];
                u64 start = (u64)hyp_phys_to_virt(reg->base);

                ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
                if (ret)
                        return ret;
        }

        return 0;
}

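/*
 * Walk the entire hyp VA range so that every page-table page picks up
 * the references it is owed (see the walker above).
 */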
static int fix_hyp_pgtable_refcnt(void)
{
        struct kvm_pgtable_walker walker = {
                .cb = fix_hyp_pgtable_refcnt_walker,
                .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
                .arg = pkvm_pgtable.mm_ops,
        };

        return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
                                &walker);
}

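/*
 * Second stage of the hypervisor's initialisation, tail-called from
 * handle___pkvm_init() once the new stage-1 is live: switch from the
 * early allocator to the buddy allocator, prepare the host stage-2,
 * fix up page ownership and page-table refcounts, then set up the
 * per-CPU fixmap, the FF-A proxy and the VM table. Returns to the
 * host via __host_enter() rather than by a normal function return.
 */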
void __noreturn __pkvm_init_finalise(void)
{
        struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
        unsigned long nr_pages, reserved_pages, pfn;
        int ret;

        /* Now that the vmemmap is backed, install the full-fledged allocator */
        pfn = hyp_virt_to_pfn(hyp_pgt_base);
        nr_pages = hyp_s1_pgtable_pages();
        reserved_pages = hyp_early_alloc_nr_used_pages();
        ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
        if (ret)
                goto out;

        ret = kvm_host_prepare_stage2(host_s2_pgt_base);
        if (ret)
                goto out;

        pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
                .zalloc_page = hyp_zalloc_hyp_page,
                .phys_to_virt = hyp_phys_to_virt,
                .virt_to_phys = hyp_virt_to_phys,
                .get_page = hpool_get_page,
                .put_page = hpool_put_page,
                .page_count = hyp_page_count,
        };
        pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

        ret = fix_host_ownership();
        if (ret)
                goto out;

        ret = fix_hyp_pgtable_refcnt();
        if (ret)
                goto out;

        ret = hyp_create_pcpu_fixmap();
        if (ret)
                goto out;

        ret = hyp_ffa_init(ffa_proxy_pages);
        if (ret)
                goto out;

        pkvm_hyp_vm_table_init(vm_table_base);
out:
        /*
         * We tail-called to here from handle___pkvm_init() and will not return,
         * so make sure to propagate the return value to the host.
         */
        cpu_reg(host_ctxt, 1) = ret;

        __host_enter(host_ctxt);
}

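/*
 * First stage of the hypervisor's initialisation, run while the
 * kernel-provided page-tables are still in use: carve up the donated
 * memory, rebuild the hyp stage-1, then branch (via the physical
 * address of __pkvm_init_switch_pgd, reachable through the idmap) to
 * install the new page-tables and jump to __pkvm_init_finalise().
 * Does not return on success.
 */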
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
                unsigned long *per_cpu_base, u32 hyp_va_bits)
{
        struct kvm_nvhe_init_params *params;
        void *virt = hyp_phys_to_virt(phys);
        typeof(__pkvm_init_switch_pgd) *fn;
        int ret;

        BUG_ON(kvm_check_pvm_sysreg_table());

        if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
                return -EINVAL;

        hyp_spin_lock_init(&pkvm_pgd_lock);
        hyp_nr_cpus = nr_cpus;

        ret = divide_memory_pool(virt, size);
        if (ret)
                return ret;

        ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
        if (ret)
                return ret;

        update_nvhe_init_params();

        /* Jump in the idmap page to switch to the new page-tables */
        params = this_cpu_ptr(&kvm_init_params);
        fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
        fn(params->pgd_pa, params->stack_hyp_va, __pkvm_init_finalise);

        unreachable();
}