/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <[email protected]>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/ptrace.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_IRQCHIP_NUM_PINS		1024

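/*
 * Arch-specific VCPU requests, processed before each guest entry. Note
 * that KVM_REQ_HFENCE_GVMA_VMID_ALL reuses the generic KVM_REQ_TLB_FLUSH
 * slot, so a generic TLB flush request becomes a full G-stage fence.
 */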
#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I			\
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL		\
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE			\
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(6)

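/*
 * Exceptions and interrupts delegated to the guest (VS-mode) by default:
 * traps in these sets are taken directly by the guest without a VM exit.
 */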
#define KVM_HEDELEG_DEFAULT		(BIT(EXC_INST_MISALIGNED) | \
					 BIT(EXC_BREAKPOINT)      | \
					 BIT(EXC_SYSCALL)         | \
					 BIT(EXC_INST_PAGE_FAULT) | \
					 BIT(EXC_LOAD_PAGE_FAULT) | \
					 BIT(EXC_STORE_PAGE_FAULT))

#define KVM_HIDELEG_DEFAULT		(BIT(IRQ_VS_SOFT)  | \
					 BIT(IRQ_VS_TIMER) | \
					 BIT(IRQ_VS_EXT))

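/*
 * Types of asynchronous HFENCE requests which can be queued on a VCPU
 * (see hfence_queue in struct kvm_vcpu_arch below) and processed via
 * the KVM_REQ_HFENCE request.
 */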
enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 wrs_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
	u64 instr_illegal_exits;
	u64 load_misaligned_exits;
	u64 store_misaligned_exits;
	u64 load_access_exits;
	u64 store_access_exits;
};

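/* No arch-specific per-memslot state is needed on RISC-V */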
struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;

	/* AIA Guest/VM context */
	struct kvm_aia aia;
};

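/*
 * Trap details captured on guest exit: SEPC, SCAUSE, and STVAL along
 * with the hypervisor trap CSRs HTVAL and HTINST.
 */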
struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

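/*
 * Saved general-purpose registers plus SEPC, SSTATUS, and HSTATUS; one
 * instance each is kept for the host and the guest and swapped by the
 * low-level world-switch code.
 */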
struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
	struct __riscv_v_ext_state vector;
};

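/* Guest VS-level CSR state, loaded into hardware while the VCPU runs */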
struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
	unsigned long senvcfg;
};

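/* Static VCPU configuration, computed once before the VCPU first runs */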
struct kvm_vcpu_config {
	u64 henvcfg;
	u64 hstateen0;
	unsigned long hedeleg;
};

struct kvm_vcpu_smstateen_csr {
	unsigned long sstateen0;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* Vendor, Arch, and Implementation details */
	unsigned long mvendorid;
	unsigned long marchid;
	unsigned long mimpid;

	/* SSCRATCH, STVEC, SCOUNTEREN, SENVCFG, and SSTATEEN0 of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;
	unsigned long host_senvcfg;
	unsigned long host_sstateen0;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU Smstateen CSR context of Guest VCPU */
	struct kvm_vcpu_smstateen_csr smstateen_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;
	spinlock_t reset_cntx_lock;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts whereas irqs_pending_mask represents bits changed
	 * in irqs_pending. Our approach is modeled on a multiple-producer,
	 * single-consumer problem where the consumer is the VCPU itself:
	 * producers set bits in both bitmaps, and the consumer atomically
	 * clears irqs_pending_mask while folding the flagged irqs_pending
	 * bits into the guest HVIP CSR.
	 */
#define KVM_RISCV_VCPU_NR_IRQS	64
	DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_vcpu_sbi_context sbi_context;

	/* AIA VCPU context */
	struct kvm_vcpu_aia aia_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* Performance monitoring context */
	struct kvm_pmu pmu_context;

	/* 'static' configurations which are set only once */
	struct kvm_vcpu_config cfg;

	/* SBI steal-time accounting */
	struct {
		gpa_t shmem;
		u64 last_steal;
	} sta;
};

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context.  For riscv, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

static inline void kvm_arch_sync_events(struct kvm *kvm) {}

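/*
 * TLB management: the kvm_riscv_local_*() helpers below fence only the
 * current host CPU, whereas the kvm_riscv_hfence_*() APIs further down
 * queue fence requests to a set of VCPUs.
 */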
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

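/* Handlers invoked on a VCPU when the corresponding KVM_REQ_* request fires */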
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

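/*
 * Issue fences to the VCPUs selected by the hbase/hmask pair, which uses
 * the same hart-mask encoding as the SBI specification.
 */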
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);

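/* G-stage (guest-physical to host-physical) page table management */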
int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void __init kvm_riscv_gstage_mode_detect(void);
unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

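/* G-stage VMID management */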
void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);

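/*
 * Read guest memory using the hypervisor load instructions (HLV/HLVX);
 * __kvm_riscv_unpriv_trap() is the fixup entry used when such an
 * unprivileged access traps.
 */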
void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

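/* Low-level world-switch, implemented in assembly */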
void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices);
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);

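/*
 * Interrupt injection and VCPU power-state helpers; the __-prefixed
 * power off/on variants expect mp_state_lock to be held by the caller.
 */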
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);

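/* SBI STA (steal-time accounting) extension support */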
void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */