Lines Matching full:ve
197 * that no #VE will be delivered for accesses to TD-private memory.
199 * TDX 1.0 does not allow the guest to disable SEPT #VE on its own. The VMM
200 * controls whether the guest will receive such a #VE via the TD attribute
206 * Check if the feature is available and disable SEPT #VE if possible.
216 const char *msg = "TD misconfiguration: SEPT #VE has to be disabled"; in disable_sept_ve()
220 /* Is this TD allowed to disable SEPT #VE? */ in disable_sept_ve()
223 /* No SEPT #VE controls for the guest: check the attribute */ in disable_sept_ve()
235 /* Check if SEPT #VE has been disabled before us */ in disable_sept_ve()
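
Read together, the disable_sept_ve() matches outline a three-step probe: see whether this TD owns a guest-side pending-#VE control, fall back to checking the TD attribute if it does not, and skip the write if SEPT #VE is already off. A minimal sketch of that flow follows; the ATTR_SEPT_VE_DISABLE, TDCS_TD_CTLS and TD_CTLS_PENDING_VE_DISABLE names, the td_has_pending_ve_control() probe and the tdx_panic()/tdg_vm_rd()/tdg_vm_wr() helpers are assumptions, not taken from the matches above.

    static void disable_sept_ve(u64 td_attr)
    {
        const char *msg = "TD misconfiguration: SEPT #VE has to be disabled";
        u64 controls;

        /* Is this TD allowed to disable SEPT #VE? (assumed probe) */
        if (!td_has_pending_ve_control()) {
            /* No SEPT #VE controls for the guest: check the attribute */
            if (!(td_attr & ATTR_SEPT_VE_DISABLE))
                tdx_panic(msg);
            return;
        }

        /* Check if SEPT #VE has been disabled before us */
        tdg_vm_rd(TDCS_TD_CTLS, &controls);
        if (controls & TD_CTLS_PENDING_VE_DISABLE)
            return;

        /* Otherwise disable it through the guest-writable TD control */
        tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_PENDING_VE_DISABLE,
                  TD_CTLS_PENDING_VE_DISABLE);
    }
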
250 * TDX 1.0 generates a #VE when accessing topology-related CPUID leaves (0xB and 0x1F)
325 * The TDX module spec states that #VE may be injected for a limited set of
328 * - Emulation of the architectural #VE injection on EPT violation;
342 * information if #VE occurred due to instruction execution, but not for EPT
345 static int ve_instr_len(struct ve_info *ve) in ve_instr_len() argument
347 switch (ve->exit_reason) { in ve_instr_len()
353 /* It is safe to use ve->instr_len for #VE due to instructions */ in ve_instr_len()
354 return ve->instr_len; in ve_instr_len()
357 * For EPT violations, ve->instr_len is not defined. For those, in ve_instr_len()
361 WARN_ONCE(1, "ve->instr_len is not defined for EPT violations"); in ve_instr_len()
364 WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason); in ve_instr_len()
365 return ve->instr_len; in ve_instr_len()
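
The scattered ve_instr_len() matches all come from one switch on ve->exit_reason. A consolidated sketch; the exact set of instruction-type case labels is assumed from the handlers dispatched later in this listing, not visible in the matches themselves.

    static int ve_instr_len(struct ve_info *ve)
    {
        switch (ve->exit_reason) {
        case EXIT_REASON_HLT:
        case EXIT_REASON_MSR_READ:
        case EXIT_REASON_MSR_WRITE:
        case EXIT_REASON_CPUID:
        case EXIT_REASON_IO_INSTRUCTION:
            /* It is safe to use ve->instr_len for #VE due to instructions */
            return ve->instr_len;
        case EXIT_REASON_EPT_VIOLATION:
            /*
             * For EPT violations, ve->instr_len is not defined. For those,
             * the kernel must decode instructions manually and should not
             * be using this function.
             */
            WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
            return 0;
        default:
            WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
            return ve->instr_len;
        }
    }
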
392 static int handle_halt(struct ve_info *ve) in handle_halt() argument
399 return ve_instr_len(ve); in handle_halt()
423 static int read_msr(struct pt_regs *regs, struct ve_info *ve) in read_msr() argument
441 return ve_instr_len(ve); in read_msr()
444 static int write_msr(struct pt_regs *regs, struct ve_info *ve) in write_msr() argument
461 return ve_instr_len(ve); in write_msr()
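
Both MSR handlers reduce to a single TDVMCALL. A sketch of the read side, assuming the struct tdx_module_args register layout and the hcall_func()/__tdx_hypercall() helpers; the write side differs only in passing the value in (regs->ax/regs->dx) and copying nothing back out.

    static int read_msr(struct pt_regs *regs, struct ve_info *ve)
    {
        struct tdx_module_args args = {
            .r10 = TDX_HYPERCALL_STANDARD,
            .r11 = hcall_func(EXIT_REASON_MSR_READ),
            .r12 = regs->cx,    /* MSR index */
        };

        /* Ask the VMM to emulate the RDMSR; non-zero status means failure */
        if (__tdx_hypercall(&args))
            return -EIO;

        /* The emulated MSR value comes back in R11 */
        regs->ax = lower_32_bits(args.r11);
        regs->dx = upper_32_bits(args.r11);
        return ve_instr_len(ve);
    }
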
464 static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve) in handle_cpuid() argument
482 return ve_instr_len(ve); in handle_cpuid()
503 return ve_instr_len(ve); in handle_cpuid()
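
handle_cpuid() has two exits visible above: an early success return and the normal return after forwarding to the VMM. A sketch, assuming that only the hypervisor-reserved leaf range 0x40000000-0x4FFFFFFF is forwarded and that out-of-range leaves return all zeros, mirroring CPU behaviour for unsupported leaves.

    static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
    {
        struct tdx_module_args args = {
            .r10 = TDX_HYPERCALL_STANDARD,
            .r11 = hcall_func(EXIT_REASON_CPUID),
            .r12 = regs->ax,    /* leaf */
            .r13 = regs->cx,    /* subleaf */
        };

        /* Only let the VMM control the hypervisor communication range */
        if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
            regs->ax = regs->bx = regs->cx = regs->dx = 0;
            return ve_instr_len(ve);
        }

        if (__tdx_hypercall(&args))
            return -EIO;

        /* EAX/EBX/ECX/EDX come back in R12-R15 */
        regs->ax = args.r12;
        regs->bx = args.r13;
        regs->cx = args.r14;
        regs->dx = args.r15;

        return ve_instr_len(ve);
    }
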
529 static int handle_mmio(struct pt_regs *regs, struct ve_info *ve) in handle_mmio() argument
558 if (!fault_in_kernel_space(ve->gla)) { in handle_mmio()
580 if (!mmio_write(size, ve->gpa, val)) in handle_mmio()
585 if (!mmio_write(size, ve->gpa, val)) in handle_mmio()
607 if (!mmio_read(size, ve->gpa, &val)) in handle_mmio()
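
The mmio_read()/mmio_write() helpers seen above turn one EPT-violation #VE into one VMM-emulated access of the given size at ve->gpa. A sketch of both, assuming the EPT_READ/EPT_WRITE direction encodings and the _tdx_hypercall() convenience wrapper.

    static bool mmio_write(int size, unsigned long addr, unsigned long val)
    {
        /* Zero status from the hypercall means the store was emulated */
        return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
                               EPT_WRITE, addr, val);
    }

    static bool mmio_read(int size, unsigned long addr, unsigned long *val)
    {
        struct tdx_module_args args = {
            .r10 = TDX_HYPERCALL_STANDARD,
            .r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
            .r12 = size,
            .r13 = EPT_READ,
            .r14 = addr,
        };

        if (__tdx_hypercall(&args))
            return false;

        *val = args.r11;    /* loaded value comes back in R11 */
        return true;
    }
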
687 static int handle_io(struct pt_regs *regs, struct ve_info *ve) in handle_io() argument
689 u32 exit_qual = ve->exit_qual; in handle_io()
708 return ve_instr_len(ve); in handle_io()
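
handle_io() needs nothing beyond ve->exit_qual: direction, access size, port number and a string-operation flag are all packed into it. A sketch, with the bit-field encodings and the handle_in()/handle_out() helpers assumed.

    #define VE_IS_IO_IN(e)      ((e) & BIT(3))
    #define VE_GET_IO_SIZE(e)   (((e) & GENMASK(2, 0)) + 1)
    #define VE_GET_PORT_NUM(e)  ((e) >> 16)
    #define VE_IS_IO_STRING(e)  ((e) & BIT(4))

    static int handle_io(struct pt_regs *regs, struct ve_info *ve)
    {
        u32 exit_qual = ve->exit_qual;
        int size, port;
        bool in, ret;

        /* String I/O (INS/OUTS) is not supported */
        if (VE_IS_IO_STRING(exit_qual))
            return -EIO;

        in   = VE_IS_IO_IN(exit_qual);
        size = VE_GET_IO_SIZE(exit_qual);
        port = VE_GET_PORT_NUM(exit_qual);

        if (in)
            ret = handle_in(regs, size, port);
        else
            ret = handle_out(regs, size, port);
        if (!ret)
            return -EIO;

        return ve_instr_len(ve);
    }
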
712 * Early #VE exception handler. Only handles a subset of port I/O.
717 struct ve_info ve; in tdx_early_handle_ve() local
720 tdx_get_ve_info(&ve); in tdx_early_handle_ve()
722 if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION) in tdx_early_handle_ve()
725 insn_len = handle_io(regs, &ve); in tdx_early_handle_ve()
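
Stitching the tdx_early_handle_ve() matches together gives the whole early handler; only the error check on insn_len and the final IP adjustment (echoed by the "move the IP" comment near the end of this listing) are filled in here.

    __init bool tdx_early_handle_ve(struct pt_regs *regs)
    {
        struct ve_info ve;
        int insn_len;

        tdx_get_ve_info(&ve);

        /* Only a subset of port I/O is handled this early */
        if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
            return false;

        insn_len = handle_io(regs, &ve);
        if (insn_len < 0)
            return false;

        regs->ip += insn_len;
        return true;
    }
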
733 void tdx_get_ve_info(struct ve_info *ve) in tdx_get_ve_info() argument
738 * Called during #VE handling to retrieve the #VE info from the in tdx_get_ve_info()
741 * This has to be called early in #VE handling. A "nested" #VE which in tdx_get_ve_info()
744 * The call retrieves the #VE info from the TDX module, which also in tdx_get_ve_info()
745 * clears the "#VE valid" flag. This must be done before anything else in tdx_get_ve_info()
746 * because any #VE that occurs while the valid flag is set will lead to in tdx_get_ve_info()
749 * Note, the TDX module treats virtual NMIs as inhibited if the #VE in tdx_get_ve_info()
750 * valid flag is set. This means that NMI=>#VE will not result in a #DF. in tdx_get_ve_info()
755 ve->exit_reason = args.rcx; in tdx_get_ve_info()
756 ve->exit_qual = args.rdx; in tdx_get_ve_info()
757 ve->gla = args.r8; in tdx_get_ve_info()
758 ve->gpa = args.r9; in tdx_get_ve_info()
759 ve->instr_len = lower_32_bits(args.r10); in tdx_get_ve_info()
760 ve->instr_info = upper_32_bits(args.r10); in tdx_get_ve_info()
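
The register unpacking above is the tail of tdx_get_ve_info(); the retrieval itself is a single call into the TDX module. A sketch, assuming the TDG_VP_VEINFO_GET leaf name and a tdcall() wrapper around the raw TDCALL instruction.

    void tdx_get_ve_info(struct ve_info *ve)
    {
        struct tdx_module_args args = {};

        /*
         * Fetch the #VE info; as described above, this also clears the
         * "#VE valid" flag, so it must happen before anything that could
         * itself raise a #VE.
         */
        tdcall(TDG_VP_VEINFO_GET, &args);

        /* Transfer the output parameters */
        ve->exit_reason = args.rcx;
        ve->exit_qual   = args.rdx;
        ve->gla         = args.r8;
        ve->gpa         = args.r9;
        ve->instr_len   = lower_32_bits(args.r10);
        ve->instr_info  = upper_32_bits(args.r10);
    }
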
764 * Handle the user-initiated #VE.
769 static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve) in virt_exception_user() argument
771 switch (ve->exit_reason) { in virt_exception_user()
773 return handle_cpuid(regs, ve); in virt_exception_user()
775 pr_warn("Unexpected #VE: %lld\n", ve->exit_reason); in virt_exception_user()
786 * Handle the kernel #VE.
791 static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve) in virt_exception_kernel() argument
793 switch (ve->exit_reason) { in virt_exception_kernel()
795 return handle_halt(ve); in virt_exception_kernel()
797 return read_msr(regs, ve); in virt_exception_kernel()
799 return write_msr(regs, ve); in virt_exception_kernel()
801 return handle_cpuid(regs, ve); in virt_exception_kernel()
803 if (is_private_gpa(ve->gpa)) in virt_exception_kernel()
805 return handle_mmio(regs, ve); in virt_exception_kernel()
807 return handle_io(regs, ve); in virt_exception_kernel()
809 pr_warn("Unexpected #VE: %lld\n", ve->exit_reason); in virt_exception_kernel()
814 bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve) in tdx_handle_virt_exception() argument
819 insn_len = virt_exception_user(regs, ve); in tdx_handle_virt_exception()
821 insn_len = virt_exception_kernel(regs, ve); in tdx_handle_virt_exception()
825 /* After successful #VE handling, move the IP */ in tdx_handle_virt_exception()
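
The top-level handler is almost fully visible in the matches; this sketch fills in only the user/kernel dispatch (assumed to key off user_mode(regs)) and the failure check before the IP is moved.

    bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
    {
        int insn_len;

        if (user_mode(regs))
            insn_len = virt_exception_user(regs, ve);
        else
            insn_len = virt_exception_kernel(regs, ve);
        if (insn_len < 0)
            return false;

        /* After successful #VE handling, move the IP */
        regs->ip += insn_len;

        return true;
    }
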
1102 * - Shared mapping => Private Page == Recoverable #VE in tdx_early_init()
1111 * which can result in a #VE. But there is never a private mapping to in tdx_early_init()
1124 * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that in tdx_early_init()
1138 * bringup low-level code. That raises a #VE which cannot be handled in tdx_early_init()
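
The "sti;hlt" comment explains the fix only in prose: every halt path is rerouted to TDX-specific routines that issue the TDCALL directly, so interrupts are never re-enabled in the STI shadow before the hypercall. A sketch of what that rerouting could look like; the pv_ops hook names and the tdx_halt()/tdx_safe_halt() helpers are assumptions here.

    /* In tdx_early_init(): route all HLT paths to TDCALL-based halt */
    pv_ops.irq.safe_halt = tdx_safe_halt;
    pv_ops.irq.halt = tdx_halt;
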