/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2023 SUSE LLC <[email protected]>
 *
 * x86-specific KVM helper functions and structures for AMD SVM
 */

#ifndef KVM_X86_SVM_H_
#define KVM_X86_SVM_H_

#include "kvm_x86.h"

/* CPUID_GET_SVM_FEATURES flags returned in EDX */
#define SVM_CPUID_NESTED_PAGING (1 << 0)
#define SVM_CPUID_LBR_VIRT (1 << 1)
#define SVM_CPUID_LOCK (1 << 2)
#define SVM_CPUID_NRIP_SAVE (1 << 3)
#define SVM_CPUID_TSC_RATE_MSR (1 << 4)
#define SVM_CPUID_VMCB_CLEAN (1 << 5)
#define SVM_CPUID_FLUSH_ASID (1 << 6)
#define SVM_CPUID_DECODE_ASSIST (1 << 7)
#define SVM_CPUID_PAUSE_FILTER (1 << 10)
#define SVM_CPUID_PAUSE_THRESHOLD (1 << 12)
#define SVM_CPUID_AVIC (1 << 13)
#define SVM_CPUID_VMSAVE_VIRT (1 << 15)
#define SVM_CPUID_VGIF (1 << 16)
#define SVM_CPUID_GMET (1 << 17)
#define SVM_CPUID_X2AVIC (1 << 18)
#define SVM_CPUID_SSSCHECK (1 << 19)
#define SVM_CPUID_SPEC_CTRL (1 << 20)
#define SVM_CPUID_ROGPT (1 << 21)
#define SVM_CPUID_HOST_MCE_OVERRIDE (1 << 23)
#define SVM_CPUID_TLBI_CTL (1 << 24)
#define SVM_CPUID_NMI_VIRT (1 << 25)
#define SVM_CPUID_IBS_VIRT (1 << 26)

/* SVM event intercept IDs */
#define SVM_INTERCEPT_HLT 0x78
#define SVM_INTERCEPT_VMRUN 0x80
#define SVM_INTERCEPT_VMLOAD 0x82
#define SVM_INTERCEPT_VMSAVE 0x83
#define SVM_INTERCEPT_STGI 0x84
#define SVM_INTERCEPT_CLGI 0x85
#define SVM_INTERCEPT_MAX 0x95

/* SVM vmrun exit codes */
#define SVM_EXIT_HLT 0x78
#define SVM_EXIT_VMRUN 0x80
#define SVM_EXIT_VMLOAD 0x82
#define SVM_EXIT_VMSAVE 0x83
#define SVM_EXIT_STGI 0x84
#define SVM_EXIT_CLGI 0x85
#define SVM_EXIT_AVIC_NOACCEL 0x402
#define SVM_EXIT_INVALID ((uint64_t)-1)

/* SVM VMCB flags */
#define SVM_INTR_AVIC (1 << 7)

struct kvm_vmcb_descriptor {
	uint16_t selector;
	uint16_t attrib;
	uint32_t limit;
	uint64_t base;
};

struct kvm_vmcb {
	/* VMCB control area */
	uint8_t intercepts[20];
	uint8_t reserved1[44];
	uint64_t iopm_base_addr;
	uint64_t msrpm_base_addr;
	uint64_t tsc_offset;
	uint32_t guest_asid;
	uint32_t tlb_control;
	uint8_t virtual_tpr;
	uint8_t virtual_irq;
	unsigned char virt_intr_prio: 4;
	unsigned char virt_ignore_tpr: 4;
	uint8_t virt_intr_ctl;
	uint8_t virt_intr_vector;
	uint8_t reserved2[3];
	uint64_t interrupt_shadow;
	uint64_t exitcode;
	uint64_t exitinfo1;
	uint64_t exitinfo2;
	uint64_t exit_int_info;
	uint64_t enable_nested_paging;
	uint64_t avic_bar;
	uint64_t ghcb_gpa;
	uint64_t event_injection;
	uint64_t nested_cr3;
	uint64_t virt_ext;
	uint32_t vmcb_clean;
	uint8_t reserved3[4];
	uint64_t next_rip;
	uint8_t instr_len;
	uint8_t instr_bytes[15];
	uint64_t avic_backing_page;
	uint8_t reserved4[8];
	uint64_t avic_logical_ptr;
	uint64_t avic_physical_ptr;
	uint8_t reserved5[8];
	uint64_t vmsa_pa;
	uint64_t vmgexit_rax;
	uint8_t vmgexit_cpl;
	uint8_t reserved6[0x2e7];

	/* VMCB state save area */
	struct kvm_vmcb_descriptor es, cs, ss, ds, fs, gs;
	struct kvm_vmcb_descriptor gdtr, ldtr, idtr, tr;
	uint8_t reserved7[43];
	uint8_t cpl;
	uint8_t reserved8[4];
	uint64_t efer;
	uint8_t reserved9[112];
	uint64_t cr4;
	uint64_t cr3;
	uint64_t cr0;
	uint64_t dr7;
	uint64_t dr6;
	uint64_t rflags;
	uint64_t rip;
	uint8_t reserved10[88];
	uint64_t rsp;
	uint64_t s_cet;
	uint64_t ssp;
	uint64_t isst_addr;
	uint64_t rax;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernel_gs_base;
	uint64_t sysenter_cs;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t cr2;
	uint8_t reserved11[32];
	uint64_t guest_pat;
	uint8_t padding[0x990];
};

struct kvm_svm_vcpu {
	struct kvm_vmcb *vmcb;
	struct kvm_regs64 regs;
};

/* AMD SVM virtualization helper functions */
int kvm_is_svm_supported(void);
int kvm_get_svm_state(void);
void kvm_set_svm_state(int enabled);

void kvm_init_svm(void);	/* Fully initialize host SVM environment */
struct kvm_vmcb *kvm_alloc_vmcb(void);
void kvm_vmcb_copy_gdt_descriptor(struct kvm_vmcb_descriptor *dst,
	unsigned int gdt_id);
void kvm_vmcb_set_intercept(struct kvm_vmcb *vmcb, unsigned int id,
	unsigned int state);
void kvm_init_guest_vmcb(struct kvm_vmcb *vmcb, uint32_t asid, uint16_t ss,
	void *rsp, int (*guest_main)(void));
struct kvm_svm_vcpu *kvm_create_svm_vcpu(int (*guest_main)(void),
	int alloc_stack);

void kvm_svm_vmrun(struct kvm_svm_vcpu *cpu);

/* Load FS, GS, TR and LDTR state from vmsave_buf */
void kvm_svm_vmload(struct kvm_vmcb *buf);

/* Save current FS, GS, TR and LDTR state to vmsave_buf */
void kvm_svm_vmsave(struct kvm_vmcb *buf);

#endif /* KVM_X86_SVM_H_ */
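
/*
 * Example usage (illustrative sketch, not part of the original header):
 * one plausible way a caller could drive the helpers declared above to
 * run a nested SVM guest and inspect the resulting VMEXIT code. The
 * guest_main() body, the run_svm_hlt_example() wrapper, the assumption
 * that state == 1 enables an intercept, and the assumption that
 * alloc_stack == 1 allocates a guest stack are not defined by this file;
 * they only illustrate the intended call sequence.
 *
 *	static int guest_main(void)
 *	{
 *		// Code placed here runs inside the nested guest after VMRUN.
 *		// How control returns to the host depends on the vCPU wrapper
 *		// created by kvm_create_svm_vcpu() and on the intercepts set.
 *		return 0;
 *	}
 *
 *	static void run_svm_hlt_example(void)
 *	{
 *		struct kvm_svm_vcpu *vcpu;
 *
 *		kvm_init_svm();		// enable and initialize host SVM state
 *		vcpu = kvm_create_svm_vcpu(guest_main, 1);
 *		kvm_vmcb_set_intercept(vcpu->vmcb, SVM_INTERCEPT_HLT, 1);
 *		kvm_svm_vmrun(vcpu);	// enter the guest, return on VMEXIT
 *
 *		// A caller would typically compare the exit code against the
 *		// SVM_EXIT_* constants above, e.g. SVM_EXIT_HLT when the guest
 *		// halts with the HLT intercept enabled.
 *		if (vcpu->vmcb->exitcode != SVM_EXIT_HLT) {
 *			// handle an unexpected VMEXIT here
 *		}
 *	}
 */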