Lines matching in arch/x86/kvm/vmx/vmx.c

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
5 * This module enables machines with Intel VT-x extensions to run virtual
31 #include <linux/entry-kvm.h>
50 #include <asm/spec-ctrl.h>
77 MODULE_DESCRIPTION("KVM support for VMX (Intel VT-x) extensions");
140 /* Guest_tsc -> host_tsc conversion requires 64-bit division. */
192 * These two parameters are used to configure the controls for Pause-Loop Exiting:
208 /* Default doubles per-vcpu window every exit. */
212 /* Default resets per-vcpu window every exit to ple_window. */
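For reference, the grow/shrink arithmetic driven by these parameters (see grow_ple_window()/shrink_ple_window() further down) can be sketched in isolation. A minimal user-space model, assuming the default modifiers (grow = 2, shrink = 0) and treating a modifier smaller than the base window as multiplicative, as the kernel's __grow_ple_window()/__shrink_ple_window() helpers do; not verbatim kernel code:

/* Illustrative sketch of the PLE window scaling. */
#include <stdio.h>

static unsigned int grow_window(unsigned int old, unsigned int base,
                                unsigned int modifier, unsigned int max)
{
	unsigned long long new = old;

	if (modifier < 1)
		return base;
	/* Small modifiers multiply, large ones act as increments. */
	new = modifier < base ? new * modifier : new + modifier;
	return new > max ? max : (unsigned int)new;
}

static unsigned int shrink_window(unsigned int old, unsigned int base,
                                  unsigned int modifier)
{
	if (modifier < 1)
		return base;                       /* default: reset to ple_window */
	old = modifier < base ? old / modifier : old - modifier;
	return old < base ? base : old;
}

int main(void)
{
	unsigned int w = 4096;                     /* default ple_window */

	w = grow_window(w, 4096, 2, 1u << 16);     /* doubles: 8192 */
	printf("grown to %u\n", w);
	w = shrink_window(w, 4096, 0);             /* resets: 4096 */
	printf("shrunk to %u\n", w);
	return 0;
}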
220 /* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */
298 return -ENOMEM; in vmx_setup_l1d_flush()
337 return -EINVAL; in vmentry_l1d_flush_parse()
380 if (!vmx->disable_fb_clear) in vmx_disable_fb_clear()
387 vmx->msr_ia32_mcu_opt_ctrl = msr; in vmx_disable_fb_clear()
392 if (!vmx->disable_fb_clear) in vmx_enable_fb_clear()
395 vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS; in vmx_enable_fb_clear()
396 native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); in vmx_enable_fb_clear()
406 * CPU buffers is unnecessary, at the cost of MSR accesses on VM-Entry in vmx_update_fb_clear_dis()
407 * and VM-Exit. in vmx_update_fb_clear_dis()
409 vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) && in vmx_update_fb_clear_dis()
419 if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) || in vmx_update_fb_clear_dis()
420 ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) && in vmx_update_fb_clear_dis()
421 (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) && in vmx_update_fb_clear_dis()
422 (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) && in vmx_update_fb_clear_dis()
423 (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) && in vmx_update_fb_clear_dis()
424 (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO))) in vmx_update_fb_clear_dis()
425 vmx->disable_fb_clear = false; in vmx_update_fb_clear_dis()
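The net effect of the checks above can be restated as one predicate: FB_CLEAR_DIS is not used (disable_fb_clear stays false) when the guest either enumerates FB_CLEAR itself, or enumerates every affected-data-sampling *_NO bit, in which case it will never execute VERW and the optimization buys nothing. A condensed sketch, with the ARCH_CAP_* bit positions taken from arch/x86/include/asm/msr-index.h:

/* Condensed restatement of the decision above; illustrative only. */
#include <stdbool.h>
#include <stdint.h>

#define ARCH_CAP_MDS_NO        (1ULL << 5)
#define ARCH_CAP_TAA_NO        (1ULL << 8)
#define ARCH_CAP_SBDR_SSDP_NO  (1ULL << 13)
#define ARCH_CAP_FBSDP_NO      (1ULL << 14)
#define ARCH_CAP_PSDP_NO       (1ULL << 15)
#define ARCH_CAP_FB_CLEAR      (1ULL << 17)

#define ARCH_CAP_ALL_SAMPLING_NO (ARCH_CAP_MDS_NO | ARCH_CAP_TAA_NO | \
				  ARCH_CAP_PSDP_NO | ARCH_CAP_FBSDP_NO | \
				  ARCH_CAP_SBDR_SSDP_NO)

/* true => keep FB_CLEAR behavior for this guest (disable_fb_clear = false) */
static bool skip_fb_clear_dis(uint64_t guest_arch_caps)
{
	return (guest_arch_caps & ARCH_CAP_FB_CLEAR) ||
	       (guest_arch_caps & ARCH_CAP_ALL_SAMPLING_NO) ==
						ARCH_CAP_ALL_SAMPLING_NO;
}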
494 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
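A toy, single-threaded model of that bookkeeping follows (the real list is an intrusive list_head per CPU, manipulated with interrupts disabled; all names here are illustrative):

/* Toy model of the per-CPU loaded-VMCS list; no locking/IRQ discipline. */
#include <stdio.h>

#define NR_CPUS 4

struct toy_loaded_vmcs {
	int cpu;                          /* -1 while not loaded on any CPU */
	struct toy_loaded_vmcs *next;     /* link in that CPU's list */
};

static struct toy_loaded_vmcs *loaded_vmcss_on_cpu[NR_CPUS];

static void load_on_cpu(struct toy_loaded_vmcs *v, int cpu)
{
	v->cpu = cpu;
	v->next = loaded_vmcss_on_cpu[cpu];
	loaded_vmcss_on_cpu[cpu] = v;
}

/* What the crash/kexec path needs: clear everything cached on one CPU. */
static void emergency_clear_cpu(int cpu)
{
	for (struct toy_loaded_vmcs *v = loaded_vmcss_on_cpu[cpu]; v; v = v->next)
		v->cpu = -1;              /* stands in for vmcs_clear(v->vmcs) */
	loaded_vmcss_on_cpu[cpu] = NULL;
}

int main(void)
{
	struct toy_loaded_vmcs a = { .cpu = -1 }, b = { .cpu = -1 };

	load_on_cpu(&a, 1);
	load_on_cpu(&b, 1);
	emergency_clear_cpu(1);
	printf("a.cpu=%d b.cpu=%d\n", a.cpu, b.cpu);   /* -1 -1 */
	return 0;
}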
542 return -ENOMEM; in hv_enable_l2_tlb_flush()
544 evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs; in hv_enable_l2_tlb_flush()
546 evmcs->partition_assist_page = partition_assist_page; in hv_enable_l2_tlb_flush()
547 evmcs->hv_vm_id = (unsigned long)vcpu->kvm; in hv_enable_l2_tlb_flush()
548 evmcs->hv_enlightenments_control.nested_flush_hypercall = 1; in hv_enable_l2_tlb_flush()
577 pr_info("Using Hyper-V Enlightened VMCS\n"); in hv_init_evmcs()
606 * Reset everything to support using non-enlightened VMCS access later in hv_reset_evmcs()
609 vp_ap->nested_control.features.directhypercall = 0; in hv_reset_evmcs()
610 vp_ap->current_nested_vmcs = 0; in hv_reset_evmcs()
611 vp_ap->enlighten_vmentry = 0; in hv_reset_evmcs()
620 * Format of each comment: document - errata name - stepping - processor name.
625 /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
627 /* 323056.pdf - AAX65 - C2 - Xeon L3406 */
628 /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
629 /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
631 /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
633 /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
634 /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
636 * 320767.pdf - AAP86 - B1 -
637 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
640 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
642 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
644 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
646 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
647 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
648 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
650 /* Xeon E3-1220 V2 */
679 return -ENOENT; in vmx_get_passthrough_msr_slot()
694 return -ENOENT; in vmx_get_passthrough_msr_slot()
703 return -ENOENT; in vmx_get_passthrough_msr_slot()
712 return &vmx->guest_uret_msrs[i]; in vmx_find_uret_msr()
719 unsigned int slot = msr - vmx->guest_uret_msrs; in vmx_set_guest_uret_msr()
722 if (msr->load_into_hardware) { in vmx_set_guest_uret_msr()
724 ret = kvm_set_user_return_msr(slot, data, msr->mask); in vmx_set_guest_uret_msr()
728 msr->data = data; in vmx_set_guest_uret_msr()
735 * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
736 * atomically track post-VMXON state, e.g. this may be called in NMI context.
738 * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
752 return -EIO; in kvm_cpu_vmxoff()
773 vmcs_clear(v->vmcs); in vmx_emergency_disable_virtualization_cpu()
783 if (loaded_vmcs->cpu != cpu) in __loaded_vmcs_clear()
785 if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) in __loaded_vmcs_clear()
788 vmcs_clear(loaded_vmcs->vmcs); in __loaded_vmcs_clear()
789 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) in __loaded_vmcs_clear()
790 vmcs_clear(loaded_vmcs->shadow_vmcs); in __loaded_vmcs_clear()
792 list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); in __loaded_vmcs_clear()
796 * current percpu list, complete before setting loaded_vmcs->cpu to in __loaded_vmcs_clear()
797 * -1, otherwise a different cpu can see loaded_vmcs->cpu == -1 first in __loaded_vmcs_clear()
803 loaded_vmcs->cpu = -1; in __loaded_vmcs_clear()
804 loaded_vmcs->launched = 0; in __loaded_vmcs_clear()
809 int cpu = loaded_vmcs->cpu; in loaded_vmcs_clear()
811 if (cpu != -1) in loaded_vmcs_clear()
822 if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) { in vmx_segment_cache_test_set()
823 kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS); in vmx_segment_cache_test_set()
824 vmx->segment_cache.bitmask = 0; in vmx_segment_cache_test_set()
826 ret = vmx->segment_cache.bitmask & mask; in vmx_segment_cache_test_set()
827 vmx->segment_cache.bitmask |= mask; in vmx_segment_cache_test_set()
833 u16 *p = &vmx->segment_cache.seg[seg].selector; in vmx_read_guest_seg_selector()
842 ulong *p = &vmx->segment_cache.seg[seg].base; in vmx_read_guest_seg_base()
851 u32 *p = &vmx->segment_cache.seg[seg].limit; in vmx_read_guest_seg_limit()
860 u32 *p = &vmx->segment_cache.seg[seg].ar; in vmx_read_guest_seg_ar()
887 if ((vcpu->guest_debug & in vmx_update_exception_bitmap()
891 if (to_vmx(vcpu)->rmode.vm86_active) in vmx_update_exception_bitmap()
902 eb |= get_vmcs12(vcpu)->exception_bitmap; in vmx_update_exception_bitmap()
911 * non-reserved faults. For vmcs02, however, PFEC_MASK in vmx_update_exception_bitmap()
926 if (vcpu->arch.xfd_no_write_intercept) in vmx_update_exception_bitmap()
940 return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr); in msr_write_intercepted()
947 if (vmx->loaded_vmcs->launched) in __vmx_vcpu_run_flags()
953 * it after vmexit and store it in vmx->spec_ctrl. in __vmx_vcpu_run_flags()
972 for (i = 0; i < m->nr; ++i) { in vmx_find_loadstore_msr_slot()
973 if (m->val[i].index == msr) in vmx_find_loadstore_msr_slot()
976 return -ENOENT; in vmx_find_loadstore_msr_slot()
982 struct msr_autoload *m = &vmx->msr_autoload; in clear_atomic_switch_msr()
1002 i = vmx_find_loadstore_msr_slot(&m->guest, msr); in clear_atomic_switch_msr()
1005 --m->guest.nr; in clear_atomic_switch_msr()
1006 m->guest.val[i] = m->guest.val[m->guest.nr]; in clear_atomic_switch_msr()
1007 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); in clear_atomic_switch_msr()
1010 i = vmx_find_loadstore_msr_slot(&m->host, msr); in clear_atomic_switch_msr()
1014 --m->host.nr; in clear_atomic_switch_msr()
1015 m->host.val[i] = m->host.val[m->host.nr]; in clear_atomic_switch_msr()
1016 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); in clear_atomic_switch_msr()
1035 struct msr_autoload *m = &vmx->msr_autoload; in add_atomic_switch_msr()
1069 i = vmx_find_loadstore_msr_slot(&m->guest, msr); in add_atomic_switch_msr()
1071 j = vmx_find_loadstore_msr_slot(&m->host, msr); in add_atomic_switch_msr()
1073 if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) || in add_atomic_switch_msr()
1074 (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) { in add_atomic_switch_msr()
1080 i = m->guest.nr++; in add_atomic_switch_msr()
1081 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); in add_atomic_switch_msr()
1083 m->guest.val[i].index = msr; in add_atomic_switch_msr()
1084 m->guest.val[i].value = guest_val; in add_atomic_switch_msr()
1090 j = m->host.nr++; in add_atomic_switch_msr()
1091 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); in add_atomic_switch_msr()
1093 m->host.val[j].index = msr; in add_atomic_switch_msr()
1094 m->host.val[j].value = host_val; in add_atomic_switch_msr()
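The guest/host autoload lists above are plain bounded arrays whose element counts are mirrored into the VMCS (VM_ENTRY_MSR_LOAD_COUNT / VM_EXIT_MSR_LOAD_COUNT, as the vmcs_write32() calls show). A self-contained model of the add/clear pattern, including the swap-with-last removal used by clear_atomic_switch_msr(); the MSR numbers in main() are just examples:

/* Toy model of the VM-Entry/VM-Exit MSR autoload arrays. */
#include <stdio.h>

#define MAX_NR_LOADSTORE_MSRS 8   /* same small limit as the kernel's */

struct vmx_msr_entry { unsigned int index; unsigned long long value; };
struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};

static int find_slot(const struct vmx_msrs *m, unsigned int msr)
{
	for (unsigned int i = 0; i < m->nr; i++)
		if (m->val[i].index == msr)
			return (int)i;
	return -1;
}

static void add_msr(struct vmx_msrs *m, unsigned int msr, unsigned long long v)
{
	int i = find_slot(m, msr);

	if (i < 0) {
		if (m->nr == MAX_NR_LOADSTORE_MSRS)
			return;           /* the real code warns here */
		i = (int)m->nr++;         /* would also rewrite the VMCS count */
		m->val[i].index = msr;
	}
	m->val[i].value = v;
}

static void clear_msr(struct vmx_msrs *m, unsigned int msr)
{
	int i = find_slot(m, msr);

	if (i < 0)
		return;
	/* Swap-with-last removal, exactly the pattern in the listing above. */
	m->val[i] = m->val[--m->nr];
}

int main(void)
{
	struct vmx_msrs guest = { 0 };

	add_msr(&guest, 0x38f, 0xff);     /* e.g. IA32_PERF_GLOBAL_CTRL */
	add_msr(&guest, 0x3f1, 0x1);      /* e.g. IA32_PEBS_ENABLE */
	clear_msr(&guest, 0x38f);
	printf("nr=%u first=%#x\n", guest.nr, guest.val[0].index);
	return 0;
}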
1099 u64 guest_efer = vmx->vcpu.arch.efer; in update_transition_efer()
1124 (enable_ept && ((vmx->vcpu.arch.efer ^ kvm_host.efer) & EFER_NX))) { in update_transition_efer()
1144 vmx->guest_uret_msrs[i].data = guest_efer; in update_transition_efer()
1145 vmx->guest_uret_msrs[i].mask = ~ignore_bits; in update_transition_efer()
1152 * On 32-bit kernels, VM exits still load the FS and GS bases from the
1182 !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); in pt_can_write_msr()
1187 /* The base must be 128-byte aligned and a legal physical address. */ in pt_output_base_valid()
1195 wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status); in pt_load_msr()
1196 wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); in pt_load_msr()
1197 wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); in pt_load_msr()
1198 wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); in pt_load_msr()
1200 wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); in pt_load_msr()
1201 wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); in pt_load_msr()
1209 rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status); in pt_save_msr()
1210 rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); in pt_save_msr()
1211 rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); in pt_save_msr()
1212 rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); in pt_save_msr()
1214 rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); in pt_save_msr()
1215 rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); in pt_save_msr()
1228 rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); in pt_guest_enter()
1229 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { in pt_guest_enter()
1231 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges); in pt_guest_enter()
1232 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges); in pt_guest_enter()
1241 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { in pt_guest_exit()
1242 pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges); in pt_guest_exit()
1243 pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges); in pt_guest_exit()
1248 * i.e. RTIT_CTL is always cleared on VM-Exit. Restore it if necessary. in pt_guest_exit()
1250 if (vmx->pt_desc.host.ctl) in pt_guest_exit()
1251 wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); in pt_guest_exit()
1257 if (unlikely(fs_sel != host->fs_sel)) { in vmx_set_host_fs_gs()
1262 host->fs_sel = fs_sel; in vmx_set_host_fs_gs()
1264 if (unlikely(gs_sel != host->gs_sel)) { in vmx_set_host_fs_gs()
1269 host->gs_sel = gs_sel; in vmx_set_host_fs_gs()
1271 if (unlikely(fs_base != host->fs_base)) { in vmx_set_host_fs_gs()
1273 host->fs_base = fs_base; in vmx_set_host_fs_gs()
1275 if (unlikely(gs_base != host->gs_base)) { in vmx_set_host_fs_gs()
1277 host->gs_base = gs_base; in vmx_set_host_fs_gs()
1295 * to/from long-mode by setting MSR_EFER.LMA. in vmx_prepare_switch_to_guest()
1297 if (!vmx->guest_uret_msrs_loaded) { in vmx_prepare_switch_to_guest()
1298 vmx->guest_uret_msrs_loaded = true; in vmx_prepare_switch_to_guest()
1300 if (!vmx->guest_uret_msrs[i].load_into_hardware) in vmx_prepare_switch_to_guest()
1304 vmx->guest_uret_msrs[i].data, in vmx_prepare_switch_to_guest()
1305 vmx->guest_uret_msrs[i].mask); in vmx_prepare_switch_to_guest()
1309 if (vmx->nested.need_vmcs12_to_shadow_sync) in vmx_prepare_switch_to_guest()
1312 if (vmx->guest_state_loaded) in vmx_prepare_switch_to_guest()
1315 host_state = &vmx->loaded_vmcs->host_state; in vmx_prepare_switch_to_guest()
1321 host_state->ldt_sel = kvm_read_ldt(); in vmx_prepare_switch_to_guest()
1324 savesegment(ds, host_state->ds_sel); in vmx_prepare_switch_to_guest()
1325 savesegment(es, host_state->es_sel); in vmx_prepare_switch_to_guest()
1328 if (likely(is_64bit_mm(current->mm))) { in vmx_prepare_switch_to_guest()
1330 fs_sel = current->thread.fsindex; in vmx_prepare_switch_to_guest()
1331 gs_sel = current->thread.gsindex; in vmx_prepare_switch_to_guest()
1332 fs_base = current->thread.fsbase; in vmx_prepare_switch_to_guest()
1333 vmx->msr_host_kernel_gs_base = current->thread.gsbase; in vmx_prepare_switch_to_guest()
1338 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); in vmx_prepare_switch_to_guest()
1341 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_prepare_switch_to_guest()
1350 vmx->guest_state_loaded = true; in vmx_prepare_switch_to_guest()
1357 if (!vmx->guest_state_loaded) in vmx_prepare_switch_to_host()
1360 host_state = &vmx->loaded_vmcs->host_state; in vmx_prepare_switch_to_host()
1362 ++vmx->vcpu.stat.host_state_reload; in vmx_prepare_switch_to_host()
1365 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_prepare_switch_to_host()
1367 if (host_state->ldt_sel || (host_state->gs_sel & 7)) { in vmx_prepare_switch_to_host()
1368 kvm_load_ldt(host_state->ldt_sel); in vmx_prepare_switch_to_host()
1370 load_gs_index(host_state->gs_sel); in vmx_prepare_switch_to_host()
1372 loadsegment(gs, host_state->gs_sel); in vmx_prepare_switch_to_host()
1375 if (host_state->fs_sel & 7) in vmx_prepare_switch_to_host()
1376 loadsegment(fs, host_state->fs_sel); in vmx_prepare_switch_to_host()
1378 if (unlikely(host_state->ds_sel | host_state->es_sel)) { in vmx_prepare_switch_to_host()
1379 loadsegment(ds, host_state->ds_sel); in vmx_prepare_switch_to_host()
1380 loadsegment(es, host_state->es_sel); in vmx_prepare_switch_to_host()
1385 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); in vmx_prepare_switch_to_host()
1388 vmx->guest_state_loaded = false; in vmx_prepare_switch_to_host()
1389 vmx->guest_uret_msrs_loaded = false; in vmx_prepare_switch_to_host()
1396 if (vmx->guest_state_loaded) in vmx_read_guest_kernel_gs_base()
1397 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_read_guest_kernel_gs_base()
1399 return vmx->msr_guest_kernel_gs_base; in vmx_read_guest_kernel_gs_base()
1405 if (vmx->guest_state_loaded) in vmx_write_guest_kernel_gs_base()
1408 vmx->msr_guest_kernel_gs_base = data; in vmx_write_guest_kernel_gs_base()
1415 unsigned int old = vmx->ple_window; in grow_ple_window()
1417 vmx->ple_window = __grow_ple_window(old, ple_window, in grow_ple_window()
1421 if (vmx->ple_window != old) { in grow_ple_window()
1422 vmx->ple_window_dirty = true; in grow_ple_window()
1423 trace_kvm_ple_window_update(vcpu->vcpu_id, in grow_ple_window()
1424 vmx->ple_window, old); in grow_ple_window()
1431 unsigned int old = vmx->ple_window; in shrink_ple_window()
1433 vmx->ple_window = __shrink_ple_window(old, ple_window, in shrink_ple_window()
1437 if (vmx->ple_window != old) { in shrink_ple_window()
1438 vmx->ple_window_dirty = true; in shrink_ple_window()
1439 trace_kvm_ple_window_update(vcpu->vcpu_id, in shrink_ple_window()
1440 vmx->ple_window, old); in shrink_ple_window()
1448 bool already_loaded = vmx->loaded_vmcs->cpu == cpu; in vmx_vcpu_load_vmcs()
1452 loaded_vmcs_clear(vmx->loaded_vmcs); in vmx_vcpu_load_vmcs()
1456 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to in vmx_vcpu_load_vmcs()
1463 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, in vmx_vcpu_load_vmcs()
1469 if (prev != vmx->loaded_vmcs->vmcs) { in vmx_vcpu_load_vmcs()
1470 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; in vmx_vcpu_load_vmcs()
1471 vmcs_load(vmx->loaded_vmcs->vmcs); in vmx_vcpu_load_vmcs()
1477 * performs IBPB on nested VM-Exit (a single nested transition in vmx_vcpu_load_vmcs()
1480 if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)) in vmx_vcpu_load_vmcs()
1494 * Linux uses per-cpu TSS and GDT, so set these when switching in vmx_vcpu_load_vmcs()
1498 (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss); in vmx_vcpu_load_vmcs()
1507 vmx->loaded_vmcs->cpu = cpu; in vmx_vcpu_load_vmcs()
1517 if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm)) in vmx_vcpu_load()
1545 if (vmx->rmode.vm86_active) { in vmx_get_rflags()
1547 save_rflags = vmx->rmode.save_rflags; in vmx_get_rflags()
1550 vmx->rflags = rflags; in vmx_get_rflags()
1552 return vmx->rflags; in vmx_get_rflags()
1567 vmx->rflags = rflags; in vmx_set_rflags()
1573 vmx->rflags = rflags; in vmx_set_rflags()
1574 if (vmx->rmode.vm86_active) { in vmx_set_rflags()
1575 vmx->rmode.save_rflags = rflags; in vmx_set_rflags()
1580 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM) in vmx_set_rflags()
1581 vmx->emulation_required = vmx_emulation_required(vcpu); in vmx_set_rflags()
1627 if (data & vmx->pt_desc.ctl_bitmask) in vmx_rtit_ctl_check()
1634 if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) && in vmx_rtit_ctl_check()
1636 data != vmx->pt_desc.guest.ctl) in vmx_rtit_ctl_check()
1646 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_rtit_ctl_check()
1654 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods); in vmx_rtit_ctl_check()
1655 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) && in vmx_rtit_ctl_check()
1659 value = intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_rtit_ctl_check()
1661 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && in vmx_rtit_ctl_check()
1665 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods); in vmx_rtit_ctl_check()
1666 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && in vmx_rtit_ctl_check()
1676 if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2)) in vmx_rtit_ctl_check()
1679 if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2)) in vmx_rtit_ctl_check()
1682 if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2)) in vmx_rtit_ctl_check()
1685 if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2)) in vmx_rtit_ctl_check()
1701 if (to_vmx(vcpu)->exit_reason.enclave_mode) { in vmx_check_emulate_instruction()
1707 if ((to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in vmx_check_emulate_instruction()
1716 union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason; in skip_emulated_instruction()
1725 * (namely Hyper-V) don't set it due to it being undefined behavior, in skip_emulated_instruction()
1736 * the RIP that actually triggered the VM-Exit. But, because in skip_emulated_instruction()
1737 * most instructions that cause VM-Exit will #UD in an enclave, in skip_emulated_instruction()
1738 * most instruction-based VM-Exits simply do not occur. in skip_emulated_instruction()
1745 * WARN if KVM tries to skip a non-zero length instruction on in skip_emulated_instruction()
1746 * a VM-Exit from an enclave. in skip_emulated_instruction()
1752 "skipping instruction after SGX enclave VM-Exit"); in skip_emulated_instruction()
1758 * We need to mask out the high 32 bits of RIP if not in 64-bit in skip_emulated_instruction()
1759 * mode, but just finding out that we are in 64-bit mode is in skip_emulated_instruction()
1779 * Recognizes a pending MTF VM-exit and records the nested state for later
1791 * Per the SDM, MTF takes priority over debug-trap exceptions besides in vmx_update_emulated_instruction()
1792 * TSS T-bit traps and ICEBP (INT1). KVM doesn't emulate T-bit traps in vmx_update_emulated_instruction()
1794 * intercepted #DB deliberately avoids single-step #DB and MTF updates in vmx_update_emulated_instruction()
1797 * any #DB exception pending delivery must be a debug-trap of lower in vmx_update_emulated_instruction()
1802 (!vcpu->arch.exception.pending || in vmx_update_emulated_instruction()
1803 vcpu->arch.exception.vector == DB_VECTOR) && in vmx_update_emulated_instruction()
1804 (!vcpu->arch.exception_vmexit.pending || in vmx_update_emulated_instruction()
1805 vcpu->arch.exception_vmexit.vector == DB_VECTOR)) { in vmx_update_emulated_instruction()
1806 vmx->nested.mtf_pending = true; in vmx_update_emulated_instruction()
1809 vmx->nested.mtf_pending = false; in vmx_update_emulated_instruction()
1827 if (kvm_hlt_in_guest(vcpu->kvm) && in vmx_clear_hlt()
1834 struct kvm_queued_exception *ex = &vcpu->arch.exception; in vmx_inject_exception()
1835 u32 intr_info = ex->vector | INTR_INFO_VALID_MASK; in vmx_inject_exception()
1840 if (ex->has_error_code) { in vmx_inject_exception()
1847 * ABI lets userspace shove in arbitrary 32-bit values. Drop in vmx_inject_exception()
1848 * the upper bits to avoid VM-Fail, losing information that in vmx_inject_exception()
1851 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)ex->error_code); in vmx_inject_exception()
1855 if (vmx->rmode.vm86_active) { in vmx_inject_exception()
1857 if (kvm_exception_is_soft(ex->vector)) in vmx_inject_exception()
1858 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_inject_exception()
1859 kvm_inject_realmode_interrupt(vcpu, ex->vector, inc_eip); in vmx_inject_exception()
1863 WARN_ON_ONCE(vmx->emulation_required); in vmx_inject_exception()
1865 if (kvm_exception_is_soft(ex->vector)) { in vmx_inject_exception()
1867 vmx->vcpu.arch.event_exit_inst_len); in vmx_inject_exception()
1886 uret_msr->load_into_hardware = load_into_hardware; in vmx_setup_uret_msr()
1904 load_syscall_msrs = is_long_mode(&vmx->vcpu) && in vmx_setup_uret_msrs()
1905 (vmx->vcpu.arch.efer & EFER_SCE); in vmx_setup_uret_msrs()
1914 guest_cpu_cap_has(&vmx->vcpu, X86_FEATURE_RDTSCP) || in vmx_setup_uret_msrs()
1915 guest_cpu_cap_has(&vmx->vcpu, X86_FEATURE_RDPID)); in vmx_setup_uret_msrs()
1927 * next VM-Enter. in vmx_setup_uret_msrs()
1929 vmx->guest_uret_msrs_loaded = false; in vmx_setup_uret_msrs()
1937 return vmcs12->tsc_offset; in vmx_get_l2_tsc_offset()
1948 return vmcs12->tsc_multiplier; in vmx_get_l2_tsc_multiplier()
1955 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); in vmx_write_tsc_offset()
1960 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); in vmx_write_tsc_multiplier()
1986 WARN_ON_ONCE(vmx->msr_ia32_feature_control_valid_bits & in is_vmx_feature_control_msr_valid()
1989 if (!msr->host_initiated && in is_vmx_feature_control_msr_valid()
1990 (vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED)) in is_vmx_feature_control_msr_valid()
1993 if (msr->host_initiated) in is_vmx_feature_control_msr_valid()
1996 valid_bits = vmx->msr_ia32_feature_control_valid_bits; in is_vmx_feature_control_msr_valid()
1998 return !(msr->data & ~valid_bits); in is_vmx_feature_control_msr_valid()
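The rule enforced above is the architectural lock semantics of IA32_FEATURE_CONTROL: once the guest sets the lock bit, only host-initiated (userspace) writes may still change the MSR, and each writer is bounded by its own valid-bit mask. A condensed sketch; the mask values are the caller's, and FEAT_CTL_LOCKED is bit 0:

/* Condensed sketch of the write-validity rule; illustrative only. */
#include <stdbool.h>
#include <stdint.h>

#define FEAT_CTL_LOCKED (1ULL << 0)

static bool feature_control_write_ok(uint64_t cur, uint64_t new_val,
				     uint64_t guest_valid_bits,
				     uint64_t host_valid_bits,
				     bool host_initiated)
{
	/* After the lock bit is set, the guest can no longer write the MSR. */
	if (!host_initiated && (cur & FEAT_CTL_LOCKED))
		return false;

	return !(new_val & ~(host_initiated ? host_valid_bits
					    : guest_valid_bits));
}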
2014 * Reads an MSR value (for 'msr_info->index') into 'msr_info->data'.
2015 * Returns 0 on success, non-0 otherwise.
2024 switch (msr_info->index) { in vmx_get_msr()
2027 msr_info->data = vmcs_readl(GUEST_FS_BASE); in vmx_get_msr()
2030 msr_info->data = vmcs_readl(GUEST_GS_BASE); in vmx_get_msr()
2033 msr_info->data = vmx_read_guest_kernel_gs_base(vmx); in vmx_get_msr()
2039 if (!msr_info->host_initiated && in vmx_get_msr()
2040 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) in vmx_get_msr()
2044 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) in vmx_get_msr()
2047 msr_info->data = vmx->msr_ia32_umwait_control; in vmx_get_msr()
2050 if (!msr_info->host_initiated && in vmx_get_msr()
2054 msr_info->data = to_vmx(vcpu)->spec_ctrl; in vmx_get_msr()
2057 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); in vmx_get_msr()
2060 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP); in vmx_get_msr()
2063 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); in vmx_get_msr()
2067 (!msr_info->host_initiated && in vmx_get_msr()
2070 msr_info->data = vmcs_read64(GUEST_BNDCFGS); in vmx_get_msr()
2073 if (!msr_info->host_initiated && in vmx_get_msr()
2074 !(vmx->msr_ia32_feature_control & in vmx_get_msr()
2077 msr_info->data = vcpu->arch.mcg_ext_ctl; in vmx_get_msr()
2080 msr_info->data = vmx->msr_ia32_feature_control; in vmx_get_msr()
2083 if (!msr_info->host_initiated && in vmx_get_msr()
2086 msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash in vmx_get_msr()
2087 [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0]; in vmx_get_msr()
2092 if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, in vmx_get_msr()
2093 &msr_info->data)) in vmx_get_msr()
2098 * instead of just ignoring the features, different Hyper-V in vmx_get_msr()
2103 if (!msr_info->host_initiated && guest_cpu_cap_has_evmcs(vcpu)) in vmx_get_msr()
2104 nested_evmcs_filter_control_msr(vcpu, msr_info->index, in vmx_get_msr()
2105 &msr_info->data); in vmx_get_msr()
2111 msr_info->data = vmx->pt_desc.guest.ctl; in vmx_get_msr()
2116 msr_info->data = vmx->pt_desc.guest.status; in vmx_get_msr()
2120 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
2123 msr_info->data = vmx->pt_desc.guest.cr3_match; in vmx_get_msr()
2127 (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
2129 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
2132 msr_info->data = vmx->pt_desc.guest.output_base; in vmx_get_msr()
2136 (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
2138 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
2141 msr_info->data = vmx->pt_desc.guest.output_mask; in vmx_get_msr()
2144 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; in vmx_get_msr()
2146 (index >= 2 * vmx->pt_desc.num_address_ranges)) in vmx_get_msr()
2149 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; in vmx_get_msr()
2151 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2]; in vmx_get_msr()
2154 msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL); in vmx_get_msr()
2158 msr = vmx_find_uret_msr(vmx, msr_info->index); in vmx_get_msr()
2160 msr_info->data = msr->data; in vmx_get_msr()
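Both the read path above and the write path below map an RTIT address-range MSR to its slot with the same arithmetic: the ADDRn_A/ADDRn_B MSRs are interleaved, so the low bit of the offset picks A (range start) versus B (range end) and the remaining bits pick the range. A worked example, using the architectural MSR numbers starting at 0x580:

/* Worked example of the RTIT_ADDRn_{A,B} index math. */
#include <stdio.h>

#define MSR_IA32_RTIT_ADDR0_A 0x580

int main(void)
{
	unsigned int msr = 0x583;        /* IA32_RTIT_ADDR1_B */
	unsigned int index = msr - MSR_IA32_RTIT_ADDR0_A;

	printf("range %u, register %c\n", index / 2, (index & 1) ? 'B' : 'A');
	/* -> range 1, register B */
	return 0;
}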
2196 * Returns 0 on success, non-0 otherwise.
2204 u32 msr_index = msr_info->index; in vmx_set_msr()
2205 u64 data = msr_info->data; in vmx_set_msr()
2227 * Always intercepting WRMSR could incur non-negligible in vmx_set_msr()
2230 * upon the first write with a non-zero value (indicating in vmx_set_msr()
2238 vcpu->arch.xfd_no_write_intercept = true; in vmx_set_msr()
2245 get_vmcs12(vcpu)->guest_sysenter_cs = data; in vmx_set_msr()
2251 get_vmcs12(vcpu)->guest_sysenter_eip = data; in vmx_set_msr()
2258 get_vmcs12(vcpu)->guest_sysenter_esp = data; in vmx_set_msr()
2265 invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated); in vmx_set_msr()
2275 if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls & in vmx_set_msr()
2277 get_vmcs12(vcpu)->guest_ia32_debugctl = data; in vmx_set_msr()
2280 if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event && in vmx_set_msr()
2287 (!msr_info->host_initiated && in vmx_set_msr()
2295 ((vmx->nested.msrs.entry_ctls_high & VM_ENTRY_LOAD_BNDCFGS) || in vmx_set_msr()
2296 (vmx->nested.msrs.exit_ctls_high & VM_EXIT_CLEAR_BNDCFGS))) in vmx_set_msr()
2297 get_vmcs12(vcpu)->guest_bndcfgs = data; in vmx_set_msr()
2302 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) in vmx_set_msr()
2305 /* Reserved bit 1 and the upper bits [63:32] must be zero */ in vmx_set_msr()
2309 vmx->msr_ia32_umwait_control = data; in vmx_set_msr()
2312 if (!msr_info->host_initiated && in vmx_set_msr()
2319 vmx->spec_ctrl = data; in vmx_set_msr()
2324 * For non-nested: in vmx_set_msr()
2325 * When it's written (to non-zero) for the first time, pass in vmx_set_msr()
2340 if (!msr_info->host_initiated && in vmx_set_msr()
2341 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) in vmx_set_msr()
2352 get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) in vmx_set_msr()
2353 get_vmcs12(vcpu)->guest_ia32_pat = data; in vmx_set_msr()
2359 if ((!msr_info->host_initiated && in vmx_set_msr()
2360 !(to_vmx(vcpu)->msr_ia32_feature_control & in vmx_set_msr()
2364 vcpu->arch.mcg_ext_ctl = data; in vmx_set_msr()
2370 vmx->msr_ia32_feature_control = data; in vmx_set_msr()
2371 if (msr_info->host_initiated && data == 0) in vmx_set_msr()
2389 if (!msr_info->host_initiated && in vmx_set_msr()
2391 ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) && in vmx_set_msr()
2392 !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED)))) in vmx_set_msr()
2394 vmx->msr_ia32_sgxlepubkeyhash in vmx_set_msr()
2395 [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data; in vmx_set_msr()
2398 if (!msr_info->host_initiated) in vmx_set_msr()
2399 return 1; /* they are read-only */ in vmx_set_msr()
2406 vmx->nested.vmxon) in vmx_set_msr()
2409 vmx->pt_desc.guest.ctl = data; in vmx_set_msr()
2417 vmx->pt_desc.guest.status = data; in vmx_set_msr()
2422 if (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2425 vmx->pt_desc.guest.cr3_match = data; in vmx_set_msr()
2430 if (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2432 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2437 vmx->pt_desc.guest.output_base = data; in vmx_set_msr()
2442 if (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2444 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2447 vmx->pt_desc.guest.output_mask = data; in vmx_set_msr()
2452 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; in vmx_set_msr()
2453 if (index >= 2 * vmx->pt_desc.num_address_ranges) in vmx_set_msr()
2458 vmx->pt_desc.guest.addr_b[index / 2] = data; in vmx_set_msr()
2460 vmx->pt_desc.guest.addr_a[index / 2] = data; in vmx_set_msr()
2508 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); in vmx_cache_reg()
2511 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); in vmx_cache_reg()
2518 guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; in vmx_cache_reg()
2520 vcpu->arch.cr0 &= ~guest_owned_bits; in vmx_cache_reg()
2521 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits; in vmx_cache_reg()
2529 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); in vmx_cache_reg()
2532 guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; in vmx_cache_reg()
2534 vcpu->arch.cr4 &= ~guest_owned_bits; in vmx_cache_reg()
2535 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits; in vmx_cache_reg()
2538 KVM_BUG_ON(1, vcpu->kvm); in vmx_cache_reg()
2566 return -EIO; in adjust_vmx_controls()
2616 return -EIO; in setup_vmcs_config()
2622 return -EIO; in setup_vmcs_config()
2640 &vmx_cap->ept, &vmx_cap->vpid); in setup_vmcs_config()
2643 vmx_cap->ept) { in setup_vmcs_config()
2645 "1-setting enable EPT VM-execution control\n"); in setup_vmcs_config()
2648 return -EIO; in setup_vmcs_config()
2650 vmx_cap->ept = 0; in setup_vmcs_config()
2654 vmx_cap->vpid) { in setup_vmcs_config()
2656 "1-setting enable VPID VM-execution control\n"); in setup_vmcs_config()
2659 return -EIO; in setup_vmcs_config()
2661 vmx_cap->vpid = 0; in setup_vmcs_config()
2676 return -EIO; in setup_vmcs_config()
2682 return -EIO; in setup_vmcs_config()
2694 return -EIO; in setup_vmcs_config()
2703 pr_warn_once("Inconsistent VM-Entry/VM-Exit pair, entry = %x, exit = %x\n", in setup_vmcs_config()
2707 return -EIO; in setup_vmcs_config()
2736 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ in setup_vmcs_config()
2738 return -EIO; in setup_vmcs_config()
2743 * VMCS fields for 64-bit kernels, and per the SDM, "This bit is always in setup_vmcs_config()
2747 return -EIO; in setup_vmcs_config()
2750 /* Require Write-Back (WB) memory type for VMCS accesses. */ in setup_vmcs_config()
2752 return -EIO; in setup_vmcs_config()
2756 vmcs_conf->basic = basic_msr; in setup_vmcs_config()
2757 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; in setup_vmcs_config()
2758 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; in setup_vmcs_config()
2759 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; in setup_vmcs_config()
2760 vmcs_conf->cpu_based_3rd_exec_ctrl = _cpu_based_3rd_exec_control; in setup_vmcs_config()
2761 vmcs_conf->vmexit_ctrl = _vmexit_control; in setup_vmcs_config()
2762 vmcs_conf->vmentry_ctrl = _vmentry_control; in setup_vmcs_config()
2763 vmcs_conf->misc = misc_msr; in setup_vmcs_config()
2809 return -EIO; in vmx_check_processor_compat()
2813 return -EIO; in vmx_check_processor_compat()
2819 return -EIO; in vmx_check_processor_compat()
2841 return -EFAULT; in kvm_cpu_vmxon()
2851 return -EBUSY; in vmx_enable_virtualization_cpu()
2854 * This can happen if we hot-added a CPU but failed to allocate in vmx_enable_virtualization_cpu()
2858 return -EFAULT; in vmx_enable_virtualization_cpu()
2907 vmcs->hdr.revision_id = KVM_EVMCS_VERSION; in alloc_vmcs_cpu()
2909 vmcs->hdr.revision_id = vmx_basic_vmcs_revision_id(vmcs_config.basic); in alloc_vmcs_cpu()
2912 vmcs->hdr.shadow_vmcs = 1; in alloc_vmcs_cpu()
2926 if (!loaded_vmcs->vmcs) in free_loaded_vmcs()
2929 free_vmcs(loaded_vmcs->vmcs); in free_loaded_vmcs()
2930 loaded_vmcs->vmcs = NULL; in free_loaded_vmcs()
2931 if (loaded_vmcs->msr_bitmap) in free_loaded_vmcs()
2932 free_page((unsigned long)loaded_vmcs->msr_bitmap); in free_loaded_vmcs()
2933 WARN_ON(loaded_vmcs->shadow_vmcs != NULL); in free_loaded_vmcs()
2938 loaded_vmcs->vmcs = alloc_vmcs(false); in alloc_loaded_vmcs()
2939 if (!loaded_vmcs->vmcs) in alloc_loaded_vmcs()
2940 return -ENOMEM; in alloc_loaded_vmcs()
2942 vmcs_clear(loaded_vmcs->vmcs); in alloc_loaded_vmcs()
2944 loaded_vmcs->shadow_vmcs = NULL; in alloc_loaded_vmcs()
2945 loaded_vmcs->hv_timer_soft_disabled = false; in alloc_loaded_vmcs()
2946 loaded_vmcs->cpu = -1; in alloc_loaded_vmcs()
2947 loaded_vmcs->launched = 0; in alloc_loaded_vmcs()
2950 loaded_vmcs->msr_bitmap = (unsigned long *) in alloc_loaded_vmcs()
2952 if (!loaded_vmcs->msr_bitmap) in alloc_loaded_vmcs()
2954 memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); in alloc_loaded_vmcs()
2957 memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); in alloc_loaded_vmcs()
2958 memset(&loaded_vmcs->controls_shadow, 0, in alloc_loaded_vmcs()
2965 return -ENOMEM; in alloc_loaded_vmcs()
2988 return -ENOMEM; in alloc_kvm_area()
2993 * vmcs->revision_id to KVM_EVMCS_VERSION instead of in alloc_kvm_area()
3002 vmcs->hdr.revision_id = vmx_basic_vmcs_revision_id(vmcs_config.basic); in alloc_kvm_area()
3021 save->selector &= ~SEGMENT_RPL_MASK; in fix_pmode_seg()
3022 save->dpl = save->selector & SEGMENT_RPL_MASK; in fix_pmode_seg()
3023 save->s = 1; in fix_pmode_seg()
3034 * Update real mode segment cache. It may not be up-to-date if segment in enter_pmode()
3037 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_pmode()
3038 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_pmode()
3039 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_pmode()
3040 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_pmode()
3041 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_pmode()
3042 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_pmode()
3044 vmx->rmode.vm86_active = 0; in enter_pmode()
3046 __vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_pmode()
3050 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; in enter_pmode()
3058 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_pmode()
3059 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_pmode()
3060 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_pmode()
3061 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_pmode()
3062 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_pmode()
3063 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_pmode()
3087 if (save->base & 0xf) in fix_rmode_seg()
3092 vmcs_write16(sf->selector, var.selector); in fix_rmode_seg()
3093 vmcs_writel(sf->base, var.base); in fix_rmode_seg()
3094 vmcs_write32(sf->limit, var.limit); in fix_rmode_seg()
3095 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); in fix_rmode_seg()
3102 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); in enter_rmode()
3107 * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0 in enter_rmode()
3108 * should VM-Fail and KVM should reject userspace attempts to stuff in enter_rmode()
3113 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_rmode()
3114 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_rmode()
3115 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_rmode()
3116 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_rmode()
3117 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_rmode()
3118 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_rmode()
3119 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_rmode()
3121 vmx->rmode.vm86_active = 1; in enter_rmode()
3125 vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr); in enter_rmode()
3126 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); in enter_rmode()
3130 vmx->rmode.save_rflags = flags; in enter_rmode()
3138 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_rmode()
3139 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_rmode()
3140 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_rmode()
3141 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_rmode()
3142 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_rmode()
3143 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_rmode()
3154 vcpu->arch.efer = efer; in vmx_set_efer()
3161 if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm)) in vmx_set_efer()
3185 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); in enter_lmode()
3190 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); in exit_lmode()
3201 * the CPU is not required to invalidate guest-physical mappings on in vmx_flush_tlb_all()
3202 * VM-Entry, even if VPID is disabled. Guest-physical mappings are in vmx_flush_tlb_all()
3204 * (INVVPID also isn't required to invalidate guest-physical mappings). in vmx_flush_tlb_all()
3212 vpid_sync_vcpu_single(vmx->vpid); in vmx_flush_tlb_all()
3213 vpid_sync_vcpu_single(vmx->nested.vpid02); in vmx_flush_tlb_all()
3222 return to_vmx(vcpu)->vpid; in vmx_get_current_vpid()
3227 struct kvm_mmu *mmu = vcpu->arch.mmu; in vmx_flush_tlb_current()
3228 u64 root_hpa = mmu->root.hpa; in vmx_flush_tlb_current()
3236 mmu->root_role.level)); in vmx_flush_tlb_current()
3254 * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are in vmx_flush_tlb_guest()
3255 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is in vmx_flush_tlb_guest()
3256 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed), in vmx_flush_tlb_guest()
3264 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vmx_ept_load_pdptrs()
3270 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); in vmx_ept_load_pdptrs()
3271 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); in vmx_ept_load_pdptrs()
3272 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); in vmx_ept_load_pdptrs()
3273 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); in vmx_ept_load_pdptrs()
3279 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_save_pdptrs()
3284 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); in ept_save_pdptrs()
3285 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); in ept_save_pdptrs()
3286 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); in ept_save_pdptrs()
3287 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); in ept_save_pdptrs()
3300 if (to_vmx(vcpu)->nested.vmxon) in vmx_is_valid_cr0()
3322 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) in vmx_set_cr0()
3325 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) in vmx_set_cr0()
3331 vcpu->arch.cr0 = cr0; in vmx_set_cr0()
3335 if (vcpu->arch.efer & EFER_LME) { in vmx_set_cr0()
3345 * Ensure KVM has an up-to-date snapshot of the guest's CR3. If in vmx_set_cr0()
3362 * e.g. after nested VM-Enter. in vmx_set_cr0()
3375 tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS; in vmx_set_cr0()
3379 /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */ in vmx_set_cr0()
3384 * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but in vmx_set_cr0()
3385 * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG. in vmx_set_cr0()
3391 /* depends on vcpu->arch.cr0 being set to the new value */ in vmx_set_cr0()
3392 vmx->emulation_required = vmx_emulation_required(vcpu); in vmx_set_cr0()
3418 struct kvm *kvm = vcpu->kvm; in vmx_load_mmu_pgd()
3430 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; in vmx_load_mmu_pgd()
3432 guest_cr3 = vcpu->arch.cr3; in vmx_load_mmu_pgd()
3433 else /* vmcs.GUEST_CR3 is already up-to-date. */ in vmx_load_mmu_pgd()
3455 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) in vmx_is_valid_cr4()
3475 else if (vmx->rmode.vm86_active) in vmx_set_cr4()
3490 vcpu->arch.cr4 = cr4; in vmx_set_cr4()
3504 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in in vmx_set_cr4()
3506 * to be manually disabled when guest switches to non-paging in vmx_set_cr4()
3530 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_get_segment()
3531 *var = vmx->rmode.segs[seg]; in vmx_get_segment()
3533 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) in vmx_get_segment()
3535 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3536 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3539 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3540 var->limit = vmx_read_guest_seg_limit(vmx, seg); in vmx_get_segment()
3541 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3543 var->unusable = (ar >> 16) & 1; in vmx_get_segment()
3544 var->type = ar & 15; in vmx_get_segment()
3545 var->s = (ar >> 4) & 1; in vmx_get_segment()
3546 var->dpl = (ar >> 5) & 3; in vmx_get_segment()
3554 var->present = !var->unusable; in vmx_get_segment()
3555 var->avl = (ar >> 12) & 1; in vmx_get_segment()
3556 var->l = (ar >> 13) & 1; in vmx_get_segment()
3557 var->db = (ar >> 14) & 1; in vmx_get_segment()
3558 var->g = (ar >> 15) & 1; in vmx_get_segment()
3565 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_segment_base()
3577 if (unlikely(vmx->rmode.vm86_active)) in __vmx_get_cpl()
3601 ar = var->type & 15; in vmx_segment_access_rights()
3602 ar |= (var->s & 1) << 4; in vmx_segment_access_rights()
3603 ar |= (var->dpl & 3) << 5; in vmx_segment_access_rights()
3604 ar |= (var->present & 1) << 7; in vmx_segment_access_rights()
3605 ar |= (var->avl & 1) << 12; in vmx_segment_access_rights()
3606 ar |= (var->l & 1) << 13; in vmx_segment_access_rights()
3607 ar |= (var->db & 1) << 14; in vmx_segment_access_rights()
3608 ar |= (var->g & 1) << 15; in vmx_segment_access_rights()
3609 ar |= (var->unusable || !var->present) << 16; in vmx_segment_access_rights()
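vmx_segment_access_rights() above packs the kvm_segment fields into the VMCS access-rights layout (type in bits 3:0, S at bit 4, DPL at bits 6:5, P at bit 7, AVL at bit 12, L at bit 13, D/B at bit 14, G at bit 15, "unusable" at bit 16), the exact inverse of the unpacking in vmx_get_segment() earlier in the listing. A standalone encoding example for a 64-bit code segment:

/* Standalone encoding example for the VMCS access-rights field. */
#include <stdio.h>

struct seg {
	unsigned type:4, s:1, dpl:2, present:1, avl:1, l:1, db:1, g:1, unusable:1;
};

static unsigned int pack_ar(const struct seg *v)
{
	unsigned int ar = v->type & 15;

	ar |= (v->s & 1) << 4;
	ar |= (v->dpl & 3) << 5;
	ar |= (v->present & 1) << 7;
	ar |= (v->avl & 1) << 12;
	ar |= (v->l & 1) << 13;
	ar |= (v->db & 1) << 14;
	ar |= (v->g & 1) << 15;
	ar |= (v->unusable || !v->present) << 16;
	return ar;
}

int main(void)
{
	/* 64-bit code segment: type 0xb (exec/read, accessed), S=1, P=1, L=1. */
	struct seg cs = { .type = 0xb, .s = 1, .present = 1, .l = 1 };

	printf("access rights = %#x\n", pack_ar(&cs));   /* 0x209b */
	return 0;
}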
3621 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in __vmx_set_segment()
3622 vmx->rmode.segs[seg] = *var; in __vmx_set_segment()
3624 vmcs_write16(sf->selector, var->selector); in __vmx_set_segment()
3625 else if (var->s) in __vmx_set_segment()
3626 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); in __vmx_set_segment()
3630 vmcs_writel(sf->base, var->base); in __vmx_set_segment()
3631 vmcs_write32(sf->limit, var->limit); in __vmx_set_segment()
3632 vmcs_write16(sf->selector, var->selector); in __vmx_set_segment()
3646 var->type |= 0x1; /* Accessed */ in __vmx_set_segment()
3648 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); in __vmx_set_segment()
3655 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu); in vmx_set_segment()
3668 dt->size = vmcs_read32(GUEST_IDTR_LIMIT); in vmx_get_idt()
3669 dt->address = vmcs_readl(GUEST_IDTR_BASE); in vmx_get_idt()
3674 vmcs_write32(GUEST_IDTR_LIMIT, dt->size); in vmx_set_idt()
3675 vmcs_writel(GUEST_IDTR_BASE, dt->address); in vmx_set_idt()
3680 dt->size = vmcs_read32(GUEST_GDTR_LIMIT); in vmx_get_gdt()
3681 dt->address = vmcs_readl(GUEST_GDTR_BASE); in vmx_get_gdt()
3686 vmcs_write32(GUEST_GDTR_LIMIT, dt->size); in vmx_set_gdt()
3687 vmcs_writel(GUEST_GDTR_BASE, dt->address); in vmx_set_gdt()
3876 * - Add checks on RIP in __vmx_guest_state_valid()
3877 * - Add checks on RFLAGS in __vmx_guest_state_valid()
3891 return -EFAULT; in init_rmode_tss()
3896 return -EFAULT; in init_rmode_tss()
3899 if (__copy_to_user(ua + RMODE_TSS_SIZE - 1, &data, sizeof(u8))) in init_rmode_tss()
3900 return -EFAULT; in init_rmode_tss()
3912 /* Protect kvm_vmx->ept_identity_pagetable_done. */ in init_rmode_identity_map()
3913 mutex_lock(&kvm->slots_lock); in init_rmode_identity_map()
3915 if (likely(kvm_vmx->ept_identity_pagetable_done)) in init_rmode_identity_map()
3918 if (!kvm_vmx->ept_identity_map_addr) in init_rmode_identity_map()
3919 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; in init_rmode_identity_map()
3923 kvm_vmx->ept_identity_map_addr, in init_rmode_identity_map()
3930 /* Set up identity-mapping pagetable for EPT in real mode */ in init_rmode_identity_map()
3935 r = -EFAULT; in init_rmode_identity_map()
3939 kvm_vmx->ept_identity_pagetable_done = true; in init_rmode_identity_map()
3942 mutex_unlock(&kvm->slots_lock); in init_rmode_identity_map()
3951 vmcs_write16(sf->selector, 0); in seg_setup()
3952 vmcs_writel(sf->base, 0); in seg_setup()
3953 vmcs_write32(sf->limit, 0xffff); in seg_setup()
3958 vmcs_write32(sf->ar_bytes, ar); in seg_setup()
3989 * When KVM is a nested hypervisor on top of Hyper-V and uses in vmx_msr_bitmap_l01_changed()
3994 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs; in vmx_msr_bitmap_l01_changed()
3996 if (evmcs->hv_enlightenments_control.msr_bitmap) in vmx_msr_bitmap_l01_changed()
3997 evmcs->hv_clean_fields &= in vmx_msr_bitmap_l01_changed()
4001 vmx->nested.force_msr_bitmap_recalc = true; in vmx_msr_bitmap_l01_changed()
4007 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; in vmx_disable_intercept_for_msr()
4022 clear_bit(idx, vmx->shadow_msr_intercept.read); in vmx_disable_intercept_for_msr()
4024 clear_bit(idx, vmx->shadow_msr_intercept.write); in vmx_disable_intercept_for_msr()
4049 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; in vmx_enable_intercept_for_msr()
4064 set_bit(idx, vmx->shadow_msr_intercept.read); in vmx_enable_intercept_for_msr()
4066 set_bit(idx, vmx->shadow_msr_intercept.write); in vmx_enable_intercept_for_msr()
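The clear_bit()/set_bit() calls above poke one bit per MSR in the 4 KiB MSR bitmap, whose layout is fixed by the SDM: read intercepts for MSRs 0x0-0x1fff at page offset 0x000, reads for 0xc0000000-0xc0001fff at 0x400, and the corresponding write halves at 0x800 and 0xc00. A small helper that computes the bit offset for a given MSR:

/* Bit position of an MSR's intercept bit inside the VMX MSR bitmap page. */
#include <stdio.h>

static long msr_bitmap_bit(unsigned int msr, int write)
{
	unsigned int base;

	if (msr <= 0x1fffu)
		base = write ? 0x800 : 0x000;
	else if (msr >= 0xc0000000u && msr <= 0xc0001fffu)
		base = write ? 0xc00 : 0x400;
	else
		return -1;    /* not coverable by the bitmap: always intercepted */

	return (long)base * 8 + (msr & 0x1fffu);   /* byte offset -> bit offset */
}

int main(void)
{
	/* Write intercept for MSR_KERNEL_GS_BASE (0xc0000102). */
	printf("bit %ld\n", msr_bitmap_bit(0xc0000102u, 1));
	return 0;
}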
4079 * x2APIC indices for 64-bit accesses into the RDMSR and WRMSR halves in vmx_update_msr_bitmap_x2apic()
4086 u64 *msr_bitmap = (u64 *)vmx->vmcs01.msr_bitmap; in vmx_update_msr_bitmap_x2apic()
4102 if (mode == vmx->x2apic_msr_bitmap_mode) in vmx_update_msr_bitmap_x2apic()
4105 vmx->x2apic_msr_bitmap_mode = mode; in vmx_update_msr_bitmap_x2apic()
4108 * Reset the bitmap for MSRs 0x800 - 0x83f. Leave AMD's uber-extended in vmx_update_msr_bitmap_x2apic()
4112 * mode, only the current timer count needs on-demand emulation by KVM. in vmx_update_msr_bitmap_x2apic()
4115 msr_bitmap[read_idx] = ~kvm_lapic_readable_reg_mask(vcpu->arch.apic); in vmx_update_msr_bitmap_x2apic()
4139 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); in pt_update_intercept_for_msr()
4146 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) { in pt_update_intercept_for_msr()
4171 if (!test_bit(i, vmx->shadow_msr_intercept.read)) in vmx_msr_filter_changed()
4174 if (!test_bit(i, vmx->shadow_msr_intercept.write)) in vmx_msr_filter_changed()
4187 if (vcpu->mode == IN_GUEST_MODE) { in kvm_vcpu_trigger_posted_interrupt()
4192 * event is being sent from a fastpath VM-Exit handler, in in kvm_vcpu_trigger_posted_interrupt()
4194 * re-entering the guest. in kvm_vcpu_trigger_posted_interrupt()
4199 * Case 1: vCPU stays in non-root mode. Sending a notification in kvm_vcpu_trigger_posted_interrupt()
4203 * PIR will be synced to the vIRR before re-entering the guest. in kvm_vcpu_trigger_posted_interrupt()
4215 __apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); in kvm_vcpu_trigger_posted_interrupt()
4222 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest(). in kvm_vcpu_trigger_posted_interrupt()
4234 * and freed, and must not be accessed outside of vcpu->mutex. The in vmx_deliver_nested_posted_interrupt()
4235 * vCPU's cached PI NV is valid if and only if posted interrupts in vmx_deliver_nested_posted_interrupt()
4240 vector == vmx->nested.posted_intr_nv) { in vmx_deliver_nested_posted_interrupt()
4245 vmx->nested.pi_pending = true; in vmx_deliver_nested_posted_interrupt()
4249 * This pairs with the smp_mb_*() after setting vcpu->mode in in vmx_deliver_nested_posted_interrupt()
4252 * vcpu->mode != IN_GUEST_MODE. The extra barrier is needed as in vmx_deliver_nested_posted_interrupt()
4256 * vcpu->requests and the load from vcpu->mode. in vmx_deliver_nested_posted_interrupt()
4264 return -1; in vmx_deliver_nested_posted_interrupt()
4268 * 1. If target vcpu is running (non-root mode), send posted interrupt
4282 /* Note, this is called iff the local APIC is in-kernel. */ in vmx_deliver_posted_interrupt()
4283 if (!vcpu->arch.apic->apicv_active) in vmx_deliver_posted_interrupt()
4284 return -1; in vmx_deliver_posted_interrupt()
4286 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) in vmx_deliver_posted_interrupt()
4290 if (pi_test_and_set_on(&vmx->pi_desc)) in vmx_deliver_posted_interrupt()
4295 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is in vmx_deliver_posted_interrupt()
4297 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE. in vmx_deliver_posted_interrupt()
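Stripped of the vcpu->mode handshake and barriers, the core hand-off above is two atomic operations on the posted-interrupt descriptor: set the vector's bit in the PIR, then test-and-set the ON (outstanding notification) bit, sending the notification IPI only if ON was previously clear. A toy single-descriptor model using C11 atomics; the real code additionally handles self-IPIs and memory ordering:

/* Toy model of the posted-interrupt descriptor hand-off. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_pi_desc {
	_Atomic unsigned long pir[4];   /* 256 pending-vector bits */
	atomic_bool on;                 /* outstanding-notification bit */
};

static bool post_interrupt(struct toy_pi_desc *pi, unsigned int vector)
{
	unsigned long mask = 1UL << (vector % 64);

	/* Mark the vector pending... */
	atomic_fetch_or(&pi->pir[vector / 64], mask);
	/* ...and request a notification IPI only if ON was previously clear. */
	return !atomic_exchange(&pi->on, true);
}

int main(void)
{
	struct toy_pi_desc pi = { 0 };

	printf("send IPI? %d\n", post_interrupt(&pi, 236));  /* 1: first post */
	printf("send IPI? %d\n", post_interrupt(&pi, 32));   /* 0: ON already set */
	return 0;
}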
4306 struct kvm_vcpu *vcpu = apic->vcpu; in vmx_deliver_interrupt()
4313 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, in vmx_deliver_interrupt()
4319 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
4321 * Note that host-state that does change is set elsewhere. E.g., host-state
4340 vmx->loaded_vmcs->host_state.cr3 = cr3; in vmx_set_constant_host_state()
4345 vmx->loaded_vmcs->host_state.cr4 = cr4; in vmx_set_constant_host_state()
4371 * SYSENTER is used for 32-bit system calls on either 32-bit or in vmx_set_constant_host_state()
4372 * 64-bit kernels. It is always zero if neither is allowed; otherwise in vmx_set_constant_host_state()
4373 * vmx_vcpu_load_vmcs loads it with the per-CPU entry stack (and may in vmx_set_constant_host_state()
4393 struct kvm_vcpu *vcpu = &vmx->vcpu; in set_cr4_guest_host_mask()
4395 vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS & in set_cr4_guest_host_mask()
4396 ~vcpu->arch.cr4_guest_rsvd_bits; in set_cr4_guest_host_mask()
4398 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS; in set_cr4_guest_host_mask()
4399 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS; in set_cr4_guest_host_mask()
4401 if (is_guest_mode(&vmx->vcpu)) in set_cr4_guest_host_mask()
4402 vcpu->arch.cr4_guest_owned_bits &= in set_cr4_guest_host_mask()
4403 ~get_vmcs12(vcpu)->cr4_guest_host_mask; in set_cr4_guest_host_mask()
4404 vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits); in set_cr4_guest_host_mask()
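The CR4_GUEST_HOST_MASK written above is the complement of the guest-owned bits: a 1 in the mask marks a host-owned bit, so guest reads of it come from the read shadow and guest writes of it trap. That is why vmx_cache_reg() (earlier in the listing) composes the guest-visible CR4 from two sources. A worked example, assuming for illustration that only CR4.PGE (bit 7) is guest-owned:

/* How a guest CR4 read decomposes under the guest/host mask. */
#include <stdio.h>

int main(void)
{
	unsigned long guest_owned = 1UL << 7;   /* assume only CR4.PGE */
	unsigned long cached_cr4  = 0x2020;     /* KVM's shadowed view */
	unsigned long vmcs_cr4    = 0x20a0;     /* guest flipped PGE directly */

	/* Host-owned bits from the cache, guest-owned bits from the VMCS,
	 * mirroring vmx_cache_reg(); the VMCS mask itself is ~guest_owned. */
	unsigned long cr4 = (cached_cr4 & ~guest_owned) |
			    (vmcs_cr4 & guest_owned);

	printf("cr4 = %#lx, CR4_GUEST_HOST_MASK = %#lx\n", cr4, ~guest_owned);
	return 0;
}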
4411 if (!kvm_vcpu_apicv_active(&vmx->vcpu)) in vmx_pin_based_exec_ctrl()
4464 vmx->nested.update_vmcs01_apicv_status = true; in vmx_refresh_apicv_exec_ctrl()
4504 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) in vmx_exec_control()
4507 if (!cpu_need_tpr_shadow(&vmx->vcpu)) in vmx_exec_control()
4523 if (kvm_mwait_in_guest(vmx->vcpu.kvm)) in vmx_exec_control()
4526 if (kvm_hlt_in_guest(vmx->vcpu.kvm)) in vmx_exec_control()
4539 if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu)) in vmx_tertiary_exec_control()
4555 * If the control is for an opt-in feature, clear the control if the in vmx_adjust_secondary_exec_control()
4557 * control is opt-out, i.e. an exiting control, clear the control if in vmx_adjust_secondary_exec_control()
4570 kvm_check_has_quirk(vmx->vcpu.kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) { in vmx_adjust_secondary_exec_control()
4579 vmx->nested.msrs.secondary_ctls_high |= control; in vmx_adjust_secondary_exec_control()
4581 vmx->nested.msrs.secondary_ctls_high &= ~control; in vmx_adjust_secondary_exec_control()
4592 struct kvm_vcpu *__vcpu = &(vmx)->vcpu; \
4602 /* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
4611 struct kvm_vcpu *vcpu = &vmx->vcpu; in vmx_secondary_exec_control()
4619 if (vmx->vpid == 0) in vmx_secondary_exec_control()
4628 if (kvm_pause_in_guest(vmx->vcpu.kvm)) in vmx_secondary_exec_control()
4657 if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging)) in vmx_secondary_exec_control()
4688 if (!vcpu->kvm->arch.bus_lock_detection_enabled) in vmx_secondary_exec_control()
4691 if (!kvm_notify_vmexit_enabled(vcpu->kvm)) in vmx_secondary_exec_control()
4699 return get_order(kvm->arch.max_vcpu_ids * sizeof(*to_kvm_vmx(kvm)->pid_table)); in vmx_get_pid_table_order()
4710 if (kvm_vmx->pid_table) in vmx_alloc_ipiv_pid_table()
4716 return -ENOMEM; in vmx_alloc_ipiv_pid_table()
4718 kvm_vmx->pid_table = (void *)page_address(pages); in vmx_alloc_ipiv_pid_table()
4731 struct kvm *kvm = vmx->vcpu.kvm; in init_vmcs()
4738 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); in init_vmcs()
4749 if (vmx->ve_info) in init_vmcs()
4751 __pa(vmx->ve_info)); in init_vmcs()
4757 if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) { in init_vmcs()
4766 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); in init_vmcs()
4769 if (vmx_can_use_ipiv(&vmx->vcpu)) { in init_vmcs()
4770 vmcs_write64(PID_POINTER_TABLE, __pa(kvm_vmx->pid_table)); in init_vmcs()
4771 vmcs_write16(LAST_PID_POINTER_INDEX, kvm->arch.max_vcpu_ids - 1); in init_vmcs()
4776 vmx->ple_window = ple_window; in init_vmcs()
4777 vmx->ple_window_dirty = true; in init_vmcs()
4781 vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window); in init_vmcs()
4798 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); in init_vmcs()
4800 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); in init_vmcs()
4803 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in init_vmcs()
4810 vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); in init_vmcs()
4811 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits); in init_vmcs()
4815 if (vmx->vpid != 0) in init_vmcs()
4816 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in init_vmcs()
4822 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); in init_vmcs()
4826 vmx_write_encls_bitmap(&vmx->vcpu, NULL); in init_vmcs()
4829 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); in init_vmcs()
4831 vmx->pt_desc.guest.output_mask = 0x7F; in init_vmcs()
4842 if (cpu_need_tpr_shadow(&vmx->vcpu)) in init_vmcs()
4844 __pa(vmx->vcpu.arch.apic->regs)); in init_vmcs()
4858 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) in __vmx_vcpu_reset()
4859 memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs)); in __vmx_vcpu_reset()
4863 vmx->nested.posted_intr_nv = -1; in __vmx_vcpu_reset()
4864 vmx->nested.vmxon_ptr = INVALID_GPA; in __vmx_vcpu_reset()
4865 vmx->nested.current_vmptr = INVALID_GPA; in __vmx_vcpu_reset()
4868 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; in __vmx_vcpu_reset()
4871 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) in __vmx_vcpu_reset()
4872 vcpu->arch.microcode_version = 0x100000000ULL; in __vmx_vcpu_reset()
4873 vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED; in __vmx_vcpu_reset()
4879 vmx->pi_desc.nv = POSTED_INTR_VECTOR; in __vmx_vcpu_reset()
4880 __pi_set_sn(&vmx->pi_desc); in __vmx_vcpu_reset()
4890 vmx->rmode.vm86_active = 0; in vmx_vcpu_reset()
4891 vmx->spec_ctrl = 0; in vmx_vcpu_reset()
4893 vmx->msr_ia32_umwait_control = 0; in vmx_vcpu_reset()
4895 vmx->hv_deadline_tsc = -1; in vmx_vcpu_reset()
4937 vpid_sync_context(vmx->vpid); in vmx_vcpu_reset()
4962 int irq = vcpu->arch.interrupt.nr; in vmx_inject_irq()
4964 trace_kvm_inj_virq(irq, vcpu->arch.interrupt.soft, reinjected); in vmx_inject_irq()
4966 ++vcpu->stat.irq_injections; in vmx_inject_irq()
4967 if (vmx->rmode.vm86_active) { in vmx_inject_irq()
4969 if (vcpu->arch.interrupt.soft) in vmx_inject_irq()
4970 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_inject_irq()
4975 if (vcpu->arch.interrupt.soft) { in vmx_inject_irq()
4978 vmx->vcpu.arch.event_exit_inst_len); in vmx_inject_irq()
4992 * Tracking the NMI-blocked state in software is built upon in vmx_inject_nmi()
4994 * well-behaving guests: They have to keep IRQs disabled at in vmx_inject_nmi()
4999 vmx->loaded_vmcs->soft_vnmi_blocked = 1; in vmx_inject_nmi()
5000 vmx->loaded_vmcs->vnmi_blocked_time = 0; in vmx_inject_nmi()
5003 ++vcpu->stat.nmi_injections; in vmx_inject_nmi()
5004 vmx->loaded_vmcs->nmi_known_unmasked = false; in vmx_inject_nmi()
5006 if (vmx->rmode.vm86_active) { in vmx_inject_nmi()
5023 return vmx->loaded_vmcs->soft_vnmi_blocked; in vmx_get_nmi_mask()
5024 if (vmx->loaded_vmcs->nmi_known_unmasked) in vmx_get_nmi_mask()
5027 vmx->loaded_vmcs->nmi_known_unmasked = !masked; in vmx_get_nmi_mask()
5036 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { in vmx_set_nmi_mask()
5037 vmx->loaded_vmcs->soft_vnmi_blocked = masked; in vmx_set_nmi_mask()
5038 vmx->loaded_vmcs->vnmi_blocked_time = 0; in vmx_set_nmi_mask()
5041 vmx->loaded_vmcs->nmi_known_unmasked = !masked; in vmx_set_nmi_mask()
5056 if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) in vmx_nmi_blocked()
5066 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_nmi_allowed()
5067 return -EBUSY; in vmx_nmi_allowed()
5069 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */ in vmx_nmi_allowed()
5071 return -EBUSY; in vmx_nmi_allowed()
5093 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_interrupt_allowed()
5094 return -EBUSY; in vmx_interrupt_allowed()
5097 * An IRQ must not be injected into L2 if it's supposed to VM-Exit, in vmx_interrupt_allowed()
5101 return -EBUSY; in vmx_interrupt_allowed()
5113 mutex_lock(&kvm->slots_lock); in vmx_set_tss_addr()
5116 mutex_unlock(&kvm->slots_lock); in vmx_set_tss_addr()
5121 to_kvm_vmx(kvm)->tss_addr = addr; in vmx_set_tss_addr()
5128 to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr; in vmx_set_identity_map_addr()
5140 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
5142 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) in rmode_exception()
5146 return !(vcpu->guest_debug & in rmode_exception()
5170 if (vcpu->arch.halt_request) { in handle_rmode_exception()
5171 vcpu->arch.halt_request = 0; in handle_rmode_exception()
5201 * - Guest CPL == 3 (user mode)
5202 * - Guest has #AC detection enabled in CR0 (i.e. CR0.AM is set)
5203 * - Guest EFLAGS has AC bit set (see the sketch below)
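/*
 * Editor's sketch: the three conditions above collapse into a single
 * predicate. CR0.AM and EFLAGS.AC are both bit 18; this helper is
 * illustrative, not the in-tree implementation.
 */
#include <stdbool.h>

#define X86_CR0_AM	(1UL << 18)
#define X86_EFLAGS_AC	(1UL << 18)

bool guest_wants_ac(unsigned int cpl, unsigned long cr0, unsigned long rflags)
{
	return cpl == 3 && (cr0 & X86_CR0_AM) && (rflags & X86_EFLAGS_AC);
}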
5217 struct kvm_run *kvm_run = vcpu->run; in handle_exception_nmi()
5222 vect_info = vmx->idt_vectoring_info; in handle_exception_nmi()
5227 * vmx_vcpu_run() if a #MC occurs on VM-Entry. NMIs are handled by in handle_exception_nmi()
5248 struct vmx_ve_information *ve_info = vmx->ve_info; in handle_exception_nmi()
5250 WARN_ONCE(ve_info->exit_reason != EXIT_REASON_EPT_VIOLATION, in handle_exception_nmi()
5251 "Unexpected #VE on VM-Exit reason 0x%x", ve_info->exit_reason); in handle_exception_nmi()
5253 kvm_mmu_print_sptes(vcpu, ve_info->guest_physical_address, "#VE"); in handle_exception_nmi()
5261 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { in handle_exception_nmi()
5266 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero in handle_exception_nmi()
5283 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_exception_nmi()
5284 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; in handle_exception_nmi()
5285 vcpu->run->internal.ndata = 4; in handle_exception_nmi()
5286 vcpu->run->internal.data[0] = vect_info; in handle_exception_nmi()
5287 vcpu->run->internal.data[1] = intr_info; in handle_exception_nmi()
5288 vcpu->run->internal.data[2] = error_code; in handle_exception_nmi()
5289 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu; in handle_exception_nmi()
5295 if (enable_ept && !vcpu->arch.apf.host_apf_flags) { in handle_exception_nmi()
5309 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) in handle_exception_nmi()
5315 if (!(vcpu->guest_debug & in handle_exception_nmi()
5319 * instruction. ICEBP generates a trap-like #DB, but in handle_exception_nmi()
5321 * is an instruction intercept, i.e. the VM-Exit occurs in handle_exception_nmi()
5323 * avoid single-step #DB and MTF updates, as ICEBP is in handle_exception_nmi()
5328 * if single-step is enabled in RFLAGS and STI or MOVSS in handle_exception_nmi()
5330 * on VM-Exit due to #DB interception. VM-Entry has a in handle_exception_nmi()
5331 * consistency check that a single-step #DB is pending in handle_exception_nmi()
5335 * delay when activating single-step breakpoints must in handle_exception_nmi()
5337 * as appropriate for all other VM-Exits types. in handle_exception_nmi()
5350 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; in handle_exception_nmi()
5351 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); in handle_exception_nmi()
5359 vmx->vcpu.arch.event_exit_inst_len = in handle_exception_nmi()
5361 kvm_run->exit_reason = KVM_EXIT_DEBUG; in handle_exception_nmi()
5362 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in handle_exception_nmi()
5363 kvm_run->debug.arch.exception = ex_no; in handle_exception_nmi()
5380 kvm_run->exit_reason = KVM_EXIT_EXCEPTION; in handle_exception_nmi()
5381 kvm_run->ex.exception = ex_no; in handle_exception_nmi()
5382 kvm_run->ex.error_code = error_code; in handle_exception_nmi()
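/*
 * Editor's sketch of the other side of this exit: a minimal VMM loop
 * consuming the KVM_EXIT_EXCEPTION fields filled in above. "run" is
 * the mmap()ed vCPU region; error handling is elided.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int run_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;
	if (run->exit_reason == KVM_EXIT_EXCEPTION)
		fprintf(stderr, "guest exception %u, error code 0x%x\n",
			run->ex.exception, run->ex.error_code);
	return 0;
}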
5390 ++vcpu->stat.irq_exits; in handle_external_interrupt()
5396 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in handle_triple_fault()
5397 vcpu->mmio_needed = 0; in handle_triple_fault()
5410 ++vcpu->stat.io_exits; in handle_io()
5432 /* Called to set CR0 as appropriate for a MOV-to-CR0 exit. */
5444 * hardware. It consists of the L2-owned bits from the new in handle_set_cr0()
5445 * value combined with the L1-owned bits from L1's guest_cr0. in handle_set_cr0()
5447 val = (val & ~vmcs12->cr0_guest_host_mask) | in handle_set_cr0()
5448 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); in handle_set_cr0()
5466 val = (val & ~vmcs12->cr4_guest_host_mask) | in handle_set_cr4()
5467 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); in handle_set_cr4()
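/*
 * Editor's sketch: the CR0 and CR4 reads above use the same bit-mixing
 * idiom -- L2-owned bits come from the new value, L1-owned bits from
 * L1's shadow. The helper name is illustrative.
 */
#include <stdint.h>

uint64_t mix_owned_bits(uint64_t new_val, uint64_t l1_shadow, uint64_t l1_mask)
{
	return (new_val & ~l1_mask) | (l1_shadow & l1_mask);
}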
5526 * KVM_GUESTDBG_SINGLESTEP-triggered in handle_cr()
5529 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; in handle_cr()
5535 KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS"); in handle_cr()
5536 return -EIO; in handle_cr()
5562 vcpu->run->exit_reason = 0; in handle_cr()
5587 * As the VM-exit takes precedence over the debug trap, we in handle_dr()
5591 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in handle_dr()
5592 vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW; in handle_dr()
5593 vcpu->run->debug.arch.dr7 = dr7; in handle_dr()
5594 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); in handle_dr()
5595 vcpu->run->debug.arch.exception = DB_VECTOR; in handle_dr()
5596 vcpu->run->exit_reason = KVM_EXIT_DEBUG; in handle_dr()
5604 if (vcpu->guest_debug == 0) { in handle_dr()
5612 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in handle_dr()
5630 get_debugreg(vcpu->arch.db[0], 0); in vmx_sync_dirty_debug_regs()
5631 get_debugreg(vcpu->arch.db[1], 1); in vmx_sync_dirty_debug_regs()
5632 get_debugreg(vcpu->arch.db[2], 2); in vmx_sync_dirty_debug_regs()
5633 get_debugreg(vcpu->arch.db[3], 3); in vmx_sync_dirty_debug_regs()
5634 get_debugreg(vcpu->arch.dr6, 6); in vmx_sync_dirty_debug_regs()
5635 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); in vmx_sync_dirty_debug_regs()
5637 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; in vmx_sync_dirty_debug_regs()
5650 set_debugreg(vcpu->arch.dr6, 6); in vmx_set_dr6()
5670 ++vcpu->stat.irq_window_exits; in handle_interrupt_window()
5692 * is ignored, so short-circuit here by avoiding in handle_apic_access()
5709 /* EOI-induced VM exit is trap-like, so there is no need to adjust IP */ in handle_apic_eoi_induced()
5719 * APIC-write VM-Exit is trap-like, KVM doesn't need to advance RIP and in handle_apic_write()
5722 * the vAPIC page for the correct 16-byte chunk. KVM needs only to in handle_apic_write()
5740 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); in handle_task_switch()
5741 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); in handle_task_switch()
5742 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); in handle_task_switch()
5750 vcpu->arch.nmi_injected = false; in handle_task_switch()
5758 if (vmx->idt_vectoring_info & in handle_task_switch()
5784 type == INTR_TYPE_SOFT_INTR ? idt_index : -1, in handle_task_switch()
5802 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_ept_violation()
5832 * would also use advanced VM-exit information for EPT violations to in handle_ept_violation()
5864 if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm)) in handle_nmi_window()
5865 return -EIO; in handle_nmi_window()
5868 ++vcpu->stat.nmi_window_exits; in handle_nmi_window()
5878 return vmx->emulation_required && !vmx->rmode.vm86_active && in vmx_emulation_required_with_pending_exception()
5879 (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected); in vmx_emulation_required_with_pending_exception()
5891 while (vmx->emulation_required && count-- != 0) { in handle_invalid_guest_state()
5893 return handle_interrupt_window(&vmx->vcpu); in handle_invalid_guest_state()
5906 if (vcpu->arch.halt_request) { in handle_invalid_guest_state()
5907 vcpu->arch.halt_request = 0; in handle_invalid_guest_state()
5934 * Indicate a busy-waiting vCPU in a spinlock. We do not enable PAUSE
5935 * exiting, so we only get here on CPUs with PAUSE-Loop-Exiting.
5939 if (!kvm_pause_in_guest(vcpu->kvm)) in handle_pause()
5943 * Intel SDM Vol. 3, Section 25.1.3 says: The "PAUSE-loop exiting" in handle_pause()
5944 * VM-execution control is ignored if CPL > 0. OTOH, KVM in handle_pause()
5992 trace_kvm_pml_full(vcpu->vcpu_id); in handle_pml_full()
6000 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_pml_full()
6019 * In the *extremely* unlikely scenario that this is a spurious VM-Exit in handle_fastpath_preemption_timer()
6021 * exit and re-enter the guest. in handle_fastpath_preemption_timer()
6023 if (unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) in handle_fastpath_preemption_timer()
6035 * expiration likely requires synthesizing a nested VM-Exit. in handle_fastpath_preemption_timer()
6047 * This non-fastpath handler is reached if and only if the preemption in handle_preemption_timer()
6083 * VM-Exits. Unconditionally set the flag here and leave the handling to in handle_bus_lock_vmexit()
6086 to_vmx(vcpu)->exit_reason.bus_lock_detected = true; in handle_bus_lock_vmexit()
6095 ++vcpu->stat.notify_window_exits; in handle_notify()
6105 if (vcpu->kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_USER || in handle_notify()
6107 vcpu->run->exit_reason = KVM_EXIT_NOTIFY; in handle_notify()
6108 vcpu->run->notify.flags = context_invalid ? in handle_notify()
6184 *reason = vmx->exit_reason.full; in vmx_get_exit_info()
6186 if (!(vmx->exit_reason.failed_vmentry)) { in vmx_get_exit_info()
6187 *info2 = vmx->idt_vectoring_info; in vmx_get_exit_info()
6211 if (vmx->pml_pg) { in vmx_destroy_pml_buffer()
6212 __free_page(vmx->pml_pg); in vmx_destroy_pml_buffer()
6213 vmx->pml_pg = NULL; in vmx_destroy_pml_buffer()
6242 pml_buf = page_address(vmx->pml_pg); in vmx_flush_pml_buffer()
6244 for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--) { in vmx_flush_pml_buffer()
6248 WARN_ON(gpa & (PAGE_SIZE - 1)); in vmx_flush_pml_buffer()
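/*
 * Editor's sketch: the PML buffer is one 4KiB page of 512 u64 GPAs that
 * the CPU fills from the highest index downward, hence the walk above
 * runs from the head (511) toward the current tail. Logged GPAs are
 * page aligned, so the low 12 bits are expected to be clear.
 */
#include <stdint.h>
#include <assert.h>

#define PML_NR_ENTRIES	512

void walk_pml(const uint64_t *pml_buf, int tail, void (*mark_dirty)(uint64_t))
{
	for (int i = PML_NR_ENTRIES - 1; i >= tail; i--) {
		uint64_t gpa = pml_buf[i];

		assert((gpa & 0xfffULL) == 0);	/* page-aligned GPA */
		mark_dirty(gpa);
	}
}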
6260 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), in vmx_dump_sel()
6261 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), in vmx_dump_sel()
6262 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); in vmx_dump_sel()
6269 vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); in vmx_dump_dtsel()
6278 for (i = 0, e = m->val; i < m->nr; ++i, ++e) in vmx_dump_msrs()
6279 pr_err(" %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value); in vmx_dump_msrs()
6312 pr_err("VMCS %p, last attempted VM-entry on CPU %d\n", in dump_vmcs()
6313 vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu); in dump_vmcs()
6344 efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER); in dump_vmcs()
6349 vmx->msr_autoload.guest.val[efer_slot].value); in dump_vmcs()
6352 vcpu->arch.efer | (EFER_LMA | EFER_LME)); in dump_vmcs()
6355 vcpu->arch.efer & ~(EFER_LMA | EFER_LME)); in dump_vmcs()
6374 vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest); in dump_vmcs()
6376 vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest); in dump_vmcs()
6407 vmx_dump_msrs("host autoload", &vmx->msr_autoload.host); in dump_vmcs()
6442 pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR)); in dump_vmcs()
6443 pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR)); in dump_vmcs()
6456 struct vmx_ve_information *ve_info = vmx->ve_info; in dump_vmcs()
6468 ve_info->exit_reason, ve_info->delivery, in dump_vmcs()
6469 ve_info->exit_qualification, in dump_vmcs()
6470 ve_info->guest_linear_address, in dump_vmcs()
6471 ve_info->guest_physical_address, ve_info->eptp_index); in dump_vmcs()
6482 union vmx_exit_reason exit_reason = vmx->exit_reason; in __vmx_handle_exit()
6483 u32 vectoring_info = vmx->idt_vectoring_info; in __vmx_handle_exit()
6498 * KVM should never reach this point with a pending nested VM-Enter. in __vmx_handle_exit()
6499 * More specifically, short-circuiting VM-Entry to emulate L2 due to in __vmx_handle_exit()
6501 * allowed a nested VM-Enter with an invalid vmcs12. More below. in __vmx_handle_exit()
6503 if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm)) in __vmx_handle_exit()
6504 return -EIO; in __vmx_handle_exit()
6519 * address-translation-based dirty tracking (e.g. EPT write in __vmx_handle_exit()
6529 * operation, nested VM-Enter rejects any attempt to enter L2 in __vmx_handle_exit()
6538 if (vmx->emulation_required) { in __vmx_handle_exit()
6548 if (vmx->emulation_required) in __vmx_handle_exit()
6553 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; in __vmx_handle_exit()
6554 vcpu->run->fail_entry.hardware_entry_failure_reason in __vmx_handle_exit()
6556 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; in __vmx_handle_exit()
6560 if (unlikely(vmx->fail)) { in __vmx_handle_exit()
6562 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; in __vmx_handle_exit()
6563 vcpu->run->fail_entry.hardware_entry_failure_reason in __vmx_handle_exit()
6565 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; in __vmx_handle_exit()
6582 vmx->loaded_vmcs->soft_vnmi_blocked)) { in __vmx_handle_exit()
6584 vmx->loaded_vmcs->soft_vnmi_blocked = 0; in __vmx_handle_exit()
6585 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && in __vmx_handle_exit()
6586 vcpu->arch.nmi_pending) { in __vmx_handle_exit()
6589 * NMI-blocked window if the guest runs with IRQs in __vmx_handle_exit()
6593 printk(KERN_WARNING "%s: Breaking out of NMI-blocked " in __vmx_handle_exit()
6595 __func__, vcpu->vcpu_id); in __vmx_handle_exit()
6596 vmx->loaded_vmcs->soft_vnmi_blocked = 0; in __vmx_handle_exit()
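/*
 * Editor's sketch of the watchdog above: if the software-tracked
 * NMI-blocked window has stayed open for more than a second of
 * accumulated run time while an NMI is pending, assume the guest is
 * misbehaving and force the window open. Names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

#define VNMI_BLOCKED_LIMIT_NS	1000000000LL	/* 1 second, as above */

bool should_break_vnmi_block(int64_t blocked_ns, bool nmi_pending)
{
	return nmi_pending && blocked_ns > VNMI_BLOCKED_LIMIT_NS;
}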
6631 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in __vmx_handle_exit()
6632 vcpu->run->internal.suberror = in __vmx_handle_exit()
6634 vcpu->run->internal.ndata = 2; in __vmx_handle_exit()
6635 vcpu->run->internal.data[0] = exit_reason.full; in __vmx_handle_exit()
6636 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; in __vmx_handle_exit()
6648 if (to_vmx(vcpu)->exit_reason.bus_lock_detected) { in vmx_handle_exit()
6650 vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; in vmx_handle_exit()
6652 vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; in vmx_handle_exit()
6680 * Clear the per-vCPU flush bit; it gets set again if the vCPU in vmx_l1d_flush()
6685 flush_l1d = vcpu->arch.l1tf_flush_l1d; in vmx_l1d_flush()
6686 vcpu->arch.l1tf_flush_l1d = false; in vmx_l1d_flush()
6689 * Clear the per-CPU flush bit; it gets set again from in vmx_l1d_flush()
6699 vcpu->stat.l1d_flush++; in vmx_l1d_flush()
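/*
 * Editor's sketch of the decision logic above: in the "cond" flush mode
 * the L1D cache is flushed only if the per-vCPU or the per-CPU flag was
 * armed since the last VM-Enter; "always" mode flushes unconditionally.
 * Both flags are consumed (cleared) on every check.
 */
#include <stdbool.h>

bool should_flush_l1d(bool always, bool *vcpu_flag, bool *cpu_flag)
{
	bool armed = *vcpu_flag || *cpu_flag;

	*vcpu_flag = false;	/* re-armed when emulation touches guest memory */
	*cpu_flag = false;	/* re-armed from interrupt handlers */
	return always || armed;
}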
6738 tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr; in vmx_update_cr8_intercept()
6740 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold; in vmx_update_cr8_intercept()
6759 vmx->nested.change_vmcs01_virtual_apic_mode = true; in vmx_set_virtual_apic_mode()
6782 * the guest may have inserted a non-APIC mapping into in vmx_set_virtual_apic_mode()
6802 struct kvm *kvm = vcpu->kvm; in vmx_set_apic_access_page_addr()
6812 to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true; in vmx_set_apic_access_page_addr()
6827 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) in vmx_set_apic_access_page_addr()
6836 mmu_seq = kvm->mmu_invalidate_seq; in vmx_set_apic_access_page_addr()
6841 * controls the APIC-access page memslot, and only deletes the memslot in vmx_set_apic_access_page_addr()
6848 read_lock(&vcpu->kvm->mmu_lock); in vmx_set_apic_access_page_addr()
6867 read_unlock(&vcpu->kvm->mmu_lock); in vmx_set_apic_access_page_addr()
6880 * VM-Exit, otherwise L1 will run with a stale SVI. in vmx_hwapic_isr_update()
6890 WARN_ON_ONCE(vcpu->wants_to_run && in vmx_hwapic_isr_update()
6892 to_vmx(vcpu)->nested.update_vmcs01_hwapic_isr = true; in vmx_hwapic_isr_update()
6896 if (max_isr == -1) in vmx_hwapic_isr_update()
6913 if (vector == -1) in vmx_set_rvi()
6931 if (KVM_BUG_ON(!enable_apicv, vcpu->kvm)) in vmx_sync_pir_to_irr()
6932 return -EIO; in vmx_sync_pir_to_irr()
6934 if (pi_test_on(&vmx->pi_desc)) { in vmx_sync_pir_to_irr()
6935 pi_clear_on(&vmx->pi_desc); in vmx_sync_pir_to_irr()
6942 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); in vmx_sync_pir_to_irr()
6955 * VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected in vmx_sync_pir_to_irr()
6961 * a VM-Exit and the subsequent entry will call sync_pir_to_irr. in vmx_sync_pir_to_irr()
6986 pi_clear_on(&vmx->pi_desc); in vmx_apicv_pre_state_restore()
6987 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); in vmx_apicv_pre_state_restore()
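/*
 * Editor's sketch: syncing the 256-bit Posted-Interrupt Request field
 * into the vIRR. Each 64-bit chunk is atomically read-and-cleared so
 * vectors posted concurrently by other CPUs are never lost; the return
 * value is the highest pending vector, or -1. Illustrative only.
 */
#include <stdint.h>
#include <stdatomic.h>

int sync_pir_to_irr(_Atomic uint64_t pir[4], uint64_t irr[4])
{
	int max_irr = -1;

	for (int i = 0; i < 4; i++) {
		uint64_t chunk = atomic_exchange(&pir[i], 0);

		irr[i] |= chunk;
		if (chunk)	/* higher i == higher vectors */
			max_irr = i * 64 + 63 - __builtin_clzll(chunk);
	}
	return max_irr;
}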
7005 * only when xfd contains a non-zero value. in handle_nm_fault_irqoff()
7009 if (vcpu->arch.guest_fpu.fpstate->xfd) in handle_nm_fault_irqoff()
7010 rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); in handle_nm_fault_irqoff()
7017 vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags(); in handle_exception_irqoff()
7031 if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm, in handle_external_interrupt_irqoff()
7032 "unexpected VM-Exit interrupt info: 0x%x", intr_info)) in handle_external_interrupt_irqoff()
7042 vcpu->arch.at_instruction_boundary = true; in handle_external_interrupt_irqoff()
7049 if (vmx->emulation_required) in vmx_handle_exit_irqoff()
7052 if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) in vmx_handle_exit_irqoff()
7054 else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI) in vmx_handle_exit_irqoff()
7091 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; in vmx_recover_nmi_blocking()
7094 if (vmx->loaded_vmcs->nmi_known_unmasked) in vmx_recover_nmi_blocking()
7097 exit_intr_info = vmx_get_intr_info(&vmx->vcpu); in vmx_recover_nmi_blocking()
7102 * Re-set the "block by NMI" bit before VM entry if the vmexit was caused by in vmx_recover_nmi_blocking()
7106 * If the VM exit sets the valid bit in the IDT-vectoring in vmx_recover_nmi_blocking()
7115 vmx->loaded_vmcs->nmi_known_unmasked = in vmx_recover_nmi_blocking()
7118 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) in vmx_recover_nmi_blocking()
7119 vmx->loaded_vmcs->vnmi_blocked_time += in vmx_recover_nmi_blocking()
7121 vmx->loaded_vmcs->entry_time)); in vmx_recover_nmi_blocking()
7135 vcpu->arch.nmi_injected = false; in __vmx_complete_interrupts()
7149 vcpu->arch.nmi_injected = true; in __vmx_complete_interrupts()
7158 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
7168 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
7180 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, in vmx_complete_interrupts()
7199 struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu); in atomic_switch_perf_msrs()
7201 pmu->host_cross_mapped_mask = 0; in atomic_switch_perf_msrs()
7202 if (pmu->pebs_enable & pmu->global_ctrl) in atomic_switch_perf_msrs()
7226 vmx->loaded_vmcs->hv_timer_soft_disabled = false; in vmx_update_hv_timer()
7227 } else if (vmx->hv_deadline_tsc != -1) { in vmx_update_hv_timer()
7229 if (vmx->hv_deadline_tsc > tscl) in vmx_update_hv_timer()
7230 /* set_hv_timer ensures the delta fits in 32 bits */ in vmx_update_hv_timer()
7231 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> in vmx_update_hv_timer()
7237 vmx->loaded_vmcs->hv_timer_soft_disabled = false; in vmx_update_hv_timer()
7238 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) { in vmx_update_hv_timer()
7239 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1); in vmx_update_hv_timer()
7240 vmx->loaded_vmcs->hv_timer_soft_disabled = true; in vmx_update_hv_timer()
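/*
 * Editor's sketch: the VMX preemption timer ticks once every 2^rate TSC
 * cycles, where "rate" is reported in IA32_VMX_MISC[4:0], so converting
 * an absolute TSC deadline to the 32-bit timer value is a subtract and
 * a right shift. set_hv_timer has already ensured the result fits.
 */
#include <stdint.h>

uint32_t tsc_deadline_to_timer(uint64_t deadline, uint64_t now, unsigned int rate)
{
	if (deadline <= now)
		return 0;	/* already expired: fire immediately */
	return (uint32_t)((deadline - now) >> rate);
}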
7246 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) { in vmx_update_host_rsp()
7247 vmx->loaded_vmcs->host_state.rsp = host_rsp; in vmx_update_host_rsp()
7261 vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL); in vmx_spec_ctrl_restore_host()
7271 vmx->spec_ctrl != hostval) in vmx_spec_ctrl_restore_host()
7285 to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_PREEMPTION_TIMER) in vmx_exit_handlers_fastpath()
7288 switch (to_vmx(vcpu)->exit_reason.basic) { in vmx_exit_handlers_fastpath()
7316 kvm_arch_has_assigned_device(vcpu->kvm)) in vmx_vcpu_enter_exit()
7321 if (vcpu->arch.cr2 != native_read_cr2()) in vmx_vcpu_enter_exit()
7322 native_write_cr2(vcpu->arch.cr2); in vmx_vcpu_enter_exit()
7324 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, in vmx_vcpu_enter_exit()
7327 vcpu->arch.cr2 = native_read_cr2(); in vmx_vcpu_enter_exit()
7328 vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET; in vmx_vcpu_enter_exit()
7330 vmx->idt_vectoring_info = 0; in vmx_vcpu_enter_exit()
7334 if (unlikely(vmx->fail)) { in vmx_vcpu_enter_exit()
7335 vmx->exit_reason.full = 0xdead; in vmx_vcpu_enter_exit()
7339 vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON); in vmx_vcpu_enter_exit()
7340 if (likely(!vmx->exit_reason.failed_vmentry)) in vmx_vcpu_enter_exit()
7341 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); in vmx_vcpu_enter_exit()
7343 if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI && in vmx_vcpu_enter_exit()
7364 vmx->loaded_vmcs->soft_vnmi_blocked)) in vmx_vcpu_run()
7365 vmx->loaded_vmcs->entry_time = ktime_get(); in vmx_vcpu_run()
7370 * consistency check VM-Exit due to invalid guest state and bail. in vmx_vcpu_run()
7372 if (unlikely(vmx->emulation_required)) { in vmx_vcpu_run()
7373 vmx->fail = 0; in vmx_vcpu_run()
7375 vmx->exit_reason.full = EXIT_REASON_INVALID_STATE; in vmx_vcpu_run()
7376 vmx->exit_reason.failed_vmentry = 1; in vmx_vcpu_run()
7378 vmx->exit_qualification = ENTRY_FAIL_DEFAULT; in vmx_vcpu_run()
7380 vmx->exit_intr_info = 0; in vmx_vcpu_run()
7386 if (vmx->ple_window_dirty) { in vmx_vcpu_run()
7387 vmx->ple_window_dirty = false; in vmx_vcpu_run()
7388 vmcs_write32(PLE_WINDOW, vmx->ple_window); in vmx_vcpu_run()
7395 WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync); in vmx_vcpu_run()
7398 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); in vmx_vcpu_run()
7400 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); in vmx_vcpu_run()
7401 vcpu->arch.regs_dirty = 0; in vmx_vcpu_run()
7405 * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time in vmx_vcpu_run()
7406 * it switches back to the current->mm, which can occur in KVM context in vmx_vcpu_run()
7408 * toggles a static key while handling a VM-Exit. in vmx_vcpu_run()
7411 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { in vmx_vcpu_run()
7413 vmx->loaded_vmcs->host_state.cr3 = cr3; in vmx_vcpu_run()
7417 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { in vmx_vcpu_run()
7419 vmx->loaded_vmcs->host_state.cr4 = cr4; in vmx_vcpu_run()
7422 /* When single-stepping over STI and MOV SS, we must clear the in vmx_vcpu_run()
7427 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in vmx_vcpu_run()
7441 smp_send_reschedule(vcpu->cpu); in vmx_vcpu_run()
7450 current_evmcs->hv_clean_fields |= in vmx_vcpu_run()
7453 current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu); in vmx_vcpu_run()
7457 if (vcpu->arch.host_debugctl) in vmx_vcpu_run()
7458 update_debugctlmsr(vcpu->arch.host_debugctl); in vmx_vcpu_run()
7482 if (vmx->nested.nested_run_pending && in vmx_vcpu_run()
7483 !vmx->exit_reason.failed_vmentry) in vmx_vcpu_run()
7484 ++vcpu->stat.nested_run; in vmx_vcpu_run()
7486 vmx->nested.nested_run_pending = 0; in vmx_vcpu_run()
7489 if (unlikely(vmx->fail)) in vmx_vcpu_run()
7492 if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY)) in vmx_vcpu_run()
7497 if (unlikely(vmx->exit_reason.failed_vmentry)) in vmx_vcpu_run()
7500 vmx->loaded_vmcs->launched = 1; in vmx_vcpu_run()
7514 free_vpid(vmx->vpid); in vmx_vcpu_free()
7516 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_vcpu_free()
7517 free_page((unsigned long)vmx->ve_info); in vmx_vcpu_free()
7529 INIT_LIST_HEAD(&vmx->pi_wakeup_list); in vmx_vcpu_create()
7531 err = -ENOMEM; in vmx_vcpu_create()
7533 vmx->vpid = allocate_vpid(); in vmx_vcpu_create()
7542 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); in vmx_vcpu_create()
7543 if (!vmx->pml_pg) in vmx_vcpu_create()
7548 vmx->guest_uret_msrs[i].mask = -1ull; in vmx_vcpu_create()
7557 tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR; in vmx_vcpu_create()
7560 err = alloc_loaded_vmcs(&vmx->vmcs01); in vmx_vcpu_create()
7565 * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a in vmx_vcpu_create()
7566 * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the in vmx_vcpu_create()
7572 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs; in vmx_vcpu_create()
7574 evmcs->hv_enlightenments_control.msr_bitmap = 1; in vmx_vcpu_create()
7578 bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS); in vmx_vcpu_create()
7579 bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS); in vmx_vcpu_create()
7590 if (kvm_cstate_in_guest(vcpu->kvm)) { in vmx_vcpu_create()
7597 vmx->loaded_vmcs = &vmx->vmcs01; in vmx_vcpu_create()
7600 err = kvm_alloc_apic_access_page(vcpu->kvm); in vmx_vcpu_create()
7606 err = init_rmode_identity_map(vcpu->kvm); in vmx_vcpu_create()
7611 err = -ENOMEM; in vmx_vcpu_create()
7615 BUILD_BUG_ON(sizeof(*vmx->ve_info) > PAGE_SIZE); in vmx_vcpu_create()
7622 vmx->ve_info = page_to_virt(page); in vmx_vcpu_create()
7626 WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id], in vmx_vcpu_create()
7627 __pa(&vmx->pi_desc) | PID_TABLE_ENTRY_VALID); in vmx_vcpu_create()
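/*
 * Editor's sketch: with IPI virtualization, each vCPU's slot in the PID
 * pointer table holds the physical address of its posted-interrupt
 * descriptor with a valid bit OR'ed in. The descriptor is 64-byte
 * aligned, so the low bits are free; bit 0 as the valid bit mirrors
 * PID_TABLE_ENTRY_VALID above and is otherwise an assumption.
 */
#include <stdint.h>

#define PID_ENTRY_VALID	1ULL

uint64_t make_pid_entry(uint64_t pi_desc_pa)
{
	return pi_desc_pa | PID_ENTRY_VALID;
}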
7632 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_vcpu_create()
7636 free_vpid(vmx->vpid); in vmx_vcpu_create()
7640 …nt and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/ad…
7641 …tion disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/ad…
7646 kvm->arch.pause_in_guest = true; in vmx_vm_init()
7684 * Force WB and ignore guest PAT if the VM does NOT have a non-coherent in vmx_get_mt_mask()
7689 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) in vmx_get_mt_mask()
7716 * (indicating "allowed-1") if they are supported in the guest's CPUID.
7723 vmx->nested.msrs.cr0_fixed1 = 0xffffffff; in nested_vmx_cr_fixed1_bits_update()
7724 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; in nested_vmx_cr_fixed1_bits_update()
7727 if (entry && (entry->_reg & (_cpuid_mask))) \ in nested_vmx_cr_fixed1_bits_update()
7728 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ in nested_vmx_cr_fixed1_bits_update()
7736 cr4_fixed1_update(X86_CR4_PSE, edx, feature_bit(PSE)); in nested_vmx_cr_fixed1_bits_update()
7771 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax; in update_intel_pt_cfg()
7772 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx; in update_intel_pt_cfg()
7773 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx; in update_intel_pt_cfg()
7774 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx; in update_intel_pt_cfg()
7778 vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps, in update_intel_pt_cfg()
7782 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS | in update_intel_pt_cfg()
7790 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering)) in update_intel_pt_cfg()
7791 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN; in update_intel_pt_cfg()
7797 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc)) in update_intel_pt_cfg()
7798 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC | in update_intel_pt_cfg()
7804 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc)) in update_intel_pt_cfg()
7805 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN | in update_intel_pt_cfg()
7809 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite)) in update_intel_pt_cfg()
7810 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW | in update_intel_pt_cfg()
7814 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace)) in update_intel_pt_cfg()
7815 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN; in update_intel_pt_cfg()
7818 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output)) in update_intel_pt_cfg()
7819 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA; in update_intel_pt_cfg()
7822 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys)) in update_intel_pt_cfg()
7823 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; in update_intel_pt_cfg()
7826 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) in update_intel_pt_cfg()
7827 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); in update_intel_pt_cfg()
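/*
 * Editor's sketch: each supported trace address range owns a 4-bit
 * ADDRn_CFG field in RTIT_CTL, packed upward from bit 32, so range i
 * occupies bits [32 + 4*i, 35 + 4*i] -- exactly the fields the loop
 * above unmasks from ctl_bitmask.
 */
#include <stdint.h>

uint64_t pt_addr_range_bits(unsigned int num_ranges)
{
	uint64_t allowed = 0;

	for (unsigned int i = 0; i < num_ranges; i++)
		allowed |= 0xfULL << (32 + i * 4);
	return allowed;
}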
7849 vmx->msr_ia32_feature_control_valid_bits |= in vmx_vcpu_after_set_cpuid()
7853 vmx->msr_ia32_feature_control_valid_bits &= in vmx_vcpu_after_set_cpuid()
7889 vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED; in vmx_vcpu_after_set_cpuid()
7891 vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED; in vmx_vcpu_after_set_cpuid()
7894 vmx->msr_ia32_feature_control_valid_bits |= in vmx_vcpu_after_set_cpuid()
7897 vmx->msr_ia32_feature_control_valid_bits &= in vmx_vcpu_after_set_cpuid()
7945 * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with in vmx_get_perf_capabilities()
8016 if (info->intercept == x86_intercept_in || in vmx_check_intercept_io()
8017 info->intercept == x86_intercept_ins) { in vmx_check_intercept_io()
8018 port = info->src_val; in vmx_check_intercept_io()
8019 size = info->dst_bytes; in vmx_check_intercept_io()
8021 port = info->dst_val; in vmx_check_intercept_io()
8022 size = info->src_bytes; in vmx_check_intercept_io()
8026 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction in vmx_check_intercept_io()
8027 * VM-exits depend on the 'unconditional IO exiting' VM-execution in vmx_check_intercept_io()
8030 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps. in vmx_check_intercept_io()
8049 switch (info->intercept) { in vmx_check_intercept()
8057 exception->vector = UD_VECTOR; in vmx_check_intercept()
8058 exception->error_code_valid = false; in vmx_check_intercept()
8085 * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides in vmx_check_intercept()
8088 * PAUSE-loop-exiting, software can't expect a given PAUSE to in vmx_check_intercept()
8092 if ((info->rep_prefix != REPE_PREFIX) || in vmx_check_intercept()
8111 u64 low = a << shift, high = a >> (64 - shift); in u64_shl_div_u64()
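/*
 * Editor's sketch: the high/low split above feeds a 128-by-64 divide.
 * With compiler __int128 support the same computation is a widening
 * shift and divide; a quotient that does not fit in 64 bits is the
 * caller's error case. divisor must be non-zero.
 */
#include <stdint.h>

int shl_div_u64(uint64_t a, unsigned int shift, uint64_t divisor, uint64_t *res)
{
	unsigned __int128 q = ((unsigned __int128)a << shift) / divisor;

	if (q > UINT64_MAX)
		return -1;	/* overflow */
	*res = (uint64_t)q;
	return 0;
}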
8130 struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer; in vmx_set_hv_timer()
8135 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; in vmx_set_hv_timer()
8137 ktimer->timer_advance_ns); in vmx_set_hv_timer()
8140 delta_tsc -= lapic_timer_advance_cycles; in vmx_set_hv_timer()
8145 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio && in vmx_set_hv_timer()
8148 vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc)) in vmx_set_hv_timer()
8149 return -ERANGE; in vmx_set_hv_timer()
8158 return -ERANGE; in vmx_set_hv_timer()
8160 vmx->hv_deadline_tsc = tscl + delta_tsc; in vmx_set_hv_timer()
8167 to_vmx(vcpu)->hv_deadline_tsc = -1; in vmx_cancel_hv_timer()
8179 vmx->nested.update_vmcs01_cpu_dirty_logging = true; in vmx_update_cpu_dirty_logging()
8188 if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging)) in vmx_update_cpu_dirty_logging()
8196 if (vcpu->arch.mcg_cap & MCG_LMCE_P) in vmx_setup_mce()
8197 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= in vmx_setup_mce()
8200 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= in vmx_setup_mce()
8208 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_smi_allowed()
8209 return -EBUSY; in vmx_smi_allowed()
8219 * SMI and RSM. Using the common VM-Exit + VM-Enter routines is wrong in vmx_enter_smm()
8221 * E.g. most MSRs are left untouched, but many are modified by VM-Exit in vmx_enter_smm()
8222 * and VM-Enter, and thus L2's values may be corrupted on SMI+RSM. in vmx_enter_smm()
8224 vmx->nested.smm.guest_mode = is_guest_mode(vcpu); in vmx_enter_smm()
8225 if (vmx->nested.smm.guest_mode) in vmx_enter_smm()
8226 nested_vmx_vmexit(vcpu, -1, 0, 0); in vmx_enter_smm()
8228 vmx->nested.smm.vmxon = vmx->nested.vmxon; in vmx_enter_smm()
8229 vmx->nested.vmxon = false; in vmx_enter_smm()
8239 if (vmx->nested.smm.vmxon) { in vmx_leave_smm()
8240 vmx->nested.vmxon = true; in vmx_leave_smm()
8241 vmx->nested.smm.vmxon = false; in vmx_leave_smm()
8244 if (vmx->nested.smm.guest_mode) { in vmx_leave_smm()
8249 vmx->nested.nested_run_pending = 1; in vmx_leave_smm()
8250 vmx->nested.smm.guest_mode = false; in vmx_leave_smm()
8263 return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu); in vmx_apic_init_signal_blocked()
8269 struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer; in vmx_migrate_timers()
8290 free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm)); in vmx_vm_destroy()
8330 * Untag the address by sign-extending the lam_bit, but NOT to bit 63. in vmx_get_untagged_addr()
8347 (unsigned long *)&vcpu->arch.pmu.global_status); in vmx_handle_intel_pt_intr()
8355 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm in vmx_setup_user_return_msrs()
8382 * On pre-MKTME systems, boot_cpu_data.x86_phys_bits is equal to in vmx_setup_me_spte_mask()
8390 kvm_host.maxphyaddr - 1); in vmx_setup_me_spte_mask()
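/*
 * Editor's sketch: on MKTME-capable parts, boot_cpu_data.x86_phys_bits
 * excludes the KeyID bits while CPUID's MAXPHYADDR includes them, so
 * the KeyID (ME) mask covers bits [phys_bits, maxphyaddr - 1]. Both
 * widths are below 64 on x86.
 */
#include <stdint.h>

uint64_t me_mask(unsigned int phys_bits, unsigned int maxphyaddr)
{
	if (phys_bits >= maxphyaddr)
		return 0;	/* pre-MKTME: no KeyID bits */
	return ((1ULL << maxphyaddr) - 1) & ~((1ULL << phys_bits) - 1);
}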
8412 return -EIO; in vmx_hardware_setup()
8439 return -EOPNOTSUPP; in vmx_hardware_setup()
8519 * and EPT A/D bit features are enabled -- PML depends on them to work. in vmx_hardware_setup()
8558 return -EINVAL; in vmx_hardware_setup()
8618 return -EOPNOTSUPP; in vmx_init()