Lines Matching full:nested

44 		 * TODO: track the cause of the nested page fault, and in nested_svm_inject_npf_exit()
62 u64 cr3 = svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_pdptr()
81 return svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_cr3()
99 svm->nested.ctl.nested_cr3); in nested_svm_init_mmu_context()
120 if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK)) in nested_vmcb_needs_vls_intercept()
139 g = &svm->nested.ctl; in recalc_intercepts()
161 * We want to see VMMCALLs from a nested guest only when Hyper-V L2 TLB in recalc_intercepts()
188 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
199 * - Nested hypervisor (L1) is attempting to launch the same L2 as in nested_svm_vmrun_msrpm()
201 * - Nested hypervisor (L1) is using Hyper-V emulation interface and in nested_svm_vmrun_msrpm()
205 if (!svm->nested.force_msr_bitmap_recalc) { in nested_svm_vmrun_msrpm()
206 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments; in nested_svm_vmrun_msrpm()
210 (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS))) in nested_svm_vmrun_msrpm()
215 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_vmrun_msrpm()
227 /* x2apic msrs are intercepted always for the nested guest */ in nested_svm_vmrun_msrpm()
231 offset = svm->nested.ctl.msrpm_base_pa + (p * 4); in nested_svm_vmrun_msrpm()
236 svm->nested.msrpm[p] = svm->msrpm[p] | value; in nested_svm_vmrun_msrpm()
239 svm->nested.force_msr_bitmap_recalc = false; in nested_svm_vmrun_msrpm()
244 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm)); in nested_svm_vmrun_msrpm()
326 struct vmcb_save_area_cached *save = &svm->nested.save; in nested_vmcb_check_save()
334 struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl; in nested_vmcb_check_controls()
389 __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control); in nested_copy_vmcb_control_to_cache()
411 __nested_copy_vmcb_save_to_cache(&svm->nested.save, save); in nested_copy_vmcb_save_to_cache()
421 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj; in nested_sync_control_from_vmcb02()
422 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err; in nested_sync_control_from_vmcb02()
435 !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts)) in nested_sync_control_from_vmcb02()
444 svm->nested.ctl.int_ctl &= ~mask; in nested_sync_control_from_vmcb02()
445 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask; in nested_sync_control_from_vmcb02()
495 * - Honor L1's request to flush an ASID on nested VMRUN in nested_svm_transition_tlb_flush()
496 * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*] in nested_svm_transition_tlb_flush()
497 * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN in nested_svm_transition_tlb_flush()
500 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested in nested_svm_transition_tlb_flush()
508 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
534 if (!svm->nested.vmcb02.ptr) in nested_vmcb02_compute_g_pat()
538 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat; in nested_vmcb02_compute_g_pat()
545 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; in nested_vmcb02_prepare_save()
550 /* Load the nested guest state */ in nested_vmcb02_prepare_save()
551 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) { in nested_vmcb02_prepare_save()
553 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa; in nested_vmcb02_prepare_save()
554 svm->nested.force_msr_bitmap_recalc = true; in nested_vmcb02_prepare_save()
574 svm_set_efer(vcpu, svm->nested.save.efer); in nested_vmcb02_prepare_save()
576 svm_set_cr0(vcpu, svm->nested.save.cr0); in nested_vmcb02_prepare_save()
577 svm_set_cr4(vcpu, svm->nested.save.cr4); in nested_vmcb02_prepare_save()
592 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1; in nested_vmcb02_prepare_save()
593 svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW; in nested_vmcb02_prepare_save()
598 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { in nested_vmcb02_prepare_save()
645 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; in nested_vmcb02_prepare_control()
660 (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK)) in nested_vmcb02_prepare_control()
692 svm->nested.ctl.tsc_offset, in nested_vmcb02_prepare_control()
702 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) | in nested_vmcb02_prepare_control()
705 vmcb02->control.int_vector = svm->nested.ctl.int_vector; in nested_vmcb02_prepare_control()
706 vmcb02->control.int_state = svm->nested.ctl.int_state; in nested_vmcb02_prepare_control()
707 vmcb02->control.event_inj = svm->nested.ctl.event_inj; in nested_vmcb02_prepare_control()
708 vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_vmcb02_prepare_control()
719 vmcb02->control.next_rip = svm->nested.ctl.next_rip; in nested_vmcb02_prepare_control()
729 svm->soft_int_next_rip = svm->nested.ctl.next_rip; in nested_vmcb02_prepare_control()
738 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK); in nested_vmcb02_prepare_control()
744 pause_count12 = svm->nested.ctl.pause_filter_count; in nested_vmcb02_prepare_control()
748 pause_thresh12 = svm->nested.ctl.pause_filter_thresh; in nested_vmcb02_prepare_control()
762 if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) { in nested_vmcb02_prepare_control()
781 * moved at the time of nested vmrun and vmexit. in nested_svm_copy_common_state()
813 svm->nested.vmcb12_gpa = vmcb12_gpa; in enter_svm_guest_mode()
815 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr); in enter_svm_guest_mode()
817 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr); in enter_svm_guest_mode()
819 svm_switch_vmcb(svm, &svm->nested.vmcb02); in enter_svm_guest_mode()
823 ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3, in enter_svm_guest_mode()
850 if (!svm->nested.hsave_msr) { in nested_svm_vmrun()
880 if (WARN_ON_ONCE(!svm->nested.initialized)) in nested_svm_vmrun()
908 svm->nested.nested_run_pending = 1; in nested_svm_vmrun()
917 svm->nested.nested_run_pending = 0; in nested_svm_vmrun()
975 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; in nested_svm_vmexit()
980 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map); in nested_svm_vmexit()
991 svm->nested.vmcb12_gpa = 0; in nested_svm_vmexit()
992 WARN_ON_ONCE(svm->nested.nested_run_pending); in nested_svm_vmexit()
1032 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl; in nested_svm_vmexit()
1033 vmcb12->control.event_inj = svm->nested.ctl.event_inj; in nested_svm_vmexit()
1034 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_svm_vmexit()
1042 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr); in nested_svm_vmexit()
1061 * V_GIF: If nested vGIF is not used, KVM uses vmcb02's V_GIF for L1's in nested_svm_vmexit()
1069 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { in nested_svm_vmexit()
1110 svm->nested.ctl.nested_cr3 = 0; in nested_svm_vmexit()
1174 if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN)) in nested_svm_triple_fault()
1185 if (svm->nested.initialized) in svm_allocate_nested()
1191 svm->nested.vmcb02.ptr = page_address(vmcb02_page); in svm_allocate_nested()
1192 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT); in svm_allocate_nested()
1194 svm->nested.msrpm = svm_vcpu_alloc_msrpm(); in svm_allocate_nested()
1195 if (!svm->nested.msrpm) in svm_allocate_nested()
1197 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm); in svm_allocate_nested()
1199 svm->nested.initialized = true; in svm_allocate_nested()
1209 if (!svm->nested.initialized) in svm_free_nested()
1215 svm_vcpu_free_msrpm(svm->nested.msrpm); in svm_free_nested()
1216 svm->nested.msrpm = NULL; in svm_free_nested()
1218 __free_page(virt_to_page(svm->nested.vmcb02.ptr)); in svm_free_nested()
1219 svm->nested.vmcb02.ptr = NULL; in svm_free_nested()
1228 svm->nested.last_vmcb12_gpa = INVALID_GPA; in svm_free_nested()
1230 svm->nested.initialized = false; in svm_free_nested()
1238 svm->nested.nested_run_pending = 0; in svm_leave_nested()
1239 svm->nested.vmcb12_gpa = INVALID_GPA; in svm_leave_nested()
1260 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_exit_handled_msr()
1274 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4)) in nested_svm_exit_handled_msr()
1287 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT))) in nested_svm_intercept_ioio()
1293 gpa = svm->nested.ctl.iopm_base_pa + (port / 8); in nested_svm_intercept_ioio()
1318 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1323 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1341 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1381 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector)); in nested_svm_is_exception_vmexit()
1422 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT); in nested_exit_on_init()
1430 * Only a pending nested run blocks a pending exception. If there is a in svm_check_nested_events()
1434 bool block_nested_exceptions = svm->nested.nested_run_pending; in svm_check_nested_events()
1607 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa; in svm_get_nested_state()
1611 if (svm->nested.nested_run_pending) in svm_get_nested_state()
1635 nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl); in svm_get_nested_state()
1746 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save; in svm_set_nested_state()
1750 svm->nested.nested_run_pending = in svm_set_nested_state()
1753 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; in svm_set_nested_state()
1758 svm_switch_vmcb(svm, &svm->nested.vmcb02); in svm_set_nested_state()
1762 * While the nested guest CR3 is already checked and set by in svm_set_nested_state()
1763 * KVM_SET_SREGS, it was set when nested state was yet loaded, in svm_set_nested_state()
1773 svm->nested.force_msr_bitmap_recalc = true; in svm_set_nested_state()
1795 * the guest CR3 might be restored prior to setting the nested in svm_get_nested_state_pages()
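
The fragments above come from KVM's nested SVM (AMD-V) support, arch/x86/kvm/svm/nested.c; the name after each match is the enclosing function. The nested_svm_vmrun_msrpm() matches show how the MSR permission bitmap used while L2 runs is built: the rebuild is skipped when nothing can have changed (force_msr_bitmap_recalc is clear and, for Hyper-V enlightened guests, the HV_VMCB_NESTED_ENLIGHTENMENTS clean bit is set), the cached vmcb12 controls are checked for INTERCEPT_MSR_PROT, and otherwise each 32-bit word of L1's bitmap is read from guest memory at msrpm_base_pa + (p * 4) and OR-ed with L0's own bitmap. A minimal sketch of the merge step only, assuming an 8 KiB (two-page) permission map and using made-up names (merge_msrpm, l0_msrpm, l1_msrpm, MSRPM_U32_WORDS) rather than the kernel's:

	#include <stdint.h>
	#include <stddef.h>

	#define MSRPM_SIZE_BYTES 8192	/* assumed: the SVM MSR permission map spans two 4 KiB pages */
	#define MSRPM_U32_WORDS  (MSRPM_SIZE_BYTES / sizeof(uint32_t))

	/*
	 * Illustrative only, not the in-tree implementation: a set bit means
	 * "intercept this MSR access", so OR-ing L0's and L1's bitmaps makes
	 * L2 exit whenever either hypervisor wants to see the access.
	 */
	static void merge_msrpm(uint32_t *merged, const uint32_t *l0_msrpm,
				const uint32_t *l1_msrpm)
	{
		size_t p;

		for (p = 0; p < MSRPM_U32_WORDS; p++)
			merged[p] = l0_msrpm[p] | l1_msrpm[p];
	}

In the kernel the L1 words come from guest memory (hence the msrpm_base_pa + (p * 4) offset in the fragments) and the merged bitmap lives in svm->nested.msrpm, whose physical address is written into the active VMCB's msrpm_base_pa with __sme_set().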
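
Going the other way, the nested_sync_control_from_vmcb02() matches show hardware-updated state flowing from vmcb02 back into the cached vmcb12 controls after L2 has run: event_inj and event_inj_err are copied directly, while int_ctl is merged under a mask so that only the bits the CPU may have rewritten are taken from vmcb02 (the fragments adjust that mask based on, among other things, whether VINTR is intercepted) and the remaining, L1-owned bits are left untouched. A sketch of that masked read-back pattern, with sync_masked_bits() as a hypothetical helper rather than anything defined in the kernel:

	#include <stdint.h>

	/*
	 * Keep every bit of 'cached' except those in 'mask', which are
	 * refreshed from the hardware-written value 'hw'.
	 */
	static uint32_t sync_masked_bits(uint32_t cached, uint32_t hw, uint32_t mask)
	{
		cached &= ~mask;	/* drop the stale copies of the synced bits */
		cached |= hw & mask;	/* pull in what the CPU wrote to vmcb02 */
		return cached;
	}

An int_ctl read-back would then be cached_int_ctl = sync_masked_bits(cached_int_ctl, vmcb02_int_ctl, mask), with mask naming whichever V_* bits hardware is allowed to update in that configuration.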