Lines Matching +full:guest +full:- +full:index +full:- +full:bits

1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Kernel-based Virtual Machine driver for Linux
32 #include <asm/pvclock-abi.h>
35 #include <asm/msr-index.h>
82 /* x86-specific vcpu->requests bit members */
151 #define KVM_NR_PAGE_SIZES (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
152 #define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
155 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
234 * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
235 * We can regard all the bits in DR6_FIXED_1 as active_low bits;
275 * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP checks
280 * PRIVATE_ACCESS is a KVM-defined flag used to indicate that a fault occurred
281 * when the guest was accessing private memory.
286 /* apic attention bits */
289 * The following bit is set with PV-EOI, unset on EOI.
290 * We detect PV-EOI changes by guest by comparing
291 * this bit with PV-EOI in guest memory.
305 * Upper-level shadow pages having gptes are tracked for write-protection via
307 * not create more than 2^16-1 upper-level shadow pages at a single gfn,
312 * incorporates various mode bits and properties of the SP. Roughly speaking,
314 * is the number of bits that are used to compute the role.
316 * But, even though there are 20 bits in the mask below, not all combinations
319 * - invalid shadow pages are not accounted, mirror pages are not shadowed,
320 * so the bits are effectively 18.
322 * - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging);
324 * has_4_byte_gpte=0. Therefore, 2 bits are always unused.
326 * - the 4 bits of level are effectively limited to the values 2/3/4/5,
327 * as 4k SPs are not tracked (allowed to go unsync). In addition non-PAE
331 * - on top of this, smep_andnot_wp and smap_andnot_wp are only set if
332 * cr0_wp=0, therefore these three bits only give rise to 5 possibilities.
334 * Therefore, the maximum number of possible upper-level shadow pages for a
370 * MMU re-configuration can be skipped. @valid bit is set on first usage so we
371 * don't treat all-zero structure as valid data.
376 * CR4.PKE only affects permission checks for software walks of the guest page
381 * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
438 #define KVM_MMU_ROOTS_ALL (BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)
446 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
447 * and 2-level 32-bit). The kvm_mmu structure abstracts the details of the
452 u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
468 * consists of 16 domains indexed by page fault error code bits [4:1],
470 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
478 * Byte index: page fault error code [4:1]
479 * Bit index: pte permissions in ACC_* format
488 * check zero bits on shadow page table entries, these
489 * bits include not only hardware reserved bits but also
490 * the bits spte never used.
514 * guest or userspace.
517 * doesn't need to reprogram the perf_event every time the guest writes
567 * Overlay the bitmap with a 64-bit atomic so that all bits can be
585 * If a guest counter is cross-mapped to host counter with different
586 * index, its PEBS capability will be temporarily disabled.
601 * redundant check before cleanup if guest doesn't use vPMU at all.
621 /* Hyper-V SynIC timer */
624 int index; member
632 /* Hyper-V synthetic interrupt controller (SynIC)*/
656 #define KVM_HV_TLB_FLUSHALL_ENTRY ((u64)-1)
669 /* Hyper-V per vcpu emulation context */
726 u64 timer_expires; /* In guest epoch */
746 * Hardware-defined CPUID leafs that are either scattered by the kernel or are
759 NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
805 * If the vcpu runs in guest mode with two level paging this still saves
806 * the paging mode of the l1 guest. This context is always used to
811 /* Non-nested MMU for L1 */
818 * Paging state of an L2 guest (used for nested npt)
821 * of an L2 guest. This context is only initialized for page table
844 * QEMU userspace and the guest each have their own FPU state.
845 * In vcpu_run, we switch between the user and guest FPU contexts.
846 * While running a VCPU, the VCPU thread will have the guest FPU
851 * "guest_fpstate" state here contains the guest FPU context, with the
852 * host PKRU bits.
868 /* Exceptions to be injected to the guest. */
870 /* Exception VM-Exits to be synthesized to L1. */
886 * cpu_caps holds the effective guest capabilities, i.e. the features
888 * be used by the guest if and only if both KVM and userspace want to
889 * expose the feature to the guest.
892 * prevent the guest from using a feature, in which case the vCPU "has"
897 * guest CPUID provided by userspace.
915 /* set guest stopped flag in pvclock flags field */
977 /* used for guest single stepping over the given code position */
1027 /* be preempted when it's in kernel-mode(cpl=0) */
1033 /* Host CPU on which VM-entry was most recently attempted */
1049 * are not present in the guest's cpuid
1059 * reading the guest memory
1074 struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
1081 * software-enabled local APICs to be in the same mode, each addressable APIC to
1111 /* Hyper-V synthetic debugger (SynDbg)*/
1123 /* Current state of Hyper-V TSC page clocksource */
1127 /* TSC page MSR was written by the guest, update pending */
1138 /* Hyper-V emulation context */
1146 /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
1159 /* How many vCPUs have VP index != vCPU index */
1233 * being used by a HyperV guest.
1257 * first time either APIC ID or APIC base are changed by the guest
1268 * AVIC is inhibited on a vCPU because it runs a nested guest.
1284 * PIT (i8254) 're-inject' mode, relies on EOI intercept,
1340 * guest attempts to execute from the region then KVM obviously can't
1341 * create an NX huge page (without hanging the guest).
1390 * preemption-disabled region, so it must be a raw spinlock.
1452 * If exit_on_emulation_error is set, and the in-kernel instruction
1464 /* Guest can access the SGX PROVISIONKEY. */
1498 * - tdp_mmu_roots (above)
1499 * - the link field of kvm_mmu_page structs used by the TDP MMU
1500 * - possible_nx_huge_pages;
1501 * - the possible_nx_huge_page_link field of kvm_mmu_page structs used
1533 * VM-scope maximum vCPU ID. Used to determine the size of structures
1546 * Protected by kvm->slots_lock.
1558 * Protected by kvm->slots_lock.
1631 u32 index; member
1661 bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
1717 * Does not need to flush GPA->HPA mappings.
1718 * Can potentially get non-canonical addresses through INVLPGs, which
1724 * Flush any TLB entries created by the guest. Like tlb_flush_gva(),
1725 * does not need to flush GPA->HPA mappings.
1929 DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
1932 #include <asm/kvm-x86-ops.h>
1954 return -ENOTSUPP; in kvm_arch_flush_remote_tlbs()
1962 return -EOPNOTSUPP; in kvm_arch_flush_remote_tlbs_range()
1969 /* Values are arbitrary, but must be non-zero. */
1976 ((vcpu) && (vcpu)->arch.handling_intr_from_guest && \
1977 (!!in_nmi() == ((vcpu)->arch.handling_intr_from_guest == KVM_HANDLING_NMI)))
2034 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
2039 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
2044 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
2049 * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
2053 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
2060 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
2065 * EMULTYPE_PF - Set when an intercepted #PF triggers the emulation, in which case
2068 * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility
2069 * state and inject single-step #DBs after skipping
2072 * EMULTYPE_WRITE_PF_TO_SP - Set when emulating an intercepted page fault that
2081 * If emulation fails for a write to guest page tables,
2083 * gfn and resumes the guest to retry the non-emulatable
2085 * doesn't allow forward progress for a self-changing
2117 int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2118 int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);
2119 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
2120 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2121 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
2248 #define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem)
2253 #define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state)
2296 #define HF_GUEST_MASK (1 << 0) /* VCPU is in guest-mode */
2305 # define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
2325 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
2371 return (irq->delivery_mode == APIC_DM_FIXED || in kvm_irq_is_postable()
2372 irq->delivery_mode == APIC_DM_LOWEST); in kvm_irq_is_postable()
2414 * remaining 31 lower bits must be 0 to preserve ABI.