Lines Matching +defs:access +defs:desc

71 #include <asm/desc.h>
964 * else the access will fault indefinitely (and to emulate hardware).
7538 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
7547 access |= PFERR_USER_MASK;
7548 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
7558 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7559 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7568 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7569 access |= PFERR_WRITE_MASK;
7570 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
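
The matches at 7538-7570 all build the same kind of "access" mask before calling mmu->gva_to_gpa(): PFERR_USER_MASK when the vCPU is at CPL 3 (translate_nested_gpa at 7547 ORs it in unconditionally, since nested walks are treated as user walks), plus a bit describing the kind of access. A minimal user-space sketch of that assembly, assuming the architectural page-fault error-code bit positions (W/R = bit 1, U/S = bit 2, I/D = bit 4); the helper name and parameters are illustrative, not KVM's API:

    #include <stdint.h>
    #include <stdio.h>

    #define PFERR_WRITE_MASK  (1ULL << 1)   /* access was a write */
    #define PFERR_USER_MASK   (1ULL << 2)   /* access came from CPL 3 */
    #define PFERR_FETCH_MASK  (1ULL << 4)   /* access was an instruction fetch */

    /* illustrative helper, not a kernel function */
    static uint64_t build_access_mask(int cpl, int write, int fetch)
    {
            uint64_t access = (cpl == 3) ? PFERR_USER_MASK : 0;

            if (write)
                    access |= PFERR_WRITE_MASK;
            if (fetch)
                    access |= PFERR_FETCH_MASK;
            return access;
    }

    int main(void)
    {
            printf("CPL3 read:  %#llx\n", (unsigned long long)build_access_mask(3, 0, 0));
            printf("CPL0 write: %#llx\n", (unsigned long long)build_access_mask(0, 1, 0));
            printf("CPL3 fetch: %#llx\n", (unsigned long long)build_access_mask(3, 0, 1));
            return 0;
    }
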
7574 /* uses this to access any guest's mapped memory without checking CPL */
7584 struct kvm_vcpu *vcpu, u64 access,
7592 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
7621 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7626 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK,
7646 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7655 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
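
kvm_read_guest_virt_helper (signature at 7584, translation at 7592, used at 7655) and the fetch path at 7626 (which passes access|PFERR_FETCH_MASK) have to translate and copy one page at a time, because a linear range can map to discontiguous physical pages. A rough stand-alone model of that chunking, with a flat array standing in for guest memory and an identity translation; only the page-at-a-time loop mirrors the kernel logic:

    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096u

    static uint8_t guest_mem[4 * PAGE_SIZE];  /* fake guest physical memory */

    /* identity gva->gpa stand-in for mmu->gva_to_gpa(); real code checks 'access' */
    static int gva_to_gpa(uint64_t gva, uint64_t *gpa)
    {
            if (gva >= sizeof(guest_mem))
                    return -1;      /* would fill the exception argument (#PF) */
            *gpa = gva;
            return 0;
    }

    static int read_guest_virt(uint64_t gva, void *val, unsigned int bytes)
    {
            uint8_t *data = val;

            while (bytes) {
                    unsigned int offset = gva & (PAGE_SIZE - 1);
                    unsigned int toread = bytes < PAGE_SIZE - offset ?
                                          bytes : PAGE_SIZE - offset;
                    uint64_t gpa;

                    /* one translation per page: the next page may live elsewhere */
                    if (gva_to_gpa(gva, &gpa))
                            return -1;
                    memcpy(data, &guest_mem[gpa], toread);

                    bytes -= toread;
                    data  += toread;
                    gva   += toread;
            }
            return 0;
    }

    int main(void)
    {
            uint8_t buf[64];

            memset(guest_mem, 0xab, sizeof(guest_mem));
            /* a read straddling a page boundary exercises the loop twice */
            return read_guest_virt(PAGE_SIZE - 32, buf, sizeof(buf));
    }
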
7665 u64 access = 0;
7668 access |= PFERR_IMPLICIT_ACCESS;
7670 access |= PFERR_USER_MASK;
7672 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
7676 struct kvm_vcpu *vcpu, u64 access,
7684 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
7710 u64 access = PFERR_WRITE_MASK;
7713 access |= PFERR_IMPLICIT_ACCESS;
7715 access |= PFERR_USER_MASK;
7718 access, exception);
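
The emulator-facing variants at 7665-7672 (read) and 7710-7718 (write) add a third case: a "system" access is tagged with PFERR_IMPLICIT_ACCESS instead of PFERR_USER_MASK, so implied supervisor accesses (descriptor-table reads and the like) are not subject to user-mode permission checks even when the vCPU is at CPL 3. A sketch of that selection; PFERR_IMPLICIT_ACCESS is a KVM-internal software bit (bit 48 in current kernels, treated as an assumption here), and the helper name is invented:

    #include <stdint.h>

    #define PFERR_WRITE_MASK       (1ULL << 1)
    #define PFERR_USER_MASK        (1ULL << 2)
    #define PFERR_IMPLICIT_ACCESS  (1ULL << 48)  /* KVM-internal, not architectural */

    /* hypothetical helper mirroring the emulator read/write paths above */
    uint64_t emulator_access_mask(int system, int cpl, int write)
    {
            uint64_t access = write ? PFERR_WRITE_MASK : 0;

            if (system)
                    access |= PFERR_IMPLICIT_ACCESS;  /* implied supervisor access */
            else if (cpl == 3)
                    access |= PFERR_USER_MASK;        /* ordinary user access */
            return access;
    }
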
7769 /* For APIC access vmexit */
7786 u64 access = ((kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
7796 vcpu->arch.mmio_access, 0, access))) {
7803 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
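
7786-7803 come from the MMIO fast path: before paying for a full gva_to_gpa() walk, KVM consults a one-entry cache of the last MMIO translation, validated against the access bits it was created with (the vcpu->arch.mmio_access argument at 7796). A toy version of the lookup; the struct and the simplified matching rule (same guest page, cached permissions cover the requested access) are assumptions standing in for the kernel's permission_fault() check:

    #include <stdint.h>

    #define PAGE_SHIFT 12

    struct mmio_gva_cache {
            uint64_t gva;       /* page-aligned gva of last MMIO access, 0 = invalid */
            uint64_t gpa;       /* its page-aligned translation */
            uint64_t access;    /* PFERR_* bits the entry was validated with */
    };

    /* hit only if same guest page and cached permissions cover this access */
    int mmio_cache_lookup(const struct mmio_gva_cache *c,
                          uint64_t gva, uint64_t access, uint64_t *gpa)
    {
            if (!c->gva || (c->gva >> PAGE_SHIFT) != (gva >> PAGE_SHIFT))
                    return 0;
            if ((access & c->access) != access)
                    return 0;
            *gpa = c->gpa | (gva & ((1ULL << PAGE_SHIFT) - 1));
            return 1;
    }
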
8048 * enabled in the host and the access splits a cache line.
8088 * back the original value and the access is atomic, but KVM's ABI is
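
8048 and 8088 are from the emulated-CMPXCHG path, whose comments hinge on two properties of the instruction: the compare-and-write must look atomic to the guest, and on failure CMPXCHG hands back the value actually found in memory. Generic C shows the same write-back semantics through a compare-exchange builtin (this is not KVM code, just the semantics the comments rely on):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t mem = 42;       /* stands in for the guest memory operand */
            uint64_t expected = 41;  /* RAX in the real instruction */

            /* fails because mem != expected; 'expected' now holds the old value */
            if (!__atomic_compare_exchange_n(&mem, &expected, 99, 0,
                                             __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                    printf("failed, saw %llu\n", (unsigned long long)expected);

            /* succeeds: mem becomes 99 atomically */
            __atomic_compare_exchange_n(&mem, &expected, 99, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
            printf("mem = %llu\n", (unsigned long long)mem);
            return 0;
    }
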
8360 struct desc_struct *desc, u32 *base3,
8369 memset(desc, 0, sizeof(*desc));
8377 set_desc_limit(desc, var.limit);
8378 set_desc_base(desc, (unsigned long)var.base);
8383 desc->type = var.type;
8384 desc->s = var.s;
8385 desc->dpl = var.dpl;
8386 desc->p = var.present;
8387 desc->avl = var.avl;
8388 desc->l = var.l;
8389 desc->d = var.db;
8390 desc->g = var.g;
8396 struct desc_struct *desc, u32 base3,
8403 var.base = get_desc_base(desc);
8407 var.limit = get_desc_limit(desc);
8408 if (desc->g)
8410 var.type = desc->type;
8411 var.dpl = desc->dpl;
8412 var.db = desc->d;
8413 var.s = desc->s;
8414 var.l = desc->l;
8415 var.g = desc->g;
8416 var.avl = desc->avl;
8417 var.present = desc->p;
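
8360-8417 are the two halves of converting between KVM's segment representation ('var', a struct kvm_segment) and the emulator's struct desc_struct, copying the type/s/dpl/p/avl/l/d-b/g attribute bits field by field in each direction; base3 travels separately because it carries bits 63:32 of a 64-bit base, and the 'if (desc->g)' at 8408 guards scaling the 20-bit page-granular limit back to bytes. A self-contained round-trip model with deliberately simplified structs (the real desc_struct packs these fields into GDT-entry layout, which the accessors at 8377-8378 and 8403/8407 hide):

    #include <assert.h>
    #include <stdint.h>

    struct seg_var {                 /* like struct kvm_segment */
            uint64_t base;
            uint32_t limit;
            uint8_t  type, s, dpl, present, avl, l, db, g;
    };

    struct seg_desc {                /* like struct desc_struct, unpacked */
            uint64_t base;
            uint32_t limit;
            unsigned type : 4, s : 1, dpl : 2, p : 1;
            unsigned avl : 1, l : 1, d : 1, g : 1;
    };

    static void var_to_desc(const struct seg_var *var, struct seg_desc *desc)
    {
            desc->base = var->base;   desc->limit = var->limit;
            desc->type = var->type;   desc->s = var->s;
            desc->dpl  = var->dpl;    desc->p = var->present;
            desc->avl  = var->avl;    desc->l = var->l;
            desc->d    = var->db;     desc->g = var->g;
    }

    static void desc_to_var(const struct seg_desc *desc, struct seg_var *var)
    {
            var->base = desc->base;   var->limit = desc->limit;
            var->type = desc->type;   var->s = desc->s;
            var->dpl  = desc->dpl;    var->present = desc->p;
            var->avl  = desc->avl;    var->l = desc->l;
            var->db   = desc->d;      var->g = desc->g;
    }

    int main(void)
    {
            struct seg_var in = { .base = 0x1000, .limit = 0xfffff, .type = 0xb,
                                  .s = 1, .dpl = 0, .present = 1, .l = 1, .g = 1 };
            struct seg_desc mid;
            struct seg_var out;

            var_to_desc(&in, &mid);
            desc_to_var(&mid, &out);
            assert(out.type == in.type && out.dpl == in.dpl && out.g == in.g);
            return 0;
    }
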
8870 * If the failed instruction faulted on an access to page tables that
10579 * the vCPU would incorrectly be able to access the vAPIC page via MMIO
10581 * access page is sticky.
11009 * since we do this before handling the vmexit, a DR access vmexit
13155 * the subtly complex checks when removing write access.
13188 * write access" helpers to ignore MMU-writable entirely.
13191 * access-tracked SPTEs is particularly relevant).
13675 u64 access = error_code &
13679 mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
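
13675-13679 run the translation in reverse: starting from a hardware page-fault error code, only the bits that describe what the access was (write/fetch/user) are kept as the access mask, and the faulting gva is re-walked with them; if the walk still succeeds, the guest's own page tables do not explain the fault, and the error code is fixed up before injection. The mask extraction, with the same assumed bit positions as above:

    #include <stdint.h>

    #define PFERR_WRITE_MASK  (1ULL << 1)
    #define PFERR_USER_MASK   (1ULL << 2)
    #define PFERR_FETCH_MASK  (1ULL << 4)

    /* keep only the bits that describe *what* the access was, not why it failed */
    uint64_t access_from_error_code(uint64_t error_code)
    {
            return error_code & (PFERR_WRITE_MASK | PFERR_FETCH_MASK |
                                 PFERR_USER_MASK);
    }
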