Lines Matching full:region
255 * range addressed by a single page table into a low and high region
257 * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1].
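For illustration (not from the source): with va_bits = 48, the low region is [0, 0x0000800000000000) and the high region is [0xffff800000000000, 0xffffffffffffffff], i.e. [-(2^47), -1] when the VA is interpreted as a signed value.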
395 * maximum page table size for a memory region will be when the in vm_nr_pages_required()
433 * MMIO region would prevent silently clobbering the MMIO region. in __vm_create()
436 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); in __vm_create()
515 struct userspace_mem_region *region; in kvm_vm_restart() local
521 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { in kvm_vm_restart()
522 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, &region->region); in kvm_vm_restart()
528 ret, errno, region->region.slot, in kvm_vm_restart()
529 region->region.flags, in kvm_vm_restart()
530 region->region.guest_phys_addr, in kvm_vm_restart()
531 region->region.memory_size); in kvm_vm_restart()
619 * Userspace Memory Region Find
629 * Pointer to overlapping region, NULL if no such region.
631 * Searches for a region with any physical memory that overlaps with
635 * region exists.
643 struct userspace_mem_region *region = in userspace_mem_region_find() local
645 uint64_t existing_start = region->region.guest_phys_addr; in userspace_mem_region_find()
646 uint64_t existing_end = region->region.guest_phys_addr in userspace_mem_region_find()
647 + region->region.memory_size - 1; in userspace_mem_region_find()
649 return region; in userspace_mem_region_find()
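The fragments above are from userspace_mem_region_find(), which returns any region whose inclusive [guest_phys_addr, guest_phys_addr + memory_size - 1] range intersects the queried range. A minimal standalone sketch of that overlap test (the helper name is illustrative, not part of the library):

    #include <stdbool.h>
    #include <stdint.h>

    /* True if the inclusive ranges [start, end] and [existing_start, existing_end] intersect. */
    static bool ranges_overlap(uint64_t start, uint64_t end,
                               uint64_t existing_start, uint64_t existing_end)
    {
            return start <= existing_end && end >= existing_start;
    }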
715 struct userspace_mem_region *region) in __vm_mem_region_delete() argument
719 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
720 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
721 hash_del(&region->slot_node); in __vm_mem_region_delete()
723 sparsebit_free(&region->unused_phy_pages); in __vm_mem_region_delete()
724 sparsebit_free(&region->protected_phy_pages); in __vm_mem_region_delete()
725 ret = munmap(region->mmap_start, region->mmap_size); in __vm_mem_region_delete()
727 if (region->fd >= 0) { in __vm_mem_region_delete()
729 ret = munmap(region->mmap_alias, region->mmap_size); in __vm_mem_region_delete()
731 close(region->fd); in __vm_mem_region_delete()
733 if (region->region.guest_memfd >= 0) in __vm_mem_region_delete()
734 close(region->region.guest_memfd); in __vm_mem_region_delete()
736 free(region); in __vm_mem_region_delete()
746 struct userspace_mem_region *region; in kvm_vm_free() local
758 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) in kvm_vm_free()
759 __vm_mem_region_delete(vmp, region); in kvm_vm_free()
792 struct userspace_mem_region *region) in vm_userspace_mem_region_gpa_insert() argument
801 if (region->region.guest_phys_addr < in vm_userspace_mem_region_gpa_insert()
802 cregion->region.guest_phys_addr) in vm_userspace_mem_region_gpa_insert()
805 TEST_ASSERT(region->region.guest_phys_addr != in vm_userspace_mem_region_gpa_insert()
806 cregion->region.guest_phys_addr, in vm_userspace_mem_region_gpa_insert()
807 "Duplicate GPA in region tree"); in vm_userspace_mem_region_gpa_insert()
813 rb_link_node(&region->gpa_node, parent, cur); in vm_userspace_mem_region_gpa_insert()
814 rb_insert_color(&region->gpa_node, gpa_tree); in vm_userspace_mem_region_gpa_insert()
818 struct userspace_mem_region *region) in vm_userspace_mem_region_hva_insert() argument
827 if (region->host_mem < cregion->host_mem) in vm_userspace_mem_region_hva_insert()
830 TEST_ASSERT(region->host_mem != in vm_userspace_mem_region_hva_insert()
832 "Duplicate HVA in region tree"); in vm_userspace_mem_region_hva_insert()
838 rb_link_node(&region->hva_node, parent, cur); in vm_userspace_mem_region_hva_insert()
839 rb_insert_color(&region->hva_node, hva_tree); in vm_userspace_mem_region_hva_insert()
846 struct kvm_userspace_memory_region region = { in __vm_set_user_memory_region() local
854 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); in __vm_set_user_memory_region()
874 struct kvm_userspace_memory_region2 region = { in __vm_set_user_memory_region2() local
886 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region); in __vm_set_user_memory_region2()
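These two wrappers only populate a struct kvm_userspace_memory_region(2) on the stack and hand it to the ioctl. A caller-side sketch using the selftests helpers, assuming the (vm, slot, flags, gpa, size, hva) argument order; the slot number, GPA and size are illustrative, and hva must be a page-aligned host mapping:

    #include <errno.h>
    #include "test_util.h"
    #include "kvm_util.h"

    static void set_region_example(struct kvm_vm *vm, void *hva)
    {
            int ret;

            /* Create slot 10: 2MiB at GPA 256MiB; asserts on failure. */
            vm_set_user_memory_region(vm, 10, 0, 0x10000000, 0x200000, hva);

            /* The __ variant returns the raw ioctl result, useful for negative tests. */
            ret = __vm_set_user_memory_region(vm, 10, 0, 0x10000000, 0, NULL);
            TEST_ASSERT(!ret, "Deleting slot 10 failed, errno = %d", errno);
    }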
907 struct userspace_mem_region *region; in vm_mem_add() local
930 * Confirm a mem region with an overlapping address doesn't in vm_mem_add()
933 region = (struct userspace_mem_region *) userspace_mem_region_find( in vm_mem_add()
935 if (region != NULL) in vm_mem_add()
942 (uint64_t) region->region.guest_phys_addr, in vm_mem_add()
943 (uint64_t) region->region.memory_size); in vm_mem_add()
945 /* Confirm no region with the requested slot already exists. */ in vm_mem_add()
946 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_mem_add()
948 if (region->region.slot != slot) in vm_mem_add()
951 TEST_FAIL("A mem region with the requested slot " in vm_mem_add()
956 region->region.slot, in vm_mem_add()
957 (uint64_t) region->region.guest_phys_addr, in vm_mem_add()
958 (uint64_t) region->region.memory_size); in vm_mem_add()
961 /* Allocate and initialize new mem region structure. */ in vm_mem_add()
962 region = calloc(1, sizeof(*region)); in vm_mem_add()
963 TEST_ASSERT(region != NULL, "Insufficient Memory"); in vm_mem_add()
964 region->mmap_size = mem_size; in vm_mem_add()
986 region->mmap_size += alignment; in vm_mem_add()
988 region->fd = -1; in vm_mem_add()
990 region->fd = kvm_memfd_alloc(region->mmap_size, in vm_mem_add()
993 region->mmap_start = mmap(NULL, region->mmap_size, in vm_mem_add()
996 region->fd, 0); in vm_mem_add()
997 TEST_ASSERT(region->mmap_start != MAP_FAILED, in vm_mem_add()
1001 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), in vm_mem_add()
1003 region->mmap_start, backing_src_pagesz); in vm_mem_add()
1006 region->host_mem = align_ptr_up(region->mmap_start, alignment); in vm_mem_add()
1011 ret = madvise(region->host_mem, mem_size, in vm_mem_add()
1014 region->host_mem, mem_size, in vm_mem_add()
1018 region->backing_src_type = src_type; in vm_mem_add()
1029 * can be closed when the region is deleted without in vm_mem_add()
1037 region->region.guest_memfd = guest_memfd; in vm_mem_add()
1038 region->region.guest_memfd_offset = guest_memfd_offset; in vm_mem_add()
1040 region->region.guest_memfd = -1; in vm_mem_add()
1043 region->unused_phy_pages = sparsebit_alloc(); in vm_mem_add()
1045 region->protected_phy_pages = sparsebit_alloc(); in vm_mem_add()
1046 sparsebit_set_num(region->unused_phy_pages, in vm_mem_add()
1048 region->region.slot = slot; in vm_mem_add()
1049 region->region.flags = flags; in vm_mem_add()
1050 region->region.guest_phys_addr = guest_paddr; in vm_mem_add()
1051 region->region.memory_size = npages * vm->page_size; in vm_mem_add()
1052 region->region.userspace_addr = (uintptr_t) region->host_mem; in vm_mem_add()
1053 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_add()
1059 guest_paddr, (uint64_t) region->region.memory_size, in vm_mem_add()
1060 region->region.guest_memfd); in vm_mem_add()
1063 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_mem_add()
1064 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_mem_add()
1065 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_mem_add()
1068 if (region->fd >= 0) { in vm_mem_add()
1069 region->mmap_alias = mmap(NULL, region->mmap_size, in vm_mem_add()
1072 region->fd, 0); in vm_mem_add()
1073 TEST_ASSERT(region->mmap_alias != MAP_FAILED, in vm_mem_add()
1077 region->host_alias = align_ptr_up(region->mmap_alias, alignment); in vm_mem_add()
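The fragments above are vm_mem_add(): it mmaps the host backing, registers it via KVM_SET_USER_MEMORY_REGION2, and inserts the new region into the GPA/HVA trees and the slot hash. A minimal end-to-end sketch using the public wrappers, assuming vm_userspace_mem_region_add() takes (vm, src_type, guest_paddr, slot, npages, flags); slot, GPA and page count are illustrative:

    #include "kvm_util.h"

    int main(void)
    {
            struct kvm_vm *vm = vm_create_barebones();

            /* Anonymous 2MiB slot at GPA 256MiB (512 x 4KiB pages), no flags. */
            vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                        0x10000000, 1, 512, 0);

            /* kvm_vm_free() walks the slot hash and deletes every region. */
            kvm_vm_free(vm);
            return 0;
    }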
1090 * Memslot to region
1099 * Pointer to memory region structure that describes the memory region in memslot2region()
1101 * on error (e.g. currently no memory region using memslot as a KVM
1107 struct userspace_mem_region *region; in memslot2region() local
1109 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
1111 if (region->region.slot == memslot) in memslot2region()
1112 return region; in memslot2region()
1114 fprintf(stderr, "No mem region with the requested slot found,\n" in memslot2region()
1118 TEST_FAIL("Mem region not found"); in memslot2region()
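memslot2region() is the slot-hash lookup used by the mutators below. A small sketch of reading a region's size through it, assuming the function is declared in the selftests headers; the slot id and the helper name are illustrative:

    #include "kvm_util.h"

    static uint64_t slot_size(struct kvm_vm *vm, uint32_t slot)
    {
            /* TEST_FAILs if no region is bound to the slot. */
            struct userspace_mem_region *region = memslot2region(vm, slot);

            return region->region.memory_size;
    }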
1123 * VM Memory Region Flags Set
1133 * Sets the flags of the memory region specified by the value of slot,
1139 struct userspace_mem_region *region; in vm_mem_region_set_flags() local
1141 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1143 region->region.flags = flags; in vm_mem_region_set_flags()
1145 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_set_flags()
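A typical use of vm_mem_region_set_flags() is toggling dirty logging on an existing slot. A minimal sketch (slot id illustrative, helper name not from the library):

    #include <stdbool.h>
    #include "kvm_util.h"

    static void toggle_dirty_logging(struct kvm_vm *vm, uint32_t slot, bool enable)
    {
            vm_mem_region_set_flags(vm, slot, enable ? KVM_MEM_LOG_DIRTY_PAGES : 0);
    }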
1153 * VM Memory Region Move
1157 * slot - Slot of the memory region to move
1164 * Change the gpa of a memory region.
1168 struct userspace_mem_region *region; in vm_mem_region_move() local
1171 region = memslot2region(vm, slot); in vm_mem_region_move()
1173 region->region.guest_phys_addr = new_gpa; in vm_mem_region_move()
1175 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_move()
1183 * VM Memory Region Delete
1187 * slot - Slot of the memory region to delete
1193 * Delete a memory region.
1197 struct userspace_mem_region *region = memslot2region(vm, slot); in vm_mem_region_delete() local
1199 region->region.memory_size = 0; in vm_mem_region_delete()
1200 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_delete()
1202 __vm_mem_region_delete(vm, region); in vm_mem_region_delete()
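A sketch combining the two mutators above: vm_mem_region_move() only rewrites guest_phys_addr and re-issues the ioctl, while vm_mem_region_delete() registers a zero-size slot and then frees the host-side bookkeeping. Slot and GPA values are illustrative:

    #include "kvm_util.h"

    static void rearrange_regions(struct kvm_vm *vm)
    {
            vm_mem_region_move(vm, 1, 0x40000000);    /* move slot 1 to GPA 1GiB */
            vm_mem_region_delete(vm, 1);              /* then drop it entirely */
    }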
1209 struct userspace_mem_region *region; in vm_guest_mem_fallocate() local
1218 region = userspace_mem_region_find(vm, gpa, gpa); in vm_guest_mem_fallocate()
1219 TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD, in vm_guest_mem_fallocate()
1220 "Private memory region not found for GPA 0x%lx", gpa); in vm_guest_mem_fallocate()
1222 offset = gpa - region->region.guest_phys_addr; in vm_guest_mem_fallocate()
1223 fd_offset = region->region.guest_memfd_offset + offset; in vm_guest_mem_fallocate()
1224 len = min_t(uint64_t, end - gpa, region->region.memory_size - offset); in vm_guest_mem_fallocate()
1226 ret = fallocate(region->region.guest_memfd, mode, fd_offset, len); in vm_guest_mem_fallocate()
1229 region->region.guest_memfd, mode, fd_offset); in vm_guest_mem_fallocate()
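vm_guest_mem_fallocate() operates on the guest_memfd backing a KVM_MEM_GUEST_MEMFD region. A minimal sketch that preallocates and then hole-punches a 2MiB range, assuming the (vm, gpa, size, punch_hole) signature; GPA and size are illustrative:

    #include "kvm_util.h"

    static void guest_memfd_example(struct kvm_vm *vm)
    {
            vm_guest_mem_fallocate(vm, 0x10000000, 0x200000, false);  /* allocate */
            vm_guest_mem_fallocate(vm, 0x10000000, 0x200000, true);   /* punch hole */
    }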
1442 * a page. The allocated physical space comes from the TEST_DATA memory region.
1538 * Locates the memory region containing the VM physical address given
1541 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1545 struct userspace_mem_region *region; in addr_gpa2hva() local
1549 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1550 if (!region) { in addr_gpa2hva()
1555 return (void *)((uintptr_t)region->host_mem in addr_gpa2hva()
1556 + (gpa - region->region.guest_phys_addr)); in addr_gpa2hva()
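addr_gpa2hva() is the usual way for a test to touch guest memory from the host. A minimal sketch (the helper name is illustrative):

    #include <string.h>
    #include "kvm_util.h"

    static void fill_guest_page(struct kvm_vm *vm, vm_paddr_t gpa)
    {
            uint8_t *hva = addr_gpa2hva(vm, gpa);    /* TEST_FAILs if gpa is unmapped */

            memset(hva, 0xaa, vm->page_size);
    }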
1571 * Locates the memory region containing the host virtual address given
1574 * region containing hva exists.
1581 struct userspace_mem_region *region = in addr_hva2gpa() local
1584 if (hva >= region->host_mem) { in addr_hva2gpa()
1585 if (hva <= (region->host_mem in addr_hva2gpa()
1586 + region->region.memory_size - 1)) in addr_hva2gpa()
1588 region->region.guest_phys_addr in addr_hva2gpa()
1589 + (hva - (uintptr_t)region->host_mem)); in addr_hva2gpa()
1621 struct userspace_mem_region *region; in addr_gpa2alias() local
1624 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
1625 if (!region) in addr_gpa2alias()
1628 if (!region->host_alias) in addr_gpa2alias()
1631 offset = gpa - region->region.guest_phys_addr; in addr_gpa2alias()
1632 return (void *) ((uintptr_t) region->host_alias + offset); in addr_gpa2alias()
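addr_hva2gpa() and addr_gpa2alias() go the other way: from a host pointer back to its GPA, and from a GPA to the separate alias mapping kept for fd-backed regions. A round-trip sketch (helper name illustrative; hva must point into an added memslot):

    #include "kvm_util.h"

    static void alias_write(struct kvm_vm *vm, void *hva)
    {
            vm_paddr_t gpa = addr_hva2gpa(vm, hva);
            uint8_t *alias = addr_gpa2alias(vm, gpa);

            if (alias)           /* NULL when the region has no alias mapping */
                    *alias = 0;
    }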
1881 struct userspace_mem_region *region; in vm_dump() local
1888 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
1891 (uint64_t) region->region.guest_phys_addr, in vm_dump()
1892 (uint64_t) region->region.memory_size, in vm_dump()
1893 region->host_mem); in vm_dump()
1895 sparsebit_dump(stream, region->unused_phy_pages, 0); in vm_dump()
1896 if (region->protected_phy_pages) { in vm_dump()
1898 sparsebit_dump(stream, region->protected_phy_pages, 0); in vm_dump()
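vm_dump() prints each region's GPA, size, host address and sparsebits, which is handy when a memslot-related assert fires. A minimal sketch, assuming the (stream, vm, indent) signature:

    #include <stdio.h>
    #include "kvm_util.h"

    static void debug_dump(struct kvm_vm *vm)
    {
            vm_dump(stderr, vm, 2);    /* indent output by two spaces */
    }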
2000 * memslot - Memory region to allocate page from
2017 struct userspace_mem_region *region; in __vm_phy_pages_alloc() local
2027 region = memslot2region(vm, memslot); in __vm_phy_pages_alloc()
2028 TEST_ASSERT(!protected || region->protected_phy_pages, in __vm_phy_pages_alloc()
2029 "Region doesn't support protected memory"); in __vm_phy_pages_alloc()
2034 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { in __vm_phy_pages_alloc()
2035 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); in __vm_phy_pages_alloc()
2051 sparsebit_clear(region->unused_phy_pages, pg); in __vm_phy_pages_alloc()
2053 sparsebit_set(region->protected_phy_pages, pg); in __vm_phy_pages_alloc()
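The allocator above hands out guest physical pages from a memslot's unused_phy_pages sparsebit. A sketch using the public wrapper, assuming the (vm, num, paddr_min, memslot) argument order; slot 0 and the minimum paddr are illustrative and assume a default-created VM:

    #include "kvm_util.h"

    static vm_paddr_t alloc_scratch_pages(struct kvm_vm *vm)
    {
            /* Four contiguous unused pages from memslot 0, at or above GPA 0. */
            return vm_phy_pages_alloc(vm, 4, 0, 0);
    }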
2265 struct userspace_mem_region *region; in vm_is_gpa_protected() local
2270 region = userspace_mem_region_find(vm, paddr, paddr); in vm_is_gpa_protected()
2271 TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr); in vm_is_gpa_protected()
2274 return sparsebit_is_set(region->protected_phy_pages, pg); in vm_is_gpa_protected()
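vm_is_gpa_protected() reports whether a page was handed out from its region's protected_phy_pages pool. A minimal sketch, assuming the function is exposed by the selftests headers (helper name illustrative):

    #include "test_util.h"
    #include "kvm_util.h"

    static void assert_protected(struct kvm_vm *vm, vm_paddr_t gpa)
    {
            TEST_ASSERT(vm_is_gpa_protected(vm, gpa),
                        "GPA 0x%lx should be protected", gpa);
    }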