Excerpts from KVM's gfn_to_pfn_cache implementation (virt/kvm/pfncache.c). Only the lines matched by the search are shown; elided code is marked with "..." and each block is labelled with the function it belongs to.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 */
gfn_to_pfn_cache_invalidate_start():

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		read_lock_irq(&gpc->lock);

		/* Only a single page so no need to care about length */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			read_unlock_irq(&gpc->lock);

			/*
			 * The cache may have been modified while the read lock
			 * was dropped, so re-check before invalidating.
			 */
			write_lock_irq(&gpc->lock);
			if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
			    gpc->uhva >= start && gpc->uhva < end)
				gpc->valid = false;
			write_unlock_irq(&gpc->lock);
			continue;
		}

		read_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);
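The read lock cannot be upgraded to the write lock atomically, which is why the predicate is evaluated twice: once cheaply under the read lock, and again under the write lock before the cache is actually invalidated. Below is a stand-alone sketch of that idiom; the struct and field names are illustrative, not taken from pfncache.c.

#include <linux/spinlock.h>

struct demo_cache {
	rwlock_t lock;
	bool valid;
	unsigned long uhva;
};

/* Invalidate @c if it overlaps [start, end); illustrative only. */
static void demo_invalidate(struct demo_cache *c, unsigned long start,
			    unsigned long end)
{
	read_lock_irq(&c->lock);
	if (c->valid && c->uhva >= start && c->uhva < end) {
		read_unlock_irq(&c->lock);

		/* The state may have changed while no lock was held: re-check. */
		write_lock_irq(&c->lock);
		if (c->valid && c->uhva >= start && c->uhva < end)
			c->valid = false;
		write_unlock_irq(&c->lock);
		return;
	}
	read_unlock_irq(&c->lock);
}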
kvm_gpc_is_valid_len():

	/* The cached access must fit within a single page.  The 'len' argument ... */
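In other words, the page offset of the cached address plus the requested length must not cross a page boundary. A minimal sketch of such a check; the helper name is illustrative, not KVM's:

#include <linux/mm.h>

/* Does a 'len'-byte access starting at 'addr' stay within one page? */
static bool access_fits_in_page(unsigned long addr, unsigned long len)
{
	return offset_in_page(addr) + len <= PAGE_SIZE;
}

For example, a 16-byte access at offset 0xff8 of a 4 KiB page fails the check (0xff8 + 16 = 0x1008 > 0x1000), while the same access at offset 0xff0 passes.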
kvm_gpc_check():

	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

	if (!gpc->active)
		return false;
	/*
	 * If the page was cached from a memslot, make sure the memslots have
	 * not been re-configured.
	 */
	if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
		return false;
	if (kvm_is_error_hva(gpc->uhva))
		return false;
	if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
		return false;
	if (!gpc->valid)
		return false;
	return true;
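Callers are expected to hold gpc->lock for read around kvm_gpc_check() and any access through gpc->khva, and to drop the lock and call kvm_gpc_refresh() whenever the check fails. A sketch of that consumer loop, assuming the two-argument kvm_gpc_check()/kvm_gpc_refresh() forms used above; the helper itself is illustrative, not part of KVM:

#include <linux/kvm_host.h>

/* Copy 'len' bytes of guest data out of an already-activated cache. */
static int demo_read_from_gpc(struct gfn_to_pfn_cache *gpc, void *dst,
			      unsigned long len)
{
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Refreshing may sleep, so it runs without the lock held. */
		if (kvm_gpc_refresh(gpc, len))
			return -EFAULT;

		read_lock_irqsave(&gpc->lock, flags);
	}

	memcpy(dst, gpc->khva, len);
	read_unlock_irqrestore(&gpc->lock, flags);
	return 0;
}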
gpc_unmap():

	/* Unmap the old pfn/page if it was mapped before. */
	...
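Only the comment of gpc_unmap() matched. A plausible sketch of such a helper, under the assumption that a cached mapping is either a kmap() of a struct-page-backed pfn or a memremap() of anything else; this is an approximation, not the verbatim body:

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/io.h>

static void demo_gpc_unmap(kvm_pfn_t pfn, void *khva)
{
	/* Unmap the old pfn/page if it was mapped before. */
	if (is_error_noslot_pfn(pfn) || !khva)
		return;

	if (pfn_valid(pfn))
		kunmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
	else
		memunmap(khva);
#endif
}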
mmu_notifier_retry_cache():

	/*
	 * ... mn_active_invalidate_count
	 * is not protected by gpc->lock.  It is guaranteed to
	 * be elevated before the mmu_notifier acquires gpc->lock, and ...
	 */
	if (kvm->mn_active_invalidate_count)
		return true;

	/*
	 * ... to guarantee either the
	 * old (non-zero) value of mn_active_invalidate_count or the
	 * new (incremented) value of mmu_invalidate_seq is observed.
	 */
	...
	return kvm->mmu_invalidate_seq != mmu_seq;
hva_to_pfn_retry():

	/* Note, the new page offset may be different than the old! */
	void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
	...
	struct page *page;
	...
		.slot = gpc->memslot,
		.gfn = gpa_to_gfn(gpc->gpa),
		...
		.hva = gpc->uhva,
		.refcounted_page = &page,
	...

	lockdep_assert_held(&gpc->refresh_lock);
	lockdep_assert_held_write(&gpc->lock);

	/*
	 * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
	 * ...
	 */
	gpc->valid = false;

	do {
		mmu_seq = gpc->kvm->mmu_invalidate_seq;
		...
		write_unlock_irq(&gpc->lock);

		/* Release state left over from a previous failed iteration. */
		...
			kvm_release_page_unused(page);
		...

		/*
		 * ... kmap() and memremap() can both sleep, so this
		 * too must be done outside of gpc->lock!
		 */
		if (new_pfn == gpc->pfn)
			new_khva = old_khva;
		...
			kvm_release_page_unused(page);
		...
		write_lock_irq(&gpc->lock);
		...
		WARN_ON_ONCE(gpc->valid);
	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

	gpc->valid = true;
	gpc->pfn = new_pfn;
	gpc->khva = new_khva + offset_in_page(gpc->uhva);

	/*
	 * Put the reference to the _new_ page.  The page is now tracked by the
	 * ...
	 */
	kvm_release_page_clean(page);
	...

	/* Error path: retake the lock before returning to the caller. */
	write_lock_irq(&gpc->lock);

	return -EFAULT;
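Reduced to its core, the function implements a classic invalidation-sequence retry: snapshot the invalidation counter, drop the lock to do the sleepable work (resolve the pfn and map it), retake the lock, and start over if an invalidation ran in the meantime. A stripped-down, self-contained sketch of that pattern; the names and types are illustrative, not KVM's:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct demo_map_cache {
	rwlock_t lock;		/* protects 'valid' and 'addr' */
	unsigned long seq;	/* bumped by the invalidation path */
	bool valid;
	void *addr;
};

/* Re-establish the mapping; called and returns with 'lock' held for write. */
static int demo_cache_remap(struct demo_map_cache *c, void *(*do_map)(void))
{
	unsigned long snap;
	void *addr;

	lockdep_assert_held_write(&c->lock);
	c->valid = false;

	do {
		snap = READ_ONCE(c->seq);

		write_unlock_irq(&c->lock);

		/*
		 * A full implementation would also unmap a stale 'addr' from a
		 * previous iteration here, while the lock is not held.
		 */
		addr = do_map();	/* may sleep */

		write_lock_irq(&c->lock);

		if (!addr)
			return -EFAULT;
	} while (READ_ONCE(c->seq) != snap);

	c->addr = addr;
	c->valid = true;
	return 0;
}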
__kvm_gpc_refresh():

	/* Exactly one of gpa and uhva must be valid; otherwise bail. */
	...
		return -EINVAL;

	lockdep_assert_held(&gpc->refresh_lock);

	write_lock_irq(&gpc->lock);

	if (!gpc->active) {
		ret = -EINVAL;
		...
	}

	old_pfn = gpc->pfn;
	old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
	old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);

	if (kvm_is_error_gpa(gpa)) {
		/* HVA-based cache: there is no GPA or memslot to track. */
		gpc->gpa = INVALID_GPA;
		gpc->memslot = NULL;
		gpc->uhva = PAGE_ALIGN_DOWN(uhva);

		if (gpc->uhva != old_uhva)
			hva_change = true;
	} else {
		struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
		...
		if (gpc->gpa != gpa || gpc->generation != slots->generation ||
		    kvm_is_error_hva(gpc->uhva)) {
			...
			gpc->gpa = gpa;
			gpc->generation = slots->generation;
			gpc->memslot = __gfn_to_memslot(slots, gfn);
			gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

			if (kvm_is_error_hva(gpc->uhva)) {
				ret = -EFAULT;
				...
			}
			...
			if (gpc->uhva != old_uhva)
				hva_change = true;
		} else {
			gpc->uhva = old_uhva;
		}
	}
	...
	gpc->uhva += page_offset;

	if (!gpc->valid || hva_change) {
		...
	} else {
		/*
		 * ... But do update gpc->khva because the offset within the page
		 * may have changed.
		 */
		gpc->khva = old_khva + page_offset;
		...
	}
	...
	if (ret) {
		gpc->valid = false;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->khva = NULL;
	}

	/* The old pfn, if it changed, is unmapped after the lock is dropped. */
	unmap_old = (old_pfn != gpc->pfn);
	...
	write_unlock_irq(&gpc->lock);
kvm_gpc_refresh():

	guard(mutex)(&gpc->refresh_lock);

	if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
		return -EINVAL;

	/*
	 * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
	 * or HVA-based, not both.  For GPA-based caches, the HVA will be ...
	 */
	uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;

	return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
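Whether a cache is GPA-based or HVA-based is decided when it is activated, not at refresh time. Assuming the current activation helpers shown further down (kvm_gpc_activate() for a guest physical address, kvm_gpc_activate_hva() for a raw userspace address), the two flavours are set up roughly like this; the wrapper functions are illustrative and assume the cache was already initialized with kvm_gpc_init():

#include <linux/kvm_host.h>

/* GPA-based: the HVA is re-resolved whenever the memslots change. */
static int demo_track_guest_page(struct gfn_to_pfn_cache *gpc, gpa_t gpa)
{
	return kvm_gpc_activate(gpc, gpa, sizeof(u64));
}

/* HVA-based: the cache follows a fixed userspace virtual address. */
static int demo_track_user_page(struct gfn_to_pfn_cache *gpc, unsigned long uhva)
{
	return kvm_gpc_activate_hva(gpc, uhva, sizeof(u64));
}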
kvm_gpc_init():

	rwlock_init(&gpc->lock);
	mutex_init(&gpc->refresh_lock);

	gpc->kvm = kvm;
	gpc->pfn = KVM_PFN_ERR_FAULT;
	gpc->gpa = INVALID_GPA;
	gpc->uhva = KVM_HVA_ERR_BAD;
	gpc->active = gpc->valid = false;
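A gfn_to_pfn_cache is typically embedded in a longer-lived structure and initialized exactly once before any activate or refresh call. A sketch of that setup step; the container struct and function are invented for illustration, only kvm_gpc_init() and its arguments come from the excerpt above:

#include <linux/kvm_host.h>

/* Illustrative container: one cached guest page per vCPU. */
struct demo_vcpu_state {
	struct gfn_to_pfn_cache info_cache;
	/* ... other per-vCPU fields ... */
};

static void demo_vcpu_state_setup(struct demo_vcpu_state *st, struct kvm *kvm)
{
	/*
	 * Set up the locks and sentinel values (KVM_PFN_ERR_FAULT, INVALID_GPA,
	 * KVM_HVA_ERR_BAD); the cache stays inactive until it is activated.
	 */
	kvm_gpc_init(&st->info_cache, kvm);
}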
__kvm_gpc_activate():

	struct kvm *kvm = gpc->kvm;

	if (!kvm_gpc_is_valid_len(gpa, uhva, len))
		return -EINVAL;

	guard(mutex)(&gpc->refresh_lock);

	if (!gpc->active) {
		if (KVM_BUG_ON(gpc->valid, kvm))
			return -EIO;

		spin_lock(&kvm->gpc_lock);
		list_add(&gpc->list, &kvm->gpc_list);
		spin_unlock(&kvm->gpc_lock);

		/*
		 * Mark the cache active only after it is on the list, so it is
		 * reachable by mmu_notifier events before any mapping exists.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = true;
		write_unlock_irq(&gpc->lock);
	}
	...
kvm_gpc_activate():

	/*
	 * ...
	 * by KVM to differentiate between GPA-based and HVA-based caches.
	 */
	...
		return -EINVAL;
	...

kvm_gpc_activate_hva():

	...
		return -EINVAL;
	...
kvm_gpc_deactivate():

	struct kvm *kvm = gpc->kvm;
	...

	guard(mutex)(&gpc->refresh_lock);

	if (gpc->active) {
		/*
		 * ...
		 * until gpc->lock is dropped and refresh is guaranteed to fail.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = false;
		gpc->valid = false;
		...
		old_khva = gpc->khva - offset_in_page(gpc->khva);
		gpc->khva = NULL;

		old_pfn = gpc->pfn;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		write_unlock_irq(&gpc->lock);

		spin_lock(&kvm->gpc_lock);
		list_del(&gpc->list);
		spin_unlock(&kvm->gpc_lock);
		...
	}
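Teardown mirrors setup: the cache has to be deactivated before the structure embedding it (or the VM) is torn down, so the mapping is dropped and the cache is unlinked from kvm->gpc_list while mmu_notifier events can still find it. A sketch of the matching teardown step for the illustrative container used earlier:

#include <linux/kvm_host.h>

static void demo_vcpu_state_teardown(struct demo_vcpu_state *st)
{
	/*
	 * Safe even if the cache was never activated: kvm_gpc_deactivate()
	 * only does work when gpc->active is set.
	 */
	kvm_gpc_deactivate(&st->info_cache);
}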