diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 60a35d9fe2598196a68ba975e240910dd00edc2d..eb625af4fc5ee6e2222f33417dcdd6a3431c1efb 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -866,7 +866,7 @@ void kvm_release_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
@@ -942,12 +942,8 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-		struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 2237abb93ccdefd82f89b63e5f664802570bb8f4..234eab05983992e333d5d2588b3f79b89b891b4b 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -53,13 +53,6 @@ struct gfn_to_hva_cache {
 	struct kvm_memory_slot *memslot;
 };
 
-struct gfn_to_pfn_cache {
-	u64 generation;
-	gfn_t gfn;
-	kvm_pfn_t pfn;
-	bool dirty;
-};
-
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 /*
  * Memory caches are used to preallocate memory ahead of various MMU flows,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3f6d450355f062287946353b6261868eb03e3779..7a28c29dca8a6b7f5ffc001a5d97e9e37448f685 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2548,72 +2548,36 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
 {
 	if (pfn == 0)
 		return;
 
-	if (cache)
-		cache->pfn = cache->gfn = 0;
-
 	if (dirty)
 		kvm_release_pfn_dirty(pfn);
 	else
 		kvm_release_pfn_clean(pfn);
 }
 
-static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
-				 struct gfn_to_pfn_cache *cache, u64 gen)
-{
-	kvm_release_pfn(cache->pfn, cache->dirty, cache);
-
-	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
-	cache->gfn = gfn;
-	cache->dirty = false;
-	cache->generation = gen;
-}
-
-static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-			 struct kvm_host_map *map,
-			 struct gfn_to_pfn_cache *cache,
-			 bool atomic)
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
-	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
-	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	if (cache) {
-		if (!cache->pfn || cache->gfn != gfn ||
-		    cache->generation != gen) {
-			if (atomic)
-				return -EAGAIN;
-			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
-		}
-		pfn = cache->pfn;
-	} else {
-		if (atomic)
-			return -EAGAIN;
-		pfn = gfn_to_pfn_memslot(slot, gfn);
-	}
+	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		if (atomic)
-			hva = kmap_atomic(page);
-		else
-			hva = kmap(page);
+		hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else if (!atomic) {
-		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
 	} else {
-		return -EINVAL;
+		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
 #endif
 	}
 
@@ -2627,27 +2591,9 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 
 	return 0;
 }
-
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-		struct gfn_to_pfn_cache *cache, bool atomic)
-{
-	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
-			     cache, atomic);
-}
-EXPORT_SYMBOL_GPL(kvm_map_gfn);
-
-int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
-{
-	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
-			     NULL, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-static void __kvm_unmap_gfn(struct kvm *kvm,
-			struct kvm_memory_slot *memslot,
-			struct kvm_host_map *map,
-			struct gfn_to_pfn_cache *cache,
-			bool dirty, bool atomic)
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
 	if (!map)
 		return;
@@ -2655,45 +2601,21 @@ static void __kvm_unmap_gfn(struct kvm *kvm,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE) {
-		if (atomic)
-			kunmap_atomic(map->hva);
-		else
-			kunmap(map->page);
-	}
+	if (map->page != KVM_UNMAPPED_PAGE)
+		kunmap(map->page);
 #ifdef CONFIG_HAS_IOMEM
-	else if (!atomic)
-		memunmap(map->hva);
 	else
-		WARN_ONCE(1, "Unexpected unmapping in atomic context");
+		memunmap(map->hva);
 #endif
 
 	if (dirty)
-		mark_page_dirty_in_slot(kvm, memslot, map->gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
 
-	if (cache)
-		cache->dirty |= dirty;
-	else
-		kvm_release_pfn(map->pfn, dirty, NULL);
+	kvm_release_pfn(map->pfn, dirty);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
-
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
-{
-	__kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
-			cache, dirty, atomic);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
-
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-{
-	__kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
-			map, NULL, dirty, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
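
For readers following the API change: the sketch below is not part of the patch. It shows what a caller of the surviving map/unmap pair looks like after the conversion. kvm_vcpu_map(), kvm_vcpu_unmap() and the kvm_host_map fields are taken from the declarations above; the helper name and the write into the page are hypothetical, purely for illustration.

/*
 * Illustrative only -- not from the patch.  Maps one guest frame,
 * writes to it through the host virtual address, then unmaps it.
 * The cache and atomic plumbing are gone: there is one sleeping
 * map call, and the 'dirty' argument to kvm_vcpu_unmap() both logs
 * the page and releases the pfn as dirty.
 */
static int example_touch_guest_frame(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_host_map map;
	int ret;

	/* May sleep; returns -EINVAL on a bad gfn or NULL map. */
	ret = kvm_vcpu_map(vcpu, gfn, &map);
	if (ret)
		return ret;

	/* map.hva stays valid until the matching unmap. */
	memset(map.hva, 0, PAGE_SIZE);

	/* dirty == true: marks the page dirty and releases the pfn dirty. */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}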