Commit 91724814 authored by Boris Ostrovsky, committed by Paolo Bonzini

x86/kvm: Cache gfn to pfn translation



__kvm_map_gfn()'s call to gfn_to_pfn_memslot() is
* relatively expensive
* in certain cases (such as when done from atomic context) not possible at all

Stashing the gfn-to-pfn mapping should help with both cases.

This is part of CVE-2019-3016.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1eff70a9
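In outline: rather than calling gfn_to_pfn_memslot() on every access to the steal-time page, the last successful translation is kept in a per-vCPU gfn_to_pfn_cache and reused while it is still valid. A minimal sketch of the lookup logic, condensed from the kvm_main.c changes below (lookup_pfn_cached() is a made-up name for illustration; the real code open-codes this in __kvm_map_gfn()):

/* Reuse the cached translation when the gfn and memslots generation match. */
static kvm_pfn_t lookup_pfn_cached(struct kvm_memory_slot *slot, gfn_t gfn,
				   struct gfn_to_pfn_cache *cache, u64 gen)
{
	if (cache->pfn && cache->gfn == gfn && cache->generation == gen)
		return cache->pfn;	/* hit: no expensive translation */

	/* miss: release the stale pfn and translate again */
	kvm_release_pfn(cache->pfn, cache->dirty, cache);
	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
	cache->gfn = gfn;
	cache->dirty = false;
	cache->generation = gen;
	return cache->pfn;
}

In atomic context the miss path cannot be taken; as the diff below shows, __kvm_map_gfn() fails with -EAGAIN instead, so the caller can retry from a context where sleeping is allowed.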
arch/x86/include/asm/kvm_host.h  +1 −0
@@ -689,6 +689,7 @@ struct kvm_vcpu_arch {
 		u64 last_steal;
 		struct gfn_to_hva_cache stime;
 		struct kvm_steal_time steal;
+		struct gfn_to_pfn_cache cache;
 	} st;
 
 	u64 tsc_offset;
arch/x86/kvm/x86.c  +10 −0
@@ -9088,6 +9088,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
+
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
 
 	kvmclock_reset(vcpu);
 
@@ -9761,11 +9764,18 @@ out_free:
 
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
+	struct kvm_vcpu *vcpu;
+	int i;
+
 	/*
 	 * memslots->generation has been incremented.
 	 * mmio generation may have reached its maximum value.
 	 */
 	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+
+	/* Force re-initialization of steal_time cache */
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_vcpu_kick(vcpu);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
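Kicking every vCPU is enough here (no cache is cleared directly) because the cache is keyed by memslots->generation: once the generation moves on, the next __kvm_map_gfn() call observes a mismatch and re-translates. The relevant check, condensed from the kvm_main.c hunk below:

	/* A stale generation (or different gfn) forces a re-translation. */
	if (!cache->pfn || cache->gfn != gfn || cache->generation != gen)
		kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);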
include/linux/kvm_host.h  +5 −2
@@ -723,6 +723,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 void kvm_get_pfn(kvm_pfn_t pfn);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -775,10 +776,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
include/linux/kvm_types.h  +8 −1
@@ -18,7 +18,7 @@ struct kvm_memslots;
 
 enum kvm_mr_change;
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 /*
  * Address types:
@@ -51,4 +51,11 @@ struct gfn_to_hva_cache {
 	struct kvm_memory_slot *memslot;
 };
 
+struct gfn_to_pfn_cache {
+	u64 generation;
+	gfn_t gfn;
+	kvm_pfn_t pfn;
+	bool dirty;
+};
+
 #endif /* __KVM_TYPES_H__ */
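For reference, the role of each field as the code below uses it (the annotations are mine, not part of the commit):

struct gfn_to_pfn_cache {
	u64 generation;		/* memslots->generation when the entry was filled */
	gfn_t gfn;		/* guest frame number the cached pfn maps */
	kvm_pfn_t pfn;		/* cached host pfn; 0 means the entry is empty */
	bool dirty;		/* a mapping released into this cache was written */
};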
virt/kvm/kvm_main.c  +79 −19
@@ -1821,27 +1821,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+{
+	if (pfn == 0)
+		return;
+
+	if (cache)
+		cache->pfn = cache->gfn = 0;
+
+	if (dirty)
+		kvm_release_pfn_dirty(pfn);
+	else
+		kvm_release_pfn_clean(pfn);
+}
+
+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
+				 struct gfn_to_pfn_cache *cache, u64 gen)
+{
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
+	cache->gfn = gfn;
+	cache->dirty = false;
+	cache->generation = gen;
+}
+
 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-			 struct kvm_host_map *map)
+			 struct kvm_host_map *map,
+			 struct gfn_to_pfn_cache *cache,
+			 bool atomic)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
 	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	pfn = gfn_to_pfn_memslot(slot, gfn);
+	if (cache) {
+		if (!cache->pfn || cache->gfn != gfn ||
+			cache->generation != gen) {
+			if (atomic)
+				return -EAGAIN;
+			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
+		}
+		pfn = cache->pfn;
+	} else {
+		if (atomic)
+			return -EAGAIN;
+		pfn = gfn_to_pfn_memslot(slot, gfn);
+	}
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		hva = kmap(page);
+		if (atomic)
+			hva = kmap_atomic(page);
+		else
+			hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else {
+	} else if (!atomic) {
 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+	} else {
+		return -EINVAL;
 #endif
 	}
 
@@ -1856,20 +1901,25 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 	return 0;
 }
 
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic)
 {
-	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
+			cache, atomic);
 }
 EXPORT_SYMBOL_GPL(kvm_map_gfn);
 
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
+		NULL, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
 static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
-			struct kvm_host_map *map, bool dirty)
+			struct kvm_host_map *map,
+			struct gfn_to_pfn_cache *cache,
+			bool dirty, bool atomic)
 {
 	if (!map)
 		return;
@@ -1877,34 +1927,44 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE)
-		kunmap(map->page);
+	if (map->page != KVM_UNMAPPED_PAGE) {
+		if (atomic)
+			kunmap_atomic(map->hva);
+		else
+			kunmap(map->page);
+	}
 #ifdef CONFIG_HAS_IOMEM
-	else
+	else if (!atomic)
 		memunmap(map->hva);
+	else
+		WARN_ONCE(1, "Unexpected unmapping in atomic context");
 #endif
 
-	if (dirty) {
+	if (dirty)
 		mark_page_dirty_in_slot(memslot, map->gfn);
-		kvm_release_pfn_dirty(map->pfn);
-	} else {
-		kvm_release_pfn_clean(map->pfn);
-	}
+
+	if (cache)
+		cache->dirty |= dirty;
+	else
+		kvm_release_pfn(map->pfn, dirty, NULL);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
 
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
 {
-	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
+			cache, dirty, atomic);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
 
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
-	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
+			dirty, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
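Putting the pieces together, a steal-time-style caller running in atomic context would use the new parameters roughly as follows (an illustrative sketch, not code from this commit; gfn stands for the guest frame of the steal-time page):

	struct kvm_host_map map;
	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;

	/* Atomic: only succeeds if the translation is already cached. */
	if (kvm_map_gfn(vcpu, gfn, &map, cache, true))
		return;	/* -EAGAIN or -EINVAL: retry from non-atomic context */

	/* ... read or update the guest page through map.hva ... */

	/* Dirty state is folded into the cache; the pfn stays pinned until
	 * kvm_release_pfn() runs, e.g. from kvm_arch_vcpu_free(). */
	kvm_unmap_gfn(vcpu, &map, cache, true, true);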