Commit 171a90d7 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Separate the memory caches for shadow pages and gfn arrays

Use separate caches for allocating shadow pages versus gfn arrays.  This
sets the stage for specifying __GFP_ZERO when allocating shadow pages
without incurring extra cost for gfn arrays.

No functional change intended.

Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200703023545.8771-10-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 531281ad
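
The motivation above can be modeled in userspace: with one cache per object type, a zeroing policy can be attached to each cache individually, so gfn arrays never pay for a memset they don't need. The sketch below is illustrative only; the struct, function names, and capacity constant are stand-ins, not the kernel's kvm_mmu_memory_cache API.

	/*
	 * Illustrative userspace model of the two-cache split; all names
	 * are stand-ins, not kernel code.  Each cache carries its own
	 * zeroing policy, so shadow pages can be pre-zeroed while gfn
	 * arrays skip the memset.  Build with: cc -o cache_demo cache_demo.c
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define CACHE_CAPACITY	40	/* arbitrary per-cache capacity */
	#define OBJ_SIZE	4096	/* one page-sized object */

	struct memory_cache {
		int nobjs;
		int zero_on_alloc;	/* models a per-cache __GFP_ZERO */
		void *objects[CACHE_CAPACITY];
	};

	/* Refill the cache to at least @min objects ahead of time. */
	static int cache_topup(struct memory_cache *mc, int min)
	{
		while (mc->nobjs < min) {
			void *obj = mc->zero_on_alloc ? calloc(1, OBJ_SIZE)
						      : malloc(OBJ_SIZE);
			if (!obj)
				return -1;
			mc->objects[mc->nobjs++] = obj;
		}
		return 0;
	}

	/* Pop a pre-allocated object; callers must top up first. */
	static void *cache_alloc(struct memory_cache *mc)
	{
		return mc->objects[--mc->nobjs];
	}

	static void cache_free_all(struct memory_cache *mc)
	{
		while (mc->nobjs)
			free(mc->objects[--mc->nobjs]);
	}

	int main(void)
	{
		/* Shadow pages want zeroed memory; gfn arrays do not. */
		struct memory_cache shadow_page_cache = { .zero_on_alloc = 1 };
		struct memory_cache gfn_array_cache = { .zero_on_alloc = 0 };

		if (cache_topup(&shadow_page_cache, 5) ||
		    cache_topup(&gfn_array_cache, 5))
			return 1;

		unsigned char *spt = cache_alloc(&shadow_page_cache);
		printf("first byte of shadow page: %u (pre-zeroed)\n", spt[0]);

		free(spt);
		cache_free_all(&shadow_page_cache);
		cache_free_all(&gfn_array_cache);
		return 0;
	}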
Loading
Loading
Loading
Loading
arch/x86/include/asm/kvm_host.h  +2 −1
@@ -602,7 +602,8 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu *walk_mmu;
 
 	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
-	struct kvm_mmu_memory_cache mmu_page_cache;
+	struct kvm_mmu_memory_cache mmu_shadow_page_cache;
+	struct kvm_mmu_memory_cache mmu_gfn_array_cache;
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
 	/*
arch/x86/kvm/mmu/mmu.c  +10 −5
@@ -1109,8 +1109,12 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 				   1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
 	if (r)
 		return r;
-	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache,
-				   2 * PT64_ROOT_MAX_LEVEL);
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
+				   PT64_ROOT_MAX_LEVEL);
+	if (r)
+		return r;
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+				   PT64_ROOT_MAX_LEVEL);
 	if (r)
 		return r;
 	return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
@@ -1120,7 +1124,8 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
 	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
-	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
 	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }

@@ -2082,9 +2087,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
 	struct kvm_mmu_page *sp;
 
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
-	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
+	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
 	if (!direct)
-		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
+		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
 	/*
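
For the payoff this split sets up, a follow-on change can make zeroing a property of the cache itself. The sketch below assumes a gfp_zero field and an mmu_setup_caches() helper, neither of which is part of this commit; it only illustrates the shape that "specifying __GFP_ZERO when allocating shadow pages" could take once the caches are separate.

	/*
	 * Sketch only -- the gfp_zero field and helper below are
	 * assumptions for illustration, not part of this commit.
	 */
	struct kvm_mmu_memory_cache {
		int nobjs;
		gfp_t gfp_zero;		/* 0, or __GFP_ZERO */
		void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
	};

	static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc,
					  int min)
	{
		void *page;

		while (mc->nobjs < min) {
			/* Zeroing now costs only the caches that opt in. */
			page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT |
						       mc->gfp_zero);
			if (!page)
				return -ENOMEM;
			mc->objects[mc->nobjs++] = page;
		}
		return 0;
	}

	static void mmu_setup_caches(struct kvm_vcpu *vcpu)
	{
		/* Only shadow pages ask for pre-zeroed memory. */
		vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
	}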