Commit 94ce87ef authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Prepend "kvm_" to memory cache helpers that will be global



Rename the memory helpers that will soon be moved to common code and be
made globally available via linux/kvm_host.h.  "mmu" alone is not a
sufficient namespace for globally available KVM symbols.

Opportunistically add "nr_" in mmu_memory_cache_free_objects() to make
it clear the function returns the number of free objects, as opposed to
freeing existing objects.

Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200703023545.8771-14-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
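
For context, a hedged sketch of where this rename is headed: the follow-up move exports these helpers to all architectures via include/linux/kvm_host.h, with declarations roughly like the ones below. This is a reconstruction, not part of this patch, and the KVM_ARCH_WANT_MMU_MEMORY_CACHE guard is an assumption here.

#ifdef KVM_ARCH_WANT_MMU_MEMORY_CACHE
/* Assumed follow-up declarations; this patch only renames the x86 statics. */
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif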
parent 378f5cd6
+21 −21
@@ -1072,7 +1072,7 @@ static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
 		return (void *)__get_free_page(gfp_flags);
 }
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
+static int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
 {
 	void *obj;
 
@@ -1087,12 +1087,12 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
 	return 0;
 }
 
-static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *mc)
+static int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
 {
 	return mc->nobjs;
 }
 
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+static void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 {
 	while (mc->nobjs) {
 		if (mc->kmem_cache)
@@ -1107,33 +1107,33 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 	int r;
 
 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
-	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
-				   1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
+	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
 	if (r)
 		return r;
-	r = mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
-				   PT64_ROOT_MAX_LEVEL);
+	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
+				       PT64_ROOT_MAX_LEVEL);
 	if (r)
 		return r;
 	if (maybe_indirect) {
-		r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
-					   PT64_ROOT_MAX_LEVEL);
+		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+					       PT64_ROOT_MAX_LEVEL);
 		if (r)
 			return r;
 	}
-	return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
-				      PT64_ROOT_MAX_LEVEL);
+	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
+					  PT64_ROOT_MAX_LEVEL);
 }
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
-	mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
-	mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
-	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+static void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 {
 	void *p;
 
@@ -1147,7 +1147,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 
 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
 {
-	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
+	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
 }
 
 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
@@ -1418,7 +1418,7 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
 	struct kvm_mmu_memory_cache *mc;
 
 	mc = &vcpu->arch.mmu_pte_list_desc_cache;
-	return mmu_memory_cache_free_objects(mc);
+	return kvm_mmu_memory_cache_nr_free_objects(mc);
 }
 
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
@@ -2090,10 +2090,10 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
 {
 	struct kvm_mmu_page *sp;
 
-	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
-	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
+	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
 	if (!direct)
-		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
+		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
 	/*
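
To make the pattern behind these helpers concrete outside a kernel tree, here is a minimal, self-contained userspace sketch of the same top-up/allocate/free scheme. Every name, size, and capacity below is invented for illustration; this is not KVM code.

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy model of the kvm_mmu_memory_cache pattern: fill the cache where
 * allocation may sleep and fail gracefully, then pop objects on paths
 * that must not fail (in KVM, e.g. while holding the MMU lock).
 */
#define CACHE_CAPACITY 40
#define OBJ_SIZE       64

struct memory_cache {
	int nobjs;
	void *objects[CACHE_CAPACITY];
};

/* Fill the cache until it holds at least @min objects. */
static int cache_topup(struct memory_cache *mc, int min)
{
	if (min > CACHE_CAPACITY)
		return -1;
	while (mc->nobjs < min) {
		void *obj = calloc(1, OBJ_SIZE);
		if (!obj)
			return -1;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

/* Report how many objects remain; the "nr_" rename makes this reading obvious. */
static int cache_nr_free_objects(struct memory_cache *mc)
{
	return mc->nobjs;
}

/* Pop one pre-allocated object; callers must have topped up first. */
static void *cache_alloc(struct memory_cache *mc)
{
	if (!mc->nobjs)
		return NULL;
	return mc->objects[--mc->nobjs];
}

/* Release everything still sitting in the cache. */
static void cache_free(struct memory_cache *mc)
{
	while (mc->nobjs)
		free(mc->objects[--mc->nobjs]);
}

int main(void)
{
	struct memory_cache mc = { 0 };

	if (cache_topup(&mc, 8))
		return 1;
	printf("free objects after topup: %d\n", cache_nr_free_objects(&mc));

	void *obj = cache_alloc(&mc);
	printf("allocated one object, %d left\n", cache_nr_free_objects(&mc));

	free(obj);
	cache_free(&mc);
	return 0;
}

The split mirrors why KVM pre-fills these caches in the first place: topup runs in a context where memory allocation may block, while the alloc side only pops an already-reserved object, so it can be called where blocking is not allowed.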