Commit f05e70ac authored by Zhang Xiantao, committed by Avi Kivity

KVM: Portability: Move mmu-related fields to kvm_arch

This patch moves the mmu-related fields from struct kvm into the x86-specific struct kvm_arch.
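
As a minimal, self-contained sketch of the resulting layout (the stand-in
types, the constant value, and the kvm_used_mmu_pages() helper are
simplifications for illustration, not the real kernel definitions):

/* Minimal sketch of the layout change, for illustration only. */
#include <stdio.h>

#define KVM_NUM_MMU_PAGES 1024	/* illustrative value */

struct hlist_head { void *first; };	/* stand-in for the kernel type */
struct list_head { struct list_head *next, *prev; };

/* After this patch, the mmu bookkeeping sits in the x86-private part... */
struct kvm_arch {
	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
};

/* ...and the generic struct kvm merely embeds it. */
struct kvm {
	/* arch-independent fields (memslots, vcpus, ...) elided */
	struct kvm_arch arch;
};

/* Call sites change from kvm->n_free_mmu_pages to
 * kvm->arch.n_free_mmu_pages; this hypothetical helper shows the
 * new access path. */
static unsigned int kvm_used_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
}

int main(void)
{
	static struct kvm vm;	/* static, so zero-initialized */

	vm.arch.n_alloc_mmu_pages = 64;
	vm.arch.n_free_mmu_pages = 40;
	printf("used mmu pages: %u\n", kvm_used_mmu_pages(&vm));
	return 0;
}

The hunks below apply exactly this transformation to the existing call sites.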

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent d69fb81f
+0 −8
@@ -119,14 +119,6 @@ struct kvm {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
-	/*
-	 * Hash table of struct kvm_mmu_page.
-	 */
-	struct list_head active_mmu_pages;
-	unsigned int n_free_mmu_pages;
-	unsigned int n_requested_mmu_pages;
-	unsigned int n_alloc_mmu_pages;
-	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	struct list_head vm_list;
 	struct file *filp;
+30 −28
@@ -553,7 +553,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	__free_page(virt_to_page(sp->spt));
 	__free_page(virt_to_page(sp->gfns));
 	kfree(sp);
-	++kvm->n_free_mmu_pages;
+	++kvm->arch.n_free_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -566,19 +566,19 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 {
 	struct kvm_mmu_page *sp;
 
-	if (!vcpu->kvm->n_free_mmu_pages)
+	if (!vcpu->kvm->arch.n_free_mmu_pages)
 		return NULL;
 
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-	list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
+	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
 	ASSERT(is_empty_shadow_page(sp->spt));
 	sp->slot_bitmap = 0;
 	sp->multimapped = 0;
 	sp->parent_pte = parent_pte;
-	--vcpu->kvm->n_free_mmu_pages;
+	--vcpu->kvm->arch.n_free_mmu_pages;
 	return sp;
 }
 
@@ -666,7 +666,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &kvm->mmu_page_hash[index];
+	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
 			pgprintk("%s: found role %x\n",
@@ -705,7 +705,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
 		 gfn, role.word);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && sp->role.word == role.word) {
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
@@ -796,7 +796,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
 	} else
-		list_move(&sp->link, &kvm->active_mmu_pages);
+		list_move(&sp->link, &kvm->arch.active_mmu_pages);
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -812,26 +812,26 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 	 * change the value
 	 */
 
-	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
+	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
 	    kvm_nr_mmu_pages) {
-		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
-				       - kvm->n_free_mmu_pages;
+		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
+				       - kvm->arch.n_free_mmu_pages;
 
 		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
 			struct kvm_mmu_page *page;
 
-			page = container_of(kvm->active_mmu_pages.prev,
+			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
 			kvm_mmu_zap_page(kvm, page);
 			n_used_mmu_pages--;
 		}
-		kvm->n_free_mmu_pages = 0;
+		kvm->arch.n_free_mmu_pages = 0;
 	}
 	else
-		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
-					 - kvm->n_alloc_mmu_pages;
+		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
+					 - kvm->arch.n_alloc_mmu_pages;
 
-	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
+	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
@@ -845,7 +845,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	r = 0;
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &kvm->mmu_page_hash[index];
+	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
@@ -1362,7 +1362,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		vcpu->arch.last_pte_updated = NULL;
 	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
 		if (sp->gfn != gfn || sp->role.metaphysical)
 			continue;
@@ -1428,10 +1428,10 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
+	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
 		struct kvm_mmu_page *sp;
 
-		sp = container_of(vcpu->kvm->active_mmu_pages.prev,
+		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 		++vcpu->kvm->stat.mmu_recycled;
@@ -1482,8 +1482,8 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_page *sp;
 
-	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-		sp = container_of(vcpu->kvm->active_mmu_pages.next,
+	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
+		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 	}
@@ -1497,10 +1497,12 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 
 	ASSERT(vcpu);
 
-	if (vcpu->kvm->n_requested_mmu_pages)
-		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
+	if (vcpu->kvm->arch.n_requested_mmu_pages)
+		vcpu->kvm->arch.n_free_mmu_pages =
+					vcpu->kvm->arch.n_requested_mmu_pages;
 	else
-		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
+		vcpu->kvm->arch.n_free_mmu_pages =
+					vcpu->kvm->arch.n_alloc_mmu_pages;
 	/*
 	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
 	 * Therefore we need to allocate shadow page tables in the first
@@ -1549,7 +1551,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
 	struct kvm_mmu_page *sp;
 
-	list_for_each_entry(sp, &kvm->active_mmu_pages, link) {
+	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
 		int i;
 		u64 *pt;
 
@@ -1568,7 +1570,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 
-	list_for_each_entry_safe(sp, node, &kvm->active_mmu_pages, link)
+	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
 		kvm_mmu_zap_page(kvm, sp);
 
 	kvm_flush_remote_tlbs(kvm);
@@ -1738,7 +1740,7 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
 	struct kvm_mmu_page *sp;
 	int i;
 
-	list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
 		u64 *pt = sp->spt;
 
 		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
@@ -1774,7 +1776,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 	unsigned long *rmapp;
 	gfn_t gfn;
 
-	list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
 		if (sp->role.metaphysical)
 			continue;
 
+1 −1
@@ -5,7 +5,7 @@
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+	if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
 		__kvm_mmu_free_some_pages(vcpu);
 }
 
+4 −4
@@ -1175,7 +1175,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 	mutex_lock(&kvm->lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
-	kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
+	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
 	mutex_unlock(&kvm->lock);
 	return 0;
@@ -1183,7 +1183,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 
 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
-	return kvm->n_alloc_mmu_pages;
+	return kvm->arch.n_alloc_mmu_pages;
 }
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -3051,7 +3051,7 @@ struct kvm *kvm_arch_create_vm(void)
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&kvm->active_mmu_pages);
+	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 
 	return kvm;
 }
@@ -3130,7 +3130,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		}
 	}
 
-	if (!kvm->n_requested_mmu_pages) {
+	if (!kvm->arch.n_requested_mmu_pages) {
 		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 	}
+9 −0
@@ -266,6 +266,15 @@ struct kvm_mem_alias {
 struct kvm_arch{
 	int naliases;
 	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
+
+	unsigned int n_free_mmu_pages;
+	unsigned int n_requested_mmu_pages;
+	unsigned int n_alloc_mmu_pages;
+	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+	/*
+	 * Hash table of struct kvm_mmu_page.
+	 */
+	struct list_head active_mmu_pages;
 };
 
 struct kvm_vcpu_stat {