Commit ad312c7c authored by Zhang Xiantao, committed by Avi Kivity

KVM: Portability: Introduce kvm_vcpu_arch



Move all the architecture-specific fields in kvm_vcpu into a new struct
kvm_vcpu_arch.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 682c59a3
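
[Editor's note] In code terms, the split embeds an arch member in the
architecture-neutral struct kvm_vcpu, so every x86-specific access in the
diff below becomes vcpu->arch.<field>. A sketch of the resulting shape,
with the field list inferred only from the usages visible in these hunks
(the actual struct in the commit carries many more x86 fields):

	/* illustrative sketch, not the full definition from the commit */
	struct kvm_vcpu_arch {
		u64 apic_base;
		struct kvm_lapic *apic;	/* NULL without an in-kernel irqchip */
		int mp_state;
		int sipi_vector;
		unsigned long cr0;
		unsigned long cr3;
		u64 pdptrs[4];		/* PAE page-directory pointers */
		u64 shadow_efer;
		struct kvm_mmu mmu;
		struct kvm_mmu_memory_cache mmu_pte_chain_cache;
		/* ... pio_data, last_pt_write_* and the rest ... */
	};

	struct kvm_vcpu {
		struct kvm *kvm;
		int vcpu_id;
		struct kvm_run *run;
		wait_queue_head_t wq;
		/* ... architecture-neutral fields ... */
		struct kvm_vcpu_arch arch;	/* all x86 state now lives here */
	};
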
+5 −5
@@ -158,7 +158,7 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
	if (dest_mode == 0) {	/* Physical mode. */
		if (dest == 0xFF) {	/* Broadcast. */
			for (i = 0; i < KVM_MAX_VCPUS; ++i)
-				if (kvm->vcpus[i] && kvm->vcpus[i]->apic)
+				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
					mask |= 1 << i;
			return mask;
		}
@@ -166,8 +166,8 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
-			if (kvm_apic_match_physical_addr(vcpu->apic, dest)) {
-				if (vcpu->apic)
+			if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
+				if (vcpu->arch.apic)
					mask = 1 << i;
				break;
			}
@@ -177,8 +177,8 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
-			if (vcpu->apic &&
-			    kvm_apic_match_logical_addr(vcpu->apic, dest))
+			if (vcpu->arch.apic &&
+			    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
				mask |= 1 << vcpu->vcpu_id;
		}
	ioapic_debug("mask %x\n", mask);
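
[Editor's note] For readability, here is the delivery-bitmask logic the
three hunks above touch, consolidated into one sketch. The helpers and
constants are the kernel's; the NULL check in the unicast case is hoisted
for clarity, so treat this as illustrative rather than a verbatim copy:

	static u32 delivery_bitmask(struct kvm *kvm, u8 dest, u8 dest_mode)
	{
		u32 mask = 0;
		int i;

		if (dest_mode == 0 && dest == 0xFF) {	/* physical broadcast */
			for (i = 0; i < KVM_MAX_VCPUS; ++i)
				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
					mask |= 1 << i;
		} else if (dest_mode == 0) {		/* physical unicast */
			for (i = 0; i < KVM_MAX_VCPUS; ++i) {
				struct kvm_vcpu *vcpu = kvm->vcpus[i];

				if (vcpu && vcpu->arch.apic &&
				    kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
					mask = 1 << i;	/* first match wins */
					break;
				}
			}
		} else {				/* logical mode */
			for (i = 0; i < KVM_MAX_VCPUS; ++i) {
				struct kvm_vcpu *vcpu = kvm->vcpus[i];

				if (vcpu && vcpu->arch.apic &&
				    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
					mask |= 1 << vcpu->vcpu_id;
			}
		}
		return mask;
	}
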
+1 −1
@@ -670,7 +670,7 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
-		page = virt_to_page(vcpu->pio_data);
+		page = virt_to_page(vcpu->arch.pio_data);
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
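
[Editor's note] The fault handler above backs the mmap of a vcpu fd:
page 0 is struct kvm_run and page KVM_PIO_PAGE_OFFSET (1 on x86) is the
PIO scratch page, which is why pio_data is among the fields moving into
arch. A hedged userspace sketch of how a VMM reaches those pages,
assuming the standard KVM character-device API:

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/kvm.h>
	#include <stddef.h>

	static struct kvm_run *map_vcpu(int kvm_fd, int vcpu_fd)
	{
		/* size covers kvm_run plus the PIO data page */
		int sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
		struct kvm_run *run;

		if (sz < 0)
			return NULL;
		run = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED,
			   vcpu_fd, 0);
		/* PIO data, when sz allows, sits at (char *)run + 4096 */
		return run == MAP_FAILED ? NULL : run;
	}
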
+46 −45
@@ -58,6 +58,7 @@

#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
+
static inline u32 apic_get_reg(struct kvm_lapic *apic, int reg_off)
{
	return *((u32 *) (apic->regs + reg_off));
@@ -90,7 +91,7 @@ static inline void apic_clear_vector(int vec, void *bitmap)

static inline int apic_hw_enabled(struct kvm_lapic *apic)
{
-	return (apic)->vcpu->apic_base & MSR_IA32_APICBASE_ENABLE;
+	return (apic)->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
}

static inline int  apic_sw_enabled(struct kvm_lapic *apic)
@@ -174,7 +175,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
	int highest_irr;

	if (!apic)
@@ -187,7 +188,7 @@ EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
{
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic_test_and_set_irr(vec, apic)) {
		/* a new pending irq is set in IRR */
@@ -272,7 +273,7 @@ static int apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			   int short_hand, int dest, int dest_mode)
{
	int result = 0;
-	struct kvm_lapic *target = vcpu->apic;
+	struct kvm_lapic *target = vcpu->arch.apic;

	apic_debug("target %p, source %p, dest 0x%x, "
		   "dest_mode 0x%x, short_hand 0x%x",
@@ -339,10 +340,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
		} else
			apic_clear_vector(vector, apic->regs + APIC_TMR);

-		if (vcpu->mp_state == VCPU_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
			kvm_vcpu_kick(vcpu);
-		else if (vcpu->mp_state == VCPU_MP_STATE_HALTED) {
-			vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+		else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
+			vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
			if (waitqueue_active(&vcpu->wq))
				wake_up_interruptible(&vcpu->wq);
		}
@@ -363,11 +364,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,

	case APIC_DM_INIT:
		if (level) {
-			if (vcpu->mp_state == VCPU_MP_STATE_RUNNABLE)
+			if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
				printk(KERN_DEBUG
				       "INIT on a runnable vcpu %d\n",
				       vcpu->vcpu_id);
-			vcpu->mp_state = VCPU_MP_STATE_INIT_RECEIVED;
+			vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED;
			kvm_vcpu_kick(vcpu);
		} else {
			printk(KERN_DEBUG
@@ -380,9 +381,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
	case APIC_DM_STARTUP:
		printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
		       vcpu->vcpu_id, vector);
-		if (vcpu->mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
-			vcpu->sipi_vector = vector;
-			vcpu->mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
+		if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
+			vcpu->arch.sipi_vector = vector;
+			vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
			if (waitqueue_active(&vcpu->wq))
				wake_up_interruptible(&vcpu->wq);
		}
@@ -411,7 +412,7 @@ static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
			next = 0;
		if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
			continue;
-		apic = kvm->vcpus[next]->apic;
+		apic = kvm->vcpus[next]->arch.apic;
		if (apic && apic_enabled(apic))
			break;
		apic = NULL;
@@ -482,12 +483,12 @@ static void apic_send_ipi(struct kvm_lapic *apic)
		if (!vcpu)
			continue;

-		if (vcpu->apic &&
+		if (vcpu->arch.apic &&
		    apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
			if (delivery_mode == APIC_DM_LOWEST)
				set_bit(vcpu->vcpu_id, &lpr_map);
			else
-				__apic_accept_irq(vcpu->apic, delivery_mode,
+				__apic_accept_irq(vcpu->arch.apic, delivery_mode,
						  vector, level, trig_mode);
		}
	}
@@ -495,7 +496,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
	if (delivery_mode == APIC_DM_LOWEST) {
		target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
		if (target != NULL)
-			__apic_accept_irq(target->apic, delivery_mode,
+			__apic_accept_irq(target->arch.apic, delivery_mode,
					  vector, level, trig_mode);
	}
}
@@ -772,15 +773,15 @@ static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr)

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
-	if (!vcpu->apic)
+	if (!vcpu->arch.apic)
		return;

-	hrtimer_cancel(&vcpu->apic->timer.dev);
+	hrtimer_cancel(&vcpu->arch.apic->timer.dev);

-	if (vcpu->apic->regs_page)
-		__free_page(vcpu->apic->regs_page);
+	if (vcpu->arch.apic->regs_page)
+		__free_page(vcpu->arch.apic->regs_page);

-	kfree(vcpu->apic);
+	kfree(vcpu->arch.apic);
}

/*
@@ -791,7 +792,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)

void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic)
		return;
@@ -800,7 +801,7 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 tpr;

	if (!apic)
@@ -813,29 +814,29 @@ EXPORT_SYMBOL_GPL(kvm_lapic_get_cr8);

void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic) {
		value |= MSR_IA32_APICBASE_BSP;
-		vcpu->apic_base = value;
+		vcpu->arch.apic_base = value;
		return;
	}
	if (apic->vcpu->vcpu_id)
		value &= ~MSR_IA32_APICBASE_BSP;

-	vcpu->apic_base = value;
-	apic->base_address = apic->vcpu->apic_base &
+	vcpu->arch.apic_base = value;
+	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	/* with FSB delivery interrupt, we can restart APIC functionality */
	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
		   "0x%lx.\n", apic->vcpu->apic_base, apic->base_address);
		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);

}

u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu)
{
-	return vcpu->apic_base;
+	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_lapic_get_base);

@@ -847,7 +848,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
	apic_debug("%s\n", __FUNCTION__);

	ASSERT(vcpu);
-	apic = vcpu->apic;
+	apic = vcpu->arch.apic;
	ASSERT(apic != NULL);

	/* Stop the timer in case it's a reset to an active apic */
@@ -878,19 +879,19 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
	update_divide_count(apic);
	atomic_set(&apic->timer.pending, 0);
	if (vcpu->vcpu_id == 0)
-		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
+		vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
	apic_update_ppr(apic);

	apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__,
		   vcpu, kvm_apic_id(apic),
-		   vcpu->apic_base, apic->base_address);
+		   vcpu->arch.apic_base, apic->base_address);
}
EXPORT_SYMBOL_GPL(kvm_lapic_reset);

int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
{
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
	int ret = 0;

	if (!apic)
@@ -915,7 +916,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)

	atomic_inc(&apic->timer.pending);
	if (waitqueue_active(q)) {
-		apic->vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+		apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
		wake_up_interruptible(q);
	}
	if (apic_lvtt_period(apic)) {
@@ -961,7 +962,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
	if (!apic)
		goto nomem;

-	vcpu->apic = apic;
+	vcpu->arch.apic = apic;

	apic->regs_page = alloc_page(GFP_KERNEL);
	if (apic->regs_page == NULL) {
@@ -976,7 +977,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
	hrtimer_init(&apic->timer.dev, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	apic->timer.dev.function = apic_timer_fn;
	apic->base_address = APIC_DEFAULT_PHYS_BASE;
-	vcpu->apic_base = APIC_DEFAULT_PHYS_BASE;
+	vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;

	kvm_lapic_reset(vcpu);
	apic->dev.read = apic_mmio_read;
@@ -994,7 +995,7 @@ EXPORT_SYMBOL_GPL(kvm_create_lapic);

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
	int highest_irr;

	if (!apic || !apic_enabled(apic))
@@ -1010,11 +1011,11 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
-	u32 lvt0 = apic_get_reg(vcpu->apic, APIC_LVT0);
+	u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
	int r = 0;

	if (vcpu->vcpu_id == 0) {
-		if (!apic_hw_enabled(vcpu->apic))
+		if (!apic_hw_enabled(vcpu->arch.apic))
			r = 1;
		if ((lvt0 & APIC_LVT_MASKED) == 0 &&
		    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
@@ -1025,7 +1026,7 @@ int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic && apic_lvt_enabled(apic, APIC_LVTT) &&
		atomic_read(&apic->timer.pending) > 0) {
@@ -1036,7 +1037,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)

void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
{
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec)
		apic->timer.last_update = ktime_add_ns(
@@ -1047,7 +1048,7 @@ void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;

	if (vector == -1)
		return -1;
@@ -1060,9 +1061,9 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)

void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
{
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;

-	apic->base_address = vcpu->apic_base &
+	apic->base_address = vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;
	apic_set_reg(apic, APIC_LVR, APIC_VERSION);
	apic_update_ppr(apic);
@@ -1073,7 +1074,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)

void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
	struct hrtimer *timer;

	if (!apic)
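
[Editor's note] The kvm_lapic_set_base() hunk above derives the APIC MMIO
base by masking the IA32_APICBASE MSR. A small sketch of that layout, with
bit positions per the Intel SDM (the local macro names are illustrative):

	#include <stdint.h>

	#define APICBASE_BSP	(1ULL << 8)	/* bootstrap processor flag */
	#define APICBASE_ENABLE	(1ULL << 11)	/* global APIC enable */
	#define APICBASE_BASE	(0xfffffULL << 12) /* physical base mask */

	static uint64_t apic_base_phys(uint64_t apic_base_msr)
	{
		/* 0xfee00000 after reset (APIC_DEFAULT_PHYS_BASE) */
		return apic_base_msr & APICBASE_BASE;
	}
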
+71 −71
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
-	return vcpu->cr0 & X86_CR0_WP;
+	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
@@ -190,7 +190,7 @@ static int is_cpuid_PSE36(void)

static int is_nx(struct kvm_vcpu *vcpu)
{
-	return vcpu->shadow_efer & EFER_NX;
+	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
@@ -292,18 +292,18 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
	int r;

	kvm_mmu_free_some_pages(vcpu);
-	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
-	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
-	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
+	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
-	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
@@ -311,10 +311,10 @@ out:

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
-	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
-	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
-	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
-	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
+	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -330,7 +330,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
-	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
+	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

@@ -341,7 +341,7 @@ static void mmu_free_pte_chain(struct kvm_pte_chain *pc)

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
-	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
+	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

@@ -568,9 +568,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
	if (!vcpu->kvm->n_free_mmu_pages)
		return NULL;

-	sp = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache, sizeof *sp);
-	sp->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
-	sp->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
+	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(sp->spt));
@@ -692,11 +692,11 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
	struct hlist_node *node;

	role.word = 0;
-	role.glevels = vcpu->mmu.root_level;
+	role.glevels = vcpu->arch.mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;
-	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
@@ -718,7 +718,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
-	vcpu->mmu.prefetch_page(vcpu, sp);
+	vcpu->arch.mmu.prefetch_page(vcpu, sp);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	if (new_page)
@@ -768,7 +768,7 @@ static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
-			kvm->vcpus[i]->last_pte_updated = NULL;
+			kvm->vcpus[i]->arch.last_pte_updated = NULL;
}

static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -875,7 +875,7 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;
@@ -962,7 +962,7 @@ unshadowed:
	else
		kvm_release_page_clean(page);
	if (!ptwrite || !*ptwrite)
-		vcpu->last_pte_updated = shadow_pte;
+		vcpu->arch.last_pte_updated = shadow_pte;
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -972,7 +972,7 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int level = PT32E_ROOT_LEVEL;
-	hpa_t table_addr = vcpu->mmu.root_hpa;
+	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
	int pt_write = 0;

	for (; ; level--) {
@@ -1024,29 +1024,29 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
	int i;
	struct kvm_mmu_page *sp;

-	if (!VALID_PAGE(vcpu->mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
#ifdef CONFIG_X86_64
-	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-		hpa_t root = vcpu->mmu.root_hpa;
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
-		vcpu->mmu.root_hpa = INVALID_PAGE;
+		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
		}
-		vcpu->mmu.pae_root[i] = INVALID_PAGE;
+		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
-	vcpu->mmu.root_hpa = INVALID_PAGE;
+	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
@@ -1055,41 +1055,41 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;

-	root_gfn = vcpu->cr3 >> PAGE_SHIFT;
+	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
-	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-		hpa_t root = vcpu->mmu.root_hpa;
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
-		vcpu->mmu.root_hpa = root;
+		vcpu->arch.mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
-		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
-			if (!is_present_pte(vcpu->pdptrs[i])) {
-				vcpu->mmu.pae_root[i] = 0;
+		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
+			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
+				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
-			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
-		} else if (vcpu->mmu.root_level == 0)
+			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, !is_paging(vcpu),
				      ACC_ALL, NULL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
-		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
+		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
-	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
+	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
@@ -1109,7 +1109,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
		return r;

	ASSERT(vcpu);
-	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

@@ -1124,7 +1124,7 @@ static void nonpaging_free(struct kvm_vcpu *vcpu)

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
-	struct kvm_mmu *context = &vcpu->mmu;
+	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
@@ -1171,7 +1171,7 @@ static void paging_free(struct kvm_vcpu *vcpu)

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
-	struct kvm_mmu *context = &vcpu->mmu;
+	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
@@ -1192,7 +1192,7 @@ static int paging64_init_context(struct kvm_vcpu *vcpu)

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
-	struct kvm_mmu *context = &vcpu->mmu;
+	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
@@ -1213,7 +1213,7 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
@@ -1228,9 +1228,9 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
-	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
-		vcpu->mmu.free(vcpu);
-		vcpu->mmu.root_hpa = INVALID_PAGE;
+	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+		vcpu->arch.mmu.free(vcpu);
+		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	}
}

@@ -1250,7 +1250,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
	if (r)
		goto out;
	mmu_alloc_roots(vcpu);
-	kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	mutex_unlock(&vcpu->kvm->lock);
@@ -1323,7 +1323,7 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
-	u64 *spte = vcpu->last_pte_updated;
+	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}
@@ -1350,15 +1350,15 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
-	if (gfn == vcpu->last_pt_write_gfn
+	if (gfn == vcpu->arch.last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
-		++vcpu->last_pt_write_count;
-		if (vcpu->last_pt_write_count >= 3)
+		++vcpu->arch.last_pt_write_count;
+		if (vcpu->arch.last_pt_write_count >= 3)
			flooded = 1;
	} else {
-		vcpu->last_pt_write_gfn = gfn;
-		vcpu->last_pt_write_count = 1;
-		vcpu->last_pte_updated = NULL;
+		vcpu->arch.last_pt_write_gfn = gfn;
+		vcpu->arch.last_pt_write_count = 1;
+		vcpu->arch.last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
@@ -1420,7 +1420,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
}
@@ -1443,7 +1443,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
	enum emulation_result er;

	mutex_lock(&vcpu->kvm->lock);
-	r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
+	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

@@ -1486,7 +1486,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
	}
-	free_page((unsigned long)vcpu->mmu.pae_root);
+	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -1508,9 +1508,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
-	vcpu->mmu.pae_root = page_address(page);
+	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
-		vcpu->mmu.pae_root[i] = INVALID_PAGE;
+		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;

@@ -1522,7 +1522,7 @@ error_1:
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}
@@ -1530,7 +1530,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}
@@ -1659,11 +1659,11 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
-				       vcpu->mmu.root_level, va, level, ent);
+				       vcpu->arch.mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
-			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
+			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
			struct page *page = gpa_to_page(vcpu, gpa);
			hpa_t hpa = page_to_phys(page);

@@ -1671,7 +1671,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
-				       audit_msg, vcpu->mmu.root_level,
+				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
@@ -1688,13 +1688,13 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

-	if (vcpu->mmu.root_level == 4)
-		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
+	if (vcpu->arch.mmu.root_level == 4)
+		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
-			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
+			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
-						    vcpu->mmu.pae_root[i],
+						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}
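
[Editor's note] The mmu_topup_memory_caches() hunk above pre-fills small
per-vcpu object caches while sleeping is still allowed, so the page-fault
path can later allocate without blocking. A standalone sketch of that
topup/alloc pattern (names and sizes are illustrative, not the kernel's):

	#include <stdlib.h>

	#define CACHE_MAX 8

	struct obj_cache {
		int nobjs;
		void *objs[CACHE_MAX];
	};

	/* called in a sleepable context, before any locks are taken */
	static int cache_topup(struct obj_cache *mc, size_t size, int min)
	{
		while (mc->nobjs < min) {
			void *obj = calloc(1, size);

			if (!obj)
				return -1;
			mc->objs[mc->nobjs++] = obj;
		}
		return 0;
	}

	/* called from the fault path; cannot fail once topped up */
	static void *cache_alloc(struct obj_cache *mc)
	{
		return mc->objs[--mc->nobjs];
	}
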
+8 −8
@@ -129,11 +129,11 @@ static int FNAME(walk_addr)(struct guest_walker *walker,

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
walk:
-	walker->level = vcpu->mmu.root_level;
-	pte = vcpu->cr3;
+	walker->level = vcpu->arch.mmu.root_level;
+	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
-		pte = vcpu->pdptrs[(addr >> 30) & 3];
+		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
@@ -275,10 +275,10 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
	if (!is_present_pte(walker->ptes[walker->level - 1]))
		return NULL;

-	shadow_addr = vcpu->mmu.root_hpa;
-	level = vcpu->mmu.shadow_root_level;
+	shadow_addr = vcpu->arch.mmu.root_hpa;
+	level = vcpu->arch.mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
-		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
+		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}
@@ -380,7 +380,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
-		vcpu->last_pt_write_count = 0; /* reset fork detector */
+		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

@@ -390,7 +390,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
-		vcpu->last_pt_write_count = 0; /* reset fork detector */
+		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise its a guest fault.
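
[Editor's note] One detail worth calling out from the walk_addr()/fetch()
hunks above: under PAE, both the guest walk and the shadow walk start from
one of four entries selected by bits 31:30 of the address
(vcpu->arch.pdptrs[] on the guest side, vcpu->arch.mmu.pae_root[] on the
shadow side). A minimal sketch of that index computation:

	#include <stdint.h>

	/* pick the PAE page-directory-pointer entry for a 32-bit address */
	static uint64_t pae_entry(const uint64_t entries[4], uint32_t addr)
	{
		return entries[(addr >> 30) & 3];
	}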