Commit 4d4ec087 authored by Avi Kivity, committed by Marcelo Tosatti

KVM: Replace read accesses of vcpu->arch.cr0 by an accessor



Since we'd like to allow the guest to own a few bits of cr0 at times, we need
to know when we access those bits.

Signed-off-by: Avi Kivity <avi@redhat.com>
parent a1f83a74
+3 −3
@@ -1515,7 +1515,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)

	/* syscall is not available in real mode */
	if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
-		|| !(ctxt->vcpu->arch.cr0 & X86_CR0_PE))
+	    || !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE))
		return -1;

	setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1569,7 +1569,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)

	/* inject #GP if in real mode or paging is disabled */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
-		!(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+	    !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return -1;
	}
@@ -1635,7 +1635,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)

	/* inject #GP if in real mode or paging is disabled */
	if (ctxt->mode == X86EMUL_MODE_REAL
-		|| !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+	    || !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return -1;
	}
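
All three emulator sites gate on the same CR0.PE test through the new accessor. The repeated check could equally be factored into a one-line helper; a minimal sketch (is_protmode is a hypothetical name, not something this commit introduces):

/* Hypothetical helper, not part of this commit: wraps the repeated
 * CR0.PE test used by the syscall/sysenter/sysexit emulation paths. */
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE) != 0;
}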
+10 −0
@@ -38,6 +38,16 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
	return vcpu->arch.pdptrs[index];
}

+static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
+{
+	return vcpu->arch.cr0 & mask;
+}
+
+static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
+{
+	return kvm_read_cr0_bits(vcpu, ~0UL);
+}
+
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	if (mask & vcpu->arch.cr4_guest_owned_bits)
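
The cr4 accessor visible at the end of this hunk already consults cr4_guest_owned_bits and decaches the guest-owned bits from hardware before reading them. Presumably kvm_read_cr0_bits is meant to grow the same logic once cr0 bits become guest-owned, which is the motivation the commit message gives. A hedged sketch of that shape, with cr0_guest_owned_bits and ->decache_cr0_guest_bits as illustrative names mirroring the cr4 side:

/* Sketch only: once some cr0 bits are guest-owned, the cached value in
 * vcpu->arch.cr0 is stale for exactly those bits, so refresh them from
 * hardware before handing them out. cr0_guest_owned_bits and
 * ->decache_cr0_guest_bits are illustrative names modeled on the
 * existing cr4 equivalents, not fields or callbacks this commit adds. */
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	if (mask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}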
+1 −1
@@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
-	return vcpu->arch.cr0 & X86_CR0_WP;
+	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

static int is_cpuid_PSE36(void)
+1 −1
@@ -79,7 +79,7 @@ static inline int is_pse(struct kvm_vcpu *vcpu)

static inline int is_paging(struct kvm_vcpu *vcpu)
{
-	return vcpu->arch.cr0 & X86_CR0_PG;
+	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
}

static inline int is_present_gpte(unsigned long pte)
+5 −4
@@ -980,7 +980,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
	if (npt_enabled)
		goto set;

-	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
+	if (kvm_read_cr0_bits(vcpu, X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}
@@ -1244,7 +1244,7 @@ static int ud_interception(struct vcpu_svm *svm)
static int nm_interception(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
+	if (!kvm_read_cr0_bits(&svm->vcpu, X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

@@ -1743,7 +1743,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.shadow_efer;
-	hsave->save.cr0    = svm->vcpu.arch.cr0;
+	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = vmcb->save.rflags;
	hsave->save.rip    = svm->next_rip;
@@ -2387,7 +2387,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)

	if (npt_enabled) {
		int mmu_reload = 0;
-		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
+		if ((kvm_read_cr0_bits(vcpu, X86_CR0_PG) ^ svm->vmcb->save.cr0)
+		    & X86_CR0_PG) {
			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
			mmu_reload = 1;
		}
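
The handle_exit hunk compares the cached paging bit against the cr0 value the guest left in the VMCB: XOR yields 1 in every bit position where its operands differ, so masking the result with X86_CR0_PG is non-zero exactly when paging was toggled. A standalone illustration of the idiom (not kernel code):

#include <stdbool.h>

#define X86_CR0_PG	(1UL << 31)	/* cr0 paging-enable bit */

/* True iff the PG bit differs between the two cr0 values. */
static bool paging_toggled(unsigned long old_cr0, unsigned long new_cr0)
{
	return ((old_cr0 ^ new_cr0) & X86_CR0_PG) != 0;
}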