Commit c2fe3cd4 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Move vendor CR4 validity check to dedicated kvm_x86_ops hook



Split out VMX's checks on CR4.VMXE to a dedicated hook, .is_valid_cr4(),
and invoke the new hook from kvm_valid_cr4().  This fixes an issue where
KVM_SET_SREGS would return success while failing to actually set CR4.

Fixing the issue by explicitly checking kvm_x86_ops.set_cr4()'s return
in __set_sregs() is not a viable option as KVM has already stuffed a
variety of vCPU state.
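As a point of reference, here is an abridged sketch of __set_sregs() from arch/x86/kvm/x86.c (whose hunk is not shown on this page) illustrating the ordering; helper bodies are trimmed:

	static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
	{
		/* All validation must pass before any state is committed;
		 * kvm_valid_sregs() now vets CR4 via the new hook. */
		if (kvm_valid_sregs(vcpu, sregs))
			return -EINVAL;

		/* IDT, GDT, EFER, CR0, CR3, APIC base, etc. are stuffed here,
		 * so a late failure from .set_cr4() could not be unwound. */

		kvm_x86_ops.set_cr4(vcpu, sregs->cr4);

		/* remaining segment and flag state follows */
		return 0;
	}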

Note, kvm_valid_cr4() and is_valid_cr4() have different return types and
inverted semantics.  This will be remedied in a future patch.
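Roughly, the caller in arch/x86/kvm/x86.c (another hunk not shown on this page) ends up as:

	static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
	{
		if (cr4 & cr4_reserved_bits)
			return -EINVAL;

		if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
			return -EINVAL;

		/* Inverted semantics: the vendor hook returns true when CR4 is
		 * valid, while this function returns 0 on success. */
		if (!kvm_x86_ops.is_valid_cr4(vcpu, cr4))
			return -EINVAL;

		return 0;
	}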

Fixes: 5e1746d6 ("KVM: nVMX: Allow setting the VMXE bit in CR4")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20201007014417.29276-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 311a0659
arch/x86/include/asm/kvm_host.h  +2 −1
@@ -1115,7 +1115,8 @@ struct kvm_x86_ops {
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
-	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0);
+	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
arch/x86/kvm/svm/svm.c  +7 −2
@@ -1682,7 +1682,12 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	update_cr0_intercept(svm);
 }
 
-int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	return true;
+}
+
+void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
 	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
@@ -1696,7 +1701,6 @@ int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	cr4 |= host_cr4_mce;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
 	vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
-	return 0;
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -4212,6 +4216,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
 	.set_cr0 = svm_set_cr0,
+	.is_valid_cr4 = svm_is_valid_cr4,
 	.set_cr4 = svm_set_cr4,
 	.set_efer = svm_set_efer,
 	.get_idt = svm_get_idt,
arch/x86/kvm/svm/svm.h  +1 −1
@@ -358,7 +358,7 @@ void svm_vcpu_free_msrpm(u32 *msrpm);
 
 int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void svm_flush_tlb(struct kvm_vcpu *vcpu);
 void disable_nmi_singlestep(struct vcpu_svm *svm);
 bool svm_smi_blocked(struct kvm_vcpu *vcpu);
arch/x86/kvm/vmx/nested.c  +1 −1
@@ -4814,7 +4814,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	/*
 	 * The Intel VMX Instruction Reference lists a bunch of bits that are
 	 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
-	 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
+	 * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this).
 	 * Otherwise, we should fail with #UD.  But most faulting conditions
 	 * have already been checked by hardware, prior to the VM-exit for
 	 * VMXON.  We do test guest cr4.VMXE because processor CR4 always has
arch/x86/kvm/vmx/vmx.c  +18 −13
@@ -3095,7 +3095,23 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
 		vmcs_writel(GUEST_CR3, guest_cr3);
 }
 
-int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	/*
+	 * We operate under the default treatment of SMM, so VMX cannot be
+	 * enabled under SMM.  Note, whether or not VMXE is allowed at all is
+	 * handled by kvm_valid_cr4().
+	 */
+	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
+		return false;
+
+	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
+		return false;
+
+	return true;
+}
+
+void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	/*
@@ -3123,17 +3139,6 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		}
 	}
 
-	/*
-	 * We operate under the default treatment of SMM, so VMX cannot be
-	 * enabled under SMM.  Note, whether or not VMXE is allowed at all is
-	 * handled by kvm_valid_cr4().
-	 */
-	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
-		return 1;
-
-	if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
-		return 1;
-
 	vcpu->arch.cr4 = cr4;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
 
@@ -3164,7 +3169,6 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 	vmcs_writel(CR4_READ_SHADOW, cr4);
 	vmcs_writel(GUEST_CR4, hw_cr4);
-	return 0;
 }
 
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
@@ -7612,6 +7616,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
 	.set_cr0 = vmx_set_cr0,
+	.is_valid_cr4 = vmx_is_valid_cr4,
 	.set_cr4 = vmx_set_cr4,
 	.set_efer = vmx_set_efer,
 	.get_idt = vmx_get_idt,