Commit 9ec19493 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: clear SMM flags before loading state while leaving SMM



RSM emulation is currently broken on VMX when the interrupted guest has
CR4.VMXE=1.  Stop dancing around the issue of HF_SMM_MASK being set when
loading SMSTATE into architectural state, e.g. by toggling it for
problematic flows, and simply clear HF_SMM_MASK prior to loading
architectural state (from SMRAM save state area).
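
For context on why loading state with HF_SMM_MASK still set fails: the
Fixes commit made KVM reject attempts to set CR4.VMXE while the vCPU is
in SMM, and is_smm() keys off HF_SMM_MASK.  A condensed sketch of that
check, paraphrased from commit 5bea5123 rather than quoted verbatim:

	/* From arch/x86/kvm/x86.h: SMM is tracked via a hidden flag. */
	static inline bool is_smm(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.hflags & HF_SMM_MASK;
	}

	/* Paraphrased CR4 write path after commit 5bea5123. */
	static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
	{
		if (cr4 & X86_CR4_VMXE) {
			/*
			 * VMX cannot be enabled under SMM, so if RSM restores
			 * CR4 while HF_SMM_MASK is still set, a saved
			 * CR4.VMXE=1 is rejected and emulation fails.
			 */
			if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
				return 1;
		}
		/* ... remainder of CR4 emulation elided ... */
		return 0;
	}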

Reported-by: Jon Doron <arilou@gmail.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Liran Alon <liran.alon@oracle.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Fixes: 5bea5123 ("KVM: VMX: check nested state and CR4.VMXE against SMM")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Tested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c5833c7a
arch/x86/kvm/emulate.c +6 −6
@@ -2571,6 +2571,12 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (ret != X86EMUL_CONTINUE)
 		return X86EMUL_UNHANDLEABLE;
 
+	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+		ctxt->ops->set_nmi_mask(ctxt, false);
+
+	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
 	/*
 	 * Get back to real mode, to prepare a safe state in which to load
 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
@@ -2624,12 +2630,6 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_UNHANDLEABLE;
 	}
 
-	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-		ctxt->ops->set_nmi_mask(ctxt, false);
-
-	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
-
 	ctxt->ops->post_leave_smm(ctxt);
 
 	return X86EMUL_CONTINUE;
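
Net effect of the two hunks above, as a condensed sketch of em_rsm()
after the patch (unrelated details elided, not the verbatim function):

	static int em_rsm(struct x86_emulate_ctxt *ctxt)
	{
		/* 1. Read the SMRAM save state area (failure is unhandleable). */

		/* 2. Unmask NMIs unless the SMI arrived inside an NMI handler. */
		if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
			ctxt->ops->set_nmi_mask(ctxt, false);

		/* 3. Clear the SMM flags *before* any architectural state is
		 *    loaded, so checks like is_smm() no longer fire during RSM. */
		ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
			~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));

		/* 4. Return to real mode, call pre_leave_smm(), load the saved
		 *    register state, then finish with post_leave_smm(). */
		return X86EMUL_CONTINUE;
	}
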
arch/x86/kvm/svm.c +4 −8
@@ -6239,21 +6239,17 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	struct page *page;
 	u64 guest;
 	u64 vmcb;
-	int ret;
 
 	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
 	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
 	if (guest) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		nested_vmcb = nested_svm_map(svm, vmcb, &page);
-		if (nested_vmcb)
-			enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
-		else
-			ret = 1;
-		vcpu->arch.hflags |= HF_SMM_MASK;
+		if (!nested_vmcb)
+			return 1;
+		enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
 	}
-	return ret;
+	return 0;
 }
 
 static int enable_smi_window(struct kvm_vcpu *vcpu)
arch/x86/kvm/vmx/vmx.c +0 −2
@@ -7409,9 +7409,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	}
 
 	if (vmx->nested.smm.guest_mode) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
-		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;