Commit a9fa7cb6 authored by Paolo Bonzini

KVM: x86: replace is_smm checks with kvm_x86_ops.smi_allowed



Do not hardcode is_smm so that all the architectural conditions for
blocking SMIs are listed in a single place.  Well, in two places because
this introduces some code duplication between Intel and AMD.

This ensures that nested SVM obeys GIF in kvm_vcpu_has_events.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 88c604b6
svm.c  +1 −1
@@ -3783,7 +3783,7 @@ static bool svm_smi_allowed(struct kvm_vcpu *vcpu)
 		return false;
 	}
 
-	return true;
+	return !is_smm(vcpu);
 }
 
 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
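
For context, the hunk above shows only the tail of the function. Below is a minimal sketch of the shape svm_smi_allowed() takes after this patch, assuming the elided context is SVM's gif_set() check (which is what makes nested SVM obey GIF, per the commit message); a sketch, not the verbatim tree contents:

	static bool svm_smi_allowed(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/* Assumption: GIF=0 blocks SMIs per the AMD APM. */
		if (!gif_set(svm))
			return false;

		/* New in this patch: no SMI can be taken while already in SMM. */
		return !is_smm(vcpu);
	}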
vmx.c  +1 −1
@@ -7680,7 +7680,7 @@ static bool vmx_smi_allowed(struct kvm_vcpu *vcpu)
 	/* we need a nested vmexit to enter SMM, postpone if run is pending */
 	if (to_vmx(vcpu)->nested.nested_run_pending)
 		return false;
-	return true;
+	return !is_smm(vcpu);
 }
 
 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
x86.c  +3 −3
@@ -7764,8 +7764,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 	if (kvm_event_needs_reinjection(vcpu))
 		return 0;
 
-	if (vcpu->arch.smi_pending && !is_smm(vcpu) &&
-	    kvm_x86_ops.smi_allowed(vcpu)) {
+	if (vcpu->arch.smi_pending && kvm_x86_ops.smi_allowed(vcpu)) {
 		vcpu->arch.smi_pending = false;
 		++vcpu->arch.smi_count;
 		enter_smm(vcpu);
@@ -10206,7 +10205,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 		return true;
 
 	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
-	    (vcpu->arch.smi_pending && !is_smm(vcpu)))
+	    (vcpu->arch.smi_pending &&
+	     kvm_x86_ops.smi_allowed(vcpu)))
 		return true;
 
 	if (kvm_arch_interrupt_allowed(vcpu) &&
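
The net effect in miniature: kvm_vcpu_has_events() now defers the "is this pending SMI deliverable?" question to the vendor callback, so a vCPU whose nested guest has cleared GIF is no longer spuriously woken by a pending SMI. A standalone, hypothetical illustration of that ops-table indirection (toy code, not kernel code; every name below is invented):

	#include <stdbool.h>
	#include <stdio.h>

	struct vcpu {
		bool in_smm;	/* already handling an SMI */
		bool gif;	/* AMD Global Interrupt Flag */
	};

	struct vendor_ops {
		bool (*smi_allowed)(struct vcpu *v);
	};

	/* One place for all architectural SMI-blocking conditions. */
	static bool svm_like_smi_allowed(struct vcpu *v)
	{
		if (!v->gif)		/* GIF=0 blocks SMIs */
			return false;
		return !v->in_smm;	/* no re-entry while in SMM */
	}

	static const struct vendor_ops ops = { .smi_allowed = svm_like_smi_allowed };

	/* Mirrors the kvm_vcpu_has_events() check after this patch. */
	static bool vcpu_has_smi_event(struct vcpu *v, bool smi_pending)
	{
		return smi_pending && ops.smi_allowed(v);
	}

	int main(void)
	{
		struct vcpu v = { .in_smm = false, .gif = false };

		printf("%d\n", vcpu_has_smi_event(&v, true));	/* 0: GIF clear */
		v.gif = true;					/* guest runs STGI */
		printf("%d\n", vcpu_has_smi_event(&v, true));	/* 1: deliverable */
		return 0;
	}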