Commit 56083bdf authored by Paolo Bonzini

KVM: x86: check_nested_events is never NULL



Both Intel and AMD now implement it, so there is no need to check if the
callback is implemented.

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1d2c6c9b
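
For context, a small standalone C sketch of the pattern the commit message relies on: once every vendor backend fills in the callback slot, callers can invoke it unconditionally and the NULL guard becomes dead code. The struct, vcpu type, and main() below are invented for illustration only; just the callback names mirror the real vmx/svm implementations.

/* Standalone illustration, not kernel code. */
#include <stdio.h>

struct vcpu { int guest_mode; };

struct x86_ops {
	int (*check_nested_events)(struct vcpu *vcpu);
};

/* Both "vendor" backends supply an implementation of the callback. */
static int vmx_check_nested_events(struct vcpu *vcpu) { (void)vcpu; return 0; }
static int svm_check_nested_events(struct vcpu *vcpu) { (void)vcpu; return 0; }

static struct x86_ops ops;

static int inject_pending_event(struct vcpu *vcpu)
{
	/* No "&& ops.check_nested_events" guard: the slot is always set. */
	if (vcpu->guest_mode)
		return ops.check_nested_events(vcpu);
	return 0;
}

int main(void)
{
	struct vcpu v = { .guest_mode = 1 };
	int use_vmx = 1;

	/* Whichever backend is active, the callback pointer is non-NULL. */
	ops.check_nested_events = use_vmx ? vmx_check_nested_events
					  : svm_check_nested_events;
	printf("check_nested_events returned %d\n", inject_pending_event(&v));
	return 0;
}

The diff below makes exactly that simplification at the three affected call sites.
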
+3 −3
@@ -7699,7 +7699,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 	 * from L2 to L1 due to pending L1 events which require exit
 	 * from L2 to L1.
 	 */
-	if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events) {
+	if (is_guest_mode(vcpu)) {
 		r = kvm_x86_ops.check_nested_events(vcpu);
 		if (r != 0)
 			return r;
@@ -7761,7 +7761,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 		 * proposal and current concerns.  Perhaps we should be setting
 		 * KVM_REQ_EVENT only on certain events and not unconditionally?
 		 */
-		if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events) {
+		if (is_guest_mode(vcpu)) {
			r = kvm_x86_ops.check_nested_events(vcpu);
 			if (r != 0)
 				return r;
@@ -8527,7 +8527,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 
 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events)
+	if (is_guest_mode(vcpu))
 		kvm_x86_ops.check_nested_events(vcpu);
 
 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&