Commit 33b22172 authored by Paolo Bonzini

KVM: x86: move nested-related kvm_x86_ops to a separate struct



Clean up some of the patching of kvm_x86_ops by moving the kvm_x86_ops members
related to nested virtualization into a separate struct, kvm_x86_nested_ops.

As a result, these ops will always be non-NULL on VMX.  This is not a problem:

* check_nested_events is only called if is_guest_mode(vcpu) returns true

* get_nested_state treats VMXOFF state the same as nested being disabled

* set_nested_state fails if you attempt to set nested state while
  nesting is disabled

* nested_enable_evmcs could already be called on a CPU without VMX enabled
  in CPUID.

* nested_get_evmcs_version was fixed in the previous patch

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 25091990
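
To make the shape of the change concrete before the diff, here is a small standalone sketch of the pattern being adopted: the nested-virtualization callbacks move behind a single nested_ops pointer that every backend installs, instead of living as individually patched (and possibly NULL) kvm_x86_ops members. The sketch is illustrative only; the ops-struct layout and the svm_nested_ops name mirror the diff below, but the toy vcpu type, the simplified signatures, and main() are invented for the example and are not kernel code.

/* Toy model of the refactoring, not kernel code: callbacks that used to be
 * individually patched (and possibly NULL) members of kvm_x86_ops are now
 * grouped behind a nested_ops pointer that is always installed. */
#include <stdio.h>

struct vcpu { int guest_mode; };	/* stand-in for struct kvm_vcpu */

/* Mirrors the new struct kvm_x86_nested_ops from the diff below. */
struct nested_ops {
	int (*check_events)(struct vcpu *v);
	unsigned short (*get_evmcs_version)(struct vcpu *v);	/* may stay NULL */
};

static int svm_check_events(struct vcpu *v)
{
	return v->guest_mode ? 1 : 0;	/* pretend an event is pending */
}

/* Like svm_nested_ops in the diff: only .check_events is filled in; the
 * eVMCS hook is simply left NULL instead of being patched out of the
 * top-level ops table. */
static const struct nested_ops svm_nested_ops = {
	.check_events = svm_check_events,
};

struct x86_ops { const struct nested_ops *nested_ops; };
static const struct x86_ops kvm_x86_ops = { .nested_ops = &svm_nested_ops };

int main(void)
{
	struct vcpu v = { .guest_mode = 1 };

	/* nested_ops itself is always non-NULL, so call sites only test the
	 * members that are genuinely optional. */
	if (v.guest_mode)
		printf("check_events -> %d\n",
		       kvm_x86_ops.nested_ops->check_events(&v));

	if (kvm_x86_ops.nested_ops->get_evmcs_version)
		printf("eVMCS version -> %d\n",
		       (int)kvm_x86_ops.nested_ops->get_evmcs_version(&v));
	else
		printf("eVMCS not supported\n");

	return 0;
}

With the callbacks grouped this way, the only nested-specific patching left in the real backends is assigning .nested_ops, which is exactly what the svm_x86_ops change below does.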
+16 −13
@@ -1178,7 +1178,6 @@ struct kvm_x86_ops {
 			       struct x86_exception *exception);
 	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
 
-	int (*check_nested_events)(struct kvm_vcpu *vcpu);
 	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
 
 	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
@@ -1211,6 +1210,7 @@ struct kvm_x86_ops {
 
 	/* pmu operations of sub-arch */
 	const struct kvm_pmu_ops *pmu_ops;
+	const struct kvm_x86_nested_ops *nested_ops;
 
 	/*
 	 * Architecture specific hooks for vCPU blocking due to
@@ -1238,14 +1238,6 @@ struct kvm_x86_ops {
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
 
-	int (*get_nested_state)(struct kvm_vcpu *vcpu,
-				struct kvm_nested_state __user *user_kvm_nested_state,
-				unsigned user_data_size);
-	int (*set_nested_state)(struct kvm_vcpu *vcpu,
-				struct kvm_nested_state __user *user_kvm_nested_state,
-				struct kvm_nested_state *kvm_state);
-	bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
-
 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
 	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
@@ -1257,16 +1249,27 @@ struct kvm_x86_ops {
 
 	int (*get_msr_feature)(struct kvm_msr_entry *entry);
 
-	int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
-				   uint16_t *vmcs_version);
-	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
-
 	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
 
 	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
 	int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
 };
 
+struct kvm_x86_nested_ops {
+	int (*check_events)(struct kvm_vcpu *vcpu);
+	int (*get_state)(struct kvm_vcpu *vcpu,
+			 struct kvm_nested_state __user *user_kvm_nested_state,
+			 unsigned user_data_size);
+	int (*set_state)(struct kvm_vcpu *vcpu,
+			 struct kvm_nested_state __user *user_kvm_nested_state,
+			 struct kvm_nested_state *kvm_state);
+	bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+
+	int (*enable_evmcs)(struct kvm_vcpu *vcpu,
+			    uint16_t *vmcs_version);
+	uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
+};
+
 struct kvm_x86_init_ops {
 	int (*cpu_has_kvm_support)(void);
 	int (*disabled_by_bios)(void);
+2 −2
@@ -1799,8 +1799,8 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 	};
 	int i, nent = ARRAY_SIZE(cpuid_entries);
 
-	if (kvm_x86_ops.nested_get_evmcs_version)
-		evmcs_ver = kvm_x86_ops.nested_get_evmcs_version(vcpu);
+	if (kvm_x86_ops.nested_ops->get_evmcs_version)
+		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);
 
 	/* Skip NESTED_FEATURES if eVMCS is not supported */
 	if (!evmcs_ver)
+5 −1
@@ -784,7 +784,7 @@ static bool nested_exit_on_intr(struct vcpu_svm *svm)
 	return (svm->nested.intercept & 1ULL);
 }
 
-int svm_check_nested_events(struct kvm_vcpu *vcpu)
+static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	bool block_nested_events =
@@ -825,3 +825,7 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
 
 	return NESTED_EXIT_CONTINUE;
 }
+
+struct kvm_x86_nested_ops svm_nested_ops = {
+	.check_events = svm_check_nested_events,
+};
+5 −8
@@ -3902,9 +3902,9 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
 	/*
 	 * TODO: Last condition latch INIT signals on vCPU when
 	 * vCPU is in guest-mode and vmcb12 defines intercept on INIT.
-	 * To properly emulate the INIT intercept, SVM should implement
-	 * kvm_x86_ops.check_nested_events() and call nested_svm_vmexit()
-	 * there if an INIT signal is pending.
+	 * To properly emulate the INIT intercept,
+	 * svm_check_nested_events() should call nested_svm_vmexit()
+	 * if an INIT signal is pending.
 	 */
 	return !gif_set(svm) ||
 		   (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
@@ -4032,6 +4032,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.sched_in = svm_sched_in,
 
 	.pmu_ops = &amd_pmu_ops,
+	.nested_ops = &svm_nested_ops,
+
 	.deliver_posted_interrupt = svm_deliver_avic_intr,
 	.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
 	.update_pi_irte = svm_update_pi_irte,
@@ -4046,14 +4048,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.mem_enc_reg_region = svm_register_enc_region,
 	.mem_enc_unreg_region = svm_unregister_enc_region,
 
-	.nested_enable_evmcs = NULL,
-	.nested_get_evmcs_version = NULL,
-
 	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 
 	.apic_init_signal_blocked = svm_apic_init_signal_blocked,
-
-	.check_nested_events = svm_check_nested_events,
 };
 
 static struct kvm_x86_init_ops svm_init_ops __initdata = {
+2 −1
@@ -398,9 +398,10 @@ int nested_svm_exit_handled(struct vcpu_svm *svm);
 int nested_svm_check_permissions(struct vcpu_svm *svm);
 int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 			       bool has_error_code, u32 error_code);
-int svm_check_nested_events(struct kvm_vcpu *vcpu);
 int nested_svm_exit_special(struct vcpu_svm *svm);
 
+extern struct kvm_x86_nested_ops svm_nested_ops;
+
 /* avic.c */
 
 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)