Commit afaf0b2f authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Copy kvm_x86_ops by value to eliminate layer of indirection



Replace the kvm_x86_ops pointer in common x86 with an instance of the
struct to save one pointer dereference when invoking functions.  Copy the
struct by value to set the ops during kvm_init().

Arbitrarily use kvm_x86_ops.hardware_enable to track whether or not the
ops have been initialized, i.e. a vendor KVM module has been loaded.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200321202603.19355-7-sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 69c6f69a
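
The pattern is easy to see in isolation. Below is a minimal, self-contained C sketch of the same idea; it is not KVM code, and demo_ops, demo_init() and the two callbacks are hypothetical stand-ins. Common code owns the ops struct by value, a vendor module copies its table in once at init time, and a mandatory member serves as the "already initialized" sentinel:

/*
 * Illustration only: demo_ops and friends are made-up names that
 * mirror the shape of kvm_x86_ops rather than reproducing it.
 */
struct demo_ops {
	int  (*hardware_enable)(void);     /* mandatory hook */
	void (*vcpu_blocking)(void *vcpu); /* optional hook, may be NULL */
};

/* Common code owns an instance of the struct, not a pointer to one. */
static struct demo_ops demo_ops;

/* A vendor module hands in its table exactly once, at init time. */
int demo_init(const struct demo_ops *vendor_ops)
{
	/* The mandatory member doubles as the "module loaded" flag. */
	if (demo_ops.hardware_enable)
		return -1;
	demo_ops = *vendor_ops;	/* copy by value: callers skip a dereference */
	return 0;
}

void demo_vcpu_blocking(void *vcpu)
{
	/* Optional hooks are still NULL-checked, exactly as in the diff. */
	if (demo_ops.vcpu_blocking)
		demo_ops.vcpu_blocking(vcpu);
}

Every call site then reads kvm_x86_ops.foo(...) instead of kvm_x86_ops->foo(...), which is the mechanical substitution the hunks below apply.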
arch/x86/include/asm/kvm_host.h (+9 −9)
@@ -1274,13 +1274,13 @@ struct kvm_arch_async_pf {
 
 extern u64 __read_mostly host_efer;
 
-extern struct kvm_x86_ops *kvm_x86_ops;
+extern struct kvm_x86_ops kvm_x86_ops;
 extern struct kmem_cache *x86_fpu_cache;
 
 #define __KVM_HAVE_ARCH_VM_ALLOC
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
-	return __vmalloc(kvm_x86_ops->vm_size,
+	return __vmalloc(kvm_x86_ops.vm_size,
 			 GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
 }
 void kvm_arch_free_vm(struct kvm *kvm);
@@ -1288,8 +1288,8 @@ void kvm_arch_free_vm(struct kvm *kvm);
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 {
-	if (kvm_x86_ops->tlb_remote_flush &&
-	    !kvm_x86_ops->tlb_remote_flush(kvm))
+	if (kvm_x86_ops.tlb_remote_flush &&
+	    !kvm_x86_ops.tlb_remote_flush(kvm))
 		return 0;
 	else
 		return -ENOTSUPP;
@@ -1375,7 +1375,7 @@ extern u64 kvm_mce_cap_supported;
  *
  * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
  *		   decode the instruction length.  For use *only* by
- *		   kvm_x86_ops->skip_emulated_instruction() implementations.
+ *		   kvm_x86_ops.skip_emulated_instruction() implementations.
  *
  * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
  *			     retry native execution under certain conditions,
@@ -1669,14 +1669,14 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
 
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-	if (kvm_x86_ops->vcpu_blocking)
-		kvm_x86_ops->vcpu_blocking(vcpu);
+	if (kvm_x86_ops.vcpu_blocking)
+		kvm_x86_ops.vcpu_blocking(vcpu);
 }
 
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-	if (kvm_x86_ops->vcpu_unblocking)
-		kvm_x86_ops->vcpu_unblocking(vcpu);
+	if (kvm_x86_ops.vcpu_unblocking)
+		kvm_x86_ops.vcpu_unblocking(vcpu);
 }
 
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
arch/x86/kvm/cpuid.c (+2 −2)
@@ -209,7 +209,7 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 	vcpu->arch.cpuid_nent = cpuid->nent;
 	cpuid_fix_nx_cap(vcpu);
 	kvm_apic_set_version(vcpu);
-	kvm_x86_ops->cpuid_update(vcpu);
+	kvm_x86_ops.cpuid_update(vcpu);
 	r = kvm_update_cpuid(vcpu);
 
 out:
@@ -232,7 +232,7 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
 		goto out;
 	vcpu->arch.cpuid_nent = cpuid->nent;
 	kvm_apic_set_version(vcpu);
-	kvm_x86_ops->cpuid_update(vcpu);
+	kvm_x86_ops.cpuid_update(vcpu);
 	r = kvm_update_cpuid(vcpu);
 out:
 	return r;
arch/x86/kvm/hyperv.c (+4 −4)
@@ -1022,7 +1022,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 		addr = gfn_to_hva(kvm, gfn);
 		if (kvm_is_error_hva(addr))
 			return 1;
-		kvm_x86_ops->patch_hypercall(vcpu, instructions);
+		kvm_x86_ops.patch_hypercall(vcpu, instructions);
 		((unsigned char *)instructions)[3] = 0xc3; /* ret */
 		if (__copy_to_user((void __user *)addr, instructions, 4))
 			return 1;
@@ -1607,7 +1607,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 	 * hypercall generates UD from non zero cpl and real mode
 	 * per HYPER-V spec
 	 */
-	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+	if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
@@ -1800,8 +1800,8 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 	};
 	int i, nent = ARRAY_SIZE(cpuid_entries);
 
-	if (kvm_x86_ops->nested_get_evmcs_version)
-		evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+	if (kvm_x86_ops.nested_get_evmcs_version)
+		evmcs_ver = kvm_x86_ops.nested_get_evmcs_version(vcpu);
 
 	/* Skip NESTED_FEATURES if eVMCS is not supported */
 	if (!evmcs_ver)
arch/x86/kvm/kvm_cache_regs.h (+5 −5)
@@ -68,7 +68,7 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
 		return 0;
 
 	if (!kvm_register_is_available(vcpu, reg))
-		kvm_x86_ops->cache_reg(vcpu, reg);
+		kvm_x86_ops.cache_reg(vcpu, reg);
 
 	return vcpu->arch.regs[reg];
 }
@@ -108,7 +108,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 	might_sleep();  /* on svm */
 
 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
-		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);
 
 	return vcpu->arch.walk_mmu->pdptrs[index];
 }
@@ -117,7 +117,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
 	if (tmask & vcpu->arch.cr0_guest_owned_bits)
-		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
+		kvm_x86_ops.decache_cr0_guest_bits(vcpu);
 	return vcpu->arch.cr0 & mask;
 }
 
@@ -130,14 +130,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
 	if (tmask & vcpu->arch.cr4_guest_owned_bits)
-		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+		kvm_x86_ops.decache_cr4_guest_bits(vcpu);
 	return vcpu->arch.cr4 & mask;
 }
 
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
-		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_CR3);
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
 	return vcpu->arch.cr3;
 }
 
arch/x86/kvm/lapic.c (+15 −15)
@@ -463,7 +463,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 	if (unlikely(vcpu->arch.apicv_active)) {
 		/* need to update RVI */
 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-		kvm_x86_ops->hwapic_irr_update(vcpu,
+		kvm_x86_ops.hwapic_irr_update(vcpu,
 				apic_find_highest_irr(apic));
 	} else {
 		apic->irr_pending = false;
@@ -488,7 +488,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 	 * just set SVI.
 	 */
 	if (unlikely(vcpu->arch.apicv_active))
-		kvm_x86_ops->hwapic_isr_update(vcpu, vec);
+		kvm_x86_ops.hwapic_isr_update(vcpu, vec);
 	else {
 		++apic->isr_count;
 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -536,7 +536,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 	 * and must be left alone.
 	 */
 	if (unlikely(vcpu->arch.apicv_active))
-		kvm_x86_ops->hwapic_isr_update(vcpu,
+		kvm_x86_ops.hwapic_isr_update(vcpu,
 					       apic_find_highest_isr(apic));
 	else {
 		--apic->isr_count;
@@ -674,7 +674,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
 	int highest_irr;
 	if (apic->vcpu->arch.apicv_active)
-		highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
+		highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu);
 	else
 		highest_irr = apic_find_highest_irr(apic);
 	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
@@ -1063,7 +1063,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 						       apic->regs + APIC_TMR);
 		}
 
-		if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
+		if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) {
 			kvm_lapic_set_irr(vector, apic);
 			kvm_make_request(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
@@ -1746,7 +1746,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
 {
 	WARN_ON(preemptible());
 	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
-	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
+	kvm_x86_ops.cancel_hv_timer(apic->vcpu);
 	apic->lapic_timer.hv_timer_in_use = false;
 }
 
@@ -1757,13 +1757,13 @@ static bool start_hv_timer(struct kvm_lapic *apic)
 	bool expired;
 
 	WARN_ON(preemptible());
-	if (!kvm_x86_ops->set_hv_timer)
+	if (!kvm_x86_ops.set_hv_timer)
 		return false;
 
 	if (!ktimer->tscdeadline)
 		return false;
 
-	if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
+	if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
 		return false;
 
 	ktimer->hv_timer_in_use = true;
@@ -2190,7 +2190,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
 
 	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
-		kvm_x86_ops->set_virtual_apic_mode(vcpu);
+		kvm_x86_ops.set_virtual_apic_mode(vcpu);
 
 	apic->base_address = apic->vcpu->arch.apic_base &
 			     MSR_IA32_APICBASE_BASE;
@@ -2268,9 +2268,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vcpu->arch.pv_eoi.msr_val = 0;
 	apic_update_ppr(apic);
 	if (vcpu->arch.apicv_active) {
-		kvm_x86_ops->apicv_post_state_restore(vcpu);
-		kvm_x86_ops->hwapic_irr_update(vcpu, -1);
-		kvm_x86_ops->hwapic_isr_update(vcpu, -1);
+		kvm_x86_ops.apicv_post_state_restore(vcpu);
+		kvm_x86_ops.hwapic_irr_update(vcpu, -1);
+		kvm_x86_ops.hwapic_isr_update(vcpu, -1);
 	}
 
 	vcpu->arch.apic_arb_prio = 0;
@@ -2521,10 +2521,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	kvm_apic_update_apicv(vcpu);
 	apic->highest_isr_cache = -1;
 	if (vcpu->arch.apicv_active) {
-		kvm_x86_ops->apicv_post_state_restore(vcpu);
-		kvm_x86_ops->hwapic_irr_update(vcpu,
+		kvm_x86_ops.apicv_post_state_restore(vcpu);
+		kvm_x86_ops.hwapic_irr_update(vcpu,
 				apic_find_highest_irr(apic));
-		kvm_x86_ops->hwapic_isr_update(vcpu,
+		kvm_x86_ops.hwapic_isr_update(vcpu,
 				apic_find_highest_isr(apic));
 	}
 	kvm_make_request(KVM_REQ_EVENT, vcpu);