Commit b1d40575 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: x86: Switch KVM guest to using interrupts for page ready APF delivery



KVM now supports using an interrupt for 'page ready' APF event delivery, and
the legacy mechanism has been deprecated. Switch KVM guests to the new one.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200525144125.143875-9-vkuznets@redhat.com>
[Use HYPERVISOR_CALLBACK_VECTOR instead of a separate vector. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 49b3deaa
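
For orientation before the diff: with this change the guest keeps receiving 'page not present' events as #PF, but asks the host to deliver 'page ready' events as an interrupt on HYPERVISOR_CALLBACK_VECTOR. Below is a condensed, illustrative sketch of the per-CPU enablement; the function name and parameter are invented for illustration only, and the real code is kvm_guest_cpu_init() in arch/x86/kernel/kvm.c further down.

/* Sketch only, not the literal kernel code. */
#include <asm/irq_vectors.h>	/* HYPERVISOR_CALLBACK_VECTOR */
#include <asm/kvm_para.h>	/* MSR_KVM_ASYNC_PF_*, KVM_ASYNC_PF_* flags */
#include <asm/msr.h>		/* wrmsrl() */

static void example_enable_async_pf_int(u64 apf_reason_pa)
{
	/* Request 'page ready' notifications as interrupts, not #PF. */
	u64 val = apf_reason_pa | KVM_ASYNC_PF_ENABLED |
		  KVM_ASYNC_PF_DELIVERY_AS_INT;

	/* Tell the host which vector to inject for 'page ready'. */
	wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

	/* Enable async #PF; the low bits of the address carry the flags. */
	wrmsrl(MSR_KVM_ASYNC_PF_EN, val);
}

The interrupt side (kvm_async_pf_intr() in the diff below) reads apf_reason.token, wakes the waiting task and acknowledges the event by writing 1 to MSR_KVM_ASYNC_PF_ACK.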
arch/x86/Kconfig  +1 −0
@@ -801,6 +801,7 @@ config KVM_GUEST
 	depends on PARAVIRT
 	select PARAVIRT_CLOCK
 	select ARCH_CPUIDLE_HALTPOLL
+	select X86_HV_CALLBACK_VECTOR
 	default y
 	---help---
 	  This option enables various optimizations for running under the KVM
arch/x86/entry/entry_32.S  +5 −0
@@ -1475,6 +1475,11 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,

 #endif /* CONFIG_HYPERV */
 
+#ifdef CONFIG_KVM_GUEST
+BUILD_INTERRUPT3(kvm_async_pf_vector, HYPERVISOR_CALLBACK_VECTOR,
+		 kvm_async_pf_intr)
+#endif
+
 SYM_CODE_START(page_fault)
 	ASM_CLAC
 	pushl	$do_page_fault
arch/x86/entry/entry_64.S  +5 −0
@@ -1190,6 +1190,11 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
 	acrn_hv_callback_vector acrn_hv_vector_handler
 #endif
 
+#ifdef CONFIG_KVM_GUEST
+apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
+	kvm_async_pf_vector kvm_async_pf_intr
+#endif
+
 idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
 idtentry int3			do_int3			has_error_code=0	create_gap=1
 idtentry stack_segment		do_stack_segment	has_error_code=1
arch/x86/include/asm/kvm_para.h  +7 −0
@@ -4,6 +4,7 @@

 #include <asm/processor.h>
 #include <asm/alternative.h>
+#include <linux/interrupt.h>
 #include <uapi/asm/kvm_para.h>
 
 extern void kvmclock_init(void);
@@ -104,6 +105,12 @@ static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 		return false;
 }
 
+extern __visible void kvm_async_pf_vector(void);
+#ifdef CONFIG_TRACING
+#define trace_kvm_async_pf_vector kvm_async_pf_vector
+#endif
+__visible void __irq_entry kvm_async_pf_intr(struct pt_regs *regs);
+
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 void __init kvm_spinlock_init(void);
 #else /* !CONFIG_PARAVIRT_SPINLOCKS */
arch/x86/kernel/kvm.c  +33 −15
@@ -233,15 +233,10 @@ NOKPROBE_SYMBOL(kvm_read_and_reset_apf_flags);

 bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
-	u32 reason = kvm_read_and_reset_apf_flags();
+	u32 flags = kvm_read_and_reset_apf_flags();
 
-	switch (reason) {
-	case KVM_PV_REASON_PAGE_NOT_PRESENT:
-	case KVM_PV_REASON_PAGE_READY:
-		break;
-	default:
+	if (!flags)
 		return false;
-	}
 
 	/*
 	 * If the host managed to inject an async #PF into an interrupt
@@ -251,19 +246,38 @@ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
 		panic("Host injected async #PF in interrupt disabled region\n");
 
-	if (reason == KVM_PV_REASON_PAGE_NOT_PRESENT) {
+	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
 		if (unlikely(!(user_mode(regs))))
 			panic("Host injected async #PF in kernel mode\n");
 		/* Page is swapped out by the host. */
 		kvm_async_pf_task_wait_schedule(token);
-	} else {
-		rcu_irq_enter();
-		kvm_async_pf_task_wake(token);
-		rcu_irq_exit();
+		return true;
 	}
+
+	WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
 	return true;
 }
 NOKPROBE_SYMBOL(__kvm_handle_async_pf);
 
+__visible void __irq_entry kvm_async_pf_intr(struct pt_regs *regs)
+{
+	u32 token;
+
+	entering_ack_irq();
+
+	inc_irq_stat(irq_hv_callback_count);
+
+	if (__this_cpu_read(apf_reason.enabled)) {
+		token = __this_cpu_read(apf_reason.token);
+		rcu_irq_enter();
+		kvm_async_pf_task_wake(token);
+		rcu_irq_exit();
+		__this_cpu_write(apf_reason.token, 0);
+		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
+	}
+
+	exiting_irq();
+}
+
 static void __init paravirt_ops_setup(void)
 {
@@ -308,17 +322,19 @@ static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)

 static void kvm_guest_cpu_init(void)
 {
-	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
-		u64 pa;
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
+		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
 
 		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));
 
-		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
-		pa |= KVM_ASYNC_PF_ENABLED;
+		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
 
 		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
 			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
 
+		wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
+
 		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
 		__this_cpu_write(apf_reason.enabled, 1);
 		pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
@@ -643,8 +659,10 @@ static void __init kvm_guest_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		apic_set_eoi_write(kvm_guest_apic_eoi_write);
 
-	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf)
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
 		static_branch_enable(&kvm_async_pf_enabled);
+		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, kvm_async_pf_vector);
+	}
 
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;