Commit 26d05b36 authored by Paolo Bonzini
Browse files

Merge branch 'kvm-async-pf-int' into HEAD

parents 0ed076c7 b1d40575
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -802,6 +802,7 @@ config KVM_GUEST
	depends on PARAVIRT
	select PARAVIRT_CLOCK
	select ARCH_CPUIDLE_HALTPOLL
	select X86_HV_CALLBACK_VECTOR
	default y
	help
	  This option enables various optimizations for running under the KVM
+4 −0
Original line number Diff line number Diff line
@@ -647,6 +647,10 @@ DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_acrn_hv_callback);
DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_xen_hvm_callback);
#endif

#ifdef CONFIG_KVM_GUEST
DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_kvm_asyncpf_interrupt);
#endif

#undef X86_TRAP_OTHER

#endif
+1 −0
Original line number Diff line number Diff line
@@ -4,6 +4,7 @@

#include <asm/processor.h>
#include <asm/alternative.h>
#include <linux/interrupt.h>
#include <uapi/asm/kvm_para.h>

extern void kvmclock_init(void);
+34 −13
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
@@ -232,16 +233,11 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);

noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	u32 reason = kvm_read_and_reset_apf_flags();
	u32 flags = kvm_read_and_reset_apf_flags();
	bool rcu_exit;

	switch (reason) {
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
	case KVM_PV_REASON_PAGE_READY:
		break;
	default:
	if (!flags)
		return false;
	}

	rcu_exit = idtentry_enter_cond_rcu(regs);
	instrumentation_begin();
@@ -254,13 +250,13 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
		panic("Host injected async #PF in interrupt disabled region\n");

	if (reason == KVM_PV_REASON_PAGE_NOT_PRESENT) {
	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		if (unlikely(!(user_mode(regs))))
			panic("Host injected async #PF in kernel mode\n");
		/* Page is swapped out by the host. */
		kvm_async_pf_task_wait_schedule(token);
	} else {
		kvm_async_pf_task_wake(token);
		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
	}

	instrumentation_end();
@@ -268,6 +264,27 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
	return true;
}

/*
 * Interrupt-based delivery of "async page ready" notifications from the
 * KVM host (KVM_ASYNC_PF_DELIVERY_AS_INT mode, via the hypervisor
 * callback vector).
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
	struct pt_regs *prev_regs = set_irq_regs(regs);
	bool rcu_state;

	rcu_state = idtentry_enter_cond_rcu(regs);

	inc_irq_stat(irq_hv_callback_count);

	if (__this_cpu_read(apf_reason.enabled)) {
		/* Wake the task waiting on this token, then acknowledge. */
		kvm_async_pf_task_wake(__this_cpu_read(apf_reason.token));
		/* Clear the token before the ACK MSR write, matching the
		 * host-side handshake order. */
		__this_cpu_write(apf_reason.token, 0);
		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
	}

	idtentry_exit_cond_rcu(regs, rcu_state);
	set_irq_regs(prev_regs);
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
@@ -311,17 +328,19 @@ static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)

static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa;
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
		pa |= KVM_ASYNC_PF_ENABLED;
		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
@@ -646,8 +665,10 @@ static void __init kvm_guest_init(void)
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf)
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		static_branch_enable(&kvm_async_pf_enabled);
		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
	}

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;