Commit 52cd0d97 authored by Linus Torvalds
Pull more KVM updates from Paolo Bonzini:
 "The guest side of the asynchronous page fault work has been delayed to
  5.9 in order to sync with Thomas's interrupt entry rework, but here's
  the rest of the KVM updates for this merge window.

  MIPS:
   - Loongson port

  PPC:
   - Fixes

  ARM:
   - Fixes

  x86:
   - KVM_SET_USER_MEMORY_REGION optimizations
   - Fixes
   - Selftest fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (62 commits)
  KVM: x86: do not pass poisoned hva to __kvm_set_memory_region
  KVM: selftests: fix sync_with_host() in smm_test
  KVM: async_pf: Inject 'page ready' event only if 'page not present' was previously injected
  KVM: async_pf: Cleanup kvm_setup_async_pf()
  kvm: i8254: remove redundant assignment to pointer s
  KVM: x86: respect singlestep when emulating instruction
  KVM: selftests: Don't probe KVM_CAP_HYPERV_ENLIGHTENED_VMCS when nested VMX is unsupported
  KVM: selftests: do not substitute SVM/VMX check with KVM_CAP_NESTED_STATE check
  KVM: nVMX: Consult only the "basic" exit reason when routing nested exit
  KVM: arm64: Move hyp_symbol_addr() to kvm_asm.h
  KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
  KVM: arm64: Make vcpu_cp1x() work on Big Endian hosts
  KVM: arm64: Remove host_cpu_context member from vcpu structure
  KVM: arm64: Stop sparse from moaning at __hyp_this_cpu_ptr
  KVM: arm64: Handle PtrAuth traps early
  KVM: x86: Unexport x86_fpu_cache and make it static
  KVM: selftests: Ignore KVM 5-level paging support for VM_MODE_PXXV48_4K
  KVM: arm64: Save the host's PtrAuth keys in non-preemptible context
  KVM: arm64: Stop save/restoring ACTLR_EL1
  KVM: arm64: Add emulation for 32bit guests accessing ACTLR2
  ...
parents d2d5439d 49b3deaa
+30 −3
@@ -81,12 +81,39 @@ extern u32 __kvm_get_mdcr_el2(void);
 
 extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 
-/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s)						\
+	({								\
+		typeof(s) *addr;					\
+		asm("adrp	%0, %1\n"				\
+		    "add	%0, %0, :lo12:%1\n"			\
+		    : "=r" (addr) : "S" (&s));				\
+		addr;							\
+	})
+
+/*
+ * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
+ * provided that sym is really a *symbol* and not a pointer obtained from
+ * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
+ * sparse quiet.
+ */
 #define __hyp_this_cpu_ptr(sym)						\
 	({								\
-		void *__ptr = hyp_symbol_addr(sym);			\
+		void *__ptr;						\
+		__verify_pcpu_ptr(&sym);				\
+		__ptr = hyp_symbol_addr(sym);				\
 		__ptr += read_sysreg(tpidr_el2);			\
-		(typeof(&sym))__ptr;					\
+		(typeof(sym) __kernel __force *)__ptr;			\
 	 })
 
 #define __hyp_this_cpu_read(sym)					\
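The moved hyp_symbol_addr() macro is the interesting part of this hunk: it computes a symbol's address with an adrp/add pair relative to the current PC instead of loading a link-time VA, which is what makes it safe at HYP, where the kernel's absolute VAs do not map. Here is a minimal standalone AArch64 userspace sketch of the same idiom (hypothetical demo code, not part of the patch; in userspace both addresses agree, while at EL2 only the PC-relative one would be usable):

#include <stdio.h>

static int some_symbol;	/* hypothetical symbol to locate */

/* Same adrp/add pattern as hyp_symbol_addr(): adrp finds the symbol's
 * 4KiB page relative to the PC, add fills in the low 12 bits. */
#define pc_relative_addr(s)						\
	({								\
		typeof(s) *__addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (__addr) : "S" (&s));			\
		__addr;							\
	})

int main(void)
{
	printf("absolute VA:    %p\n", (void *)&some_symbol);
	printf("PC-relative VA: %p\n", (void *)pc_relative_addr(some_symbol));
	return 0;
}

Build with gcc on an AArch64 machine; the "S" asm constraint accepts only a symbolic address, which is how the macro enforces its "symbols only, not pointers" rule at compile time.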
+0 −6
@@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
 }
 
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
-{
-	if (vcpu_has_ptrauth(vcpu))
-		vcpu_ptrauth_disable(vcpu);
-}
-
 static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.vsesr_el2;
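Context for this removal: vcpu_ptrauth_setup_lazy() used to re-arm the PtrAuth traps on every vcpu load so key state could be switched lazily on first guest use; with the "KVM: arm64: Handle PtrAuth traps early" change listed above, the helper is gone. As a rough, hypothetical sketch of the mechanism it drove (plain C stand-in, not kernel code): clearing HCR_EL2.API/APK makes any guest PtrAuth use trap to EL2, and setting them lets the guest run untrapped once its keys are loaded.

#include <stdint.h>

#define HCR_APK	(UINT64_C(1) << 40)	/* clear => key-register accesses trap */
#define HCR_API	(UINT64_C(1) << 41)	/* clear => PAC instructions trap */

/* Mirrors vcpu_ptrauth_disable() above: with the bits clear, the next
 * guest PtrAuth use bounces to the hypervisor. */
static void ptrauth_disable(uint64_t *hcr_el2)
{
	*hcr_el2 &= ~(HCR_API | HCR_APK);
}

/* Counterpart: keys are loaded, stop trapping. */
static void ptrauth_enable(uint64_t *hcr_el2)
{
	*hcr_el2 |= HCR_API | HCR_APK;
}

int main(void)
{
	uint64_t hcr = 0;
	ptrauth_enable(&hcr);	/* guest may now use PtrAuth untrapped */
	ptrauth_disable(&hcr);	/* re-arm the traps, e.g. across a switch */
	return hcr == 0 ? 0 : 1;
}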
+4 −5
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
 	struct kvm_guest_debug_arch vcpu_debug_state;
 	struct kvm_guest_debug_arch external_debug_state;
 
-	/* Pointer to host CPU context */
-	struct kvm_cpu_context *host_cpu_context;
-
 	struct thread_info *host_thread_info;	/* hyp VA */
 	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
 
@@ -404,8 +401,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
  * CP14 and CP15 live in the same array, as they are backed by the
  * same system registers.
  */
-#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r)])
-#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r)])
+#define CPx_BIAS		IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+
+#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
+#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
 
 struct kvm_vm_stat {
 	ulong remote_tlb_flush;
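The CPx_BIAS hunk is the "Make vcpu_cp1x() work on Big Endian hosts" fix from the list above: the 32-bit CP14/CP15 accessors alias 64-bit sysreg storage, and the u32 half that actually holds the value sits at index 0 on a little-endian host but index 1 on a big-endian one, hence the index XOR. A minimal endianness sketch (hypothetical demo, not the kernel's union) makes the aliasing concrete:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A 32-bit register value kept in the low half of 64-bit storage,
	 * as with the sys_regs/copro aliasing behind vcpu_cp1x(). */
	uint64_t storage = 0xdeadbeefULL;
	uint32_t view[2];

	memcpy(view, &storage, sizeof(view));

	/* LE: view[0] holds it (bias 0); BE: view[1] holds it (bias 1).
	 * XORing the index with this bias, as CPx_BIAS does, always picks
	 * the half that contains the 32-bit copy. */
	int bias = (view[0] == 0xdeadbeefu) ? 0 : 1;
	printf("32-bit value lives at u32 index %d => bias %d\n", bias, bias);
	return 0;
}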
+0 −20
@@ -107,26 +107,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s)						\
-	({								\
-		typeof(s) *addr;					\
-		asm("adrp	%0, %1\n"				\
-		    "add	%0, %0, :lo12:%1\n"			\
-		    : "=r" (addr) : "S" (&s));				\
-		addr;							\
-	})
-
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
+28 −0
@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
 	[7] = { 4, 4 },		/* FIQ, unused */
 };
 
+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	if (vcpu->arch.sysregs_loaded_on_cpu) {
+		kvm_arch_vcpu_put(vcpu);
+		return true;
+	}
+
+	preempt_enable();
+	return false;
+}
+
+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
+{
+	if (loaded) {
+		kvm_arch_vcpu_load(vcpu, smp_processor_id());
+		preempt_enable();
+	}
+}
+
 /*
  * When an exception is taken, most CPSR fields are left unchanged in the
  * handler. However, some are explicitly overridden (e.g. M[4:0]).
@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 void kvm_inject_undef32(struct kvm_vcpu *vcpu)
 {
+	bool loaded = pre_fault_synchronize(vcpu);
+
 	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
+	post_fault_synchronize(vcpu, loaded);
 }
 
 /*
@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 	u32 vect_offset;
 	u32 *far, *fsr;
 	bool is_lpae;
+	bool loaded;
+
+	loaded = pre_fault_synchronize(vcpu);
 
 	if (is_pabt) {
 		vect_offset = 12;
@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
 		*fsr = DFSR_FSC_EXTABT_nLPAE;
 	}
+
+	post_fault_synchronize(vcpu, loaded);
 }
 
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
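These hunks implement "Synchronize sysreg state on injecting an AArch32 exception" from the list above: prepare_fault32() and inject_abt32() edit the vcpu's in-memory register copy, which is stale while the sysregs are live on the CPU, so the state is pulled back first (put) and restored afterwards (load). A rough userspace model of the pattern, with hypothetical stand-ins for kvm_arch_vcpu_put()/kvm_arch_vcpu_load():

#include <stdbool.h>
#include <stdio.h>

struct vcpu {
	bool sysregs_loaded_on_cpu;	/* state currently lives in hw regs */
	unsigned int spsr;		/* memory copy, stale while loaded */
};

static void vcpu_put(struct vcpu *v)  { v->sysregs_loaded_on_cpu = false; }
static void vcpu_load(struct vcpu *v) { v->sysregs_loaded_on_cpu = true; }

static bool pre_fault_synchronize(struct vcpu *v)
{
	/* the real code also disables preemption around this window */
	if (v->sysregs_loaded_on_cpu) {
		vcpu_put(v);		/* flush hw state into memory */
		return true;
	}
	return false;
}

static void post_fault_synchronize(struct vcpu *v, bool loaded)
{
	if (loaded)
		vcpu_load(v);		/* move the updated state back */
}

int main(void)
{
	struct vcpu v = { .sysregs_loaded_on_cpu = true, .spsr = 0 };
	bool loaded = pre_fault_synchronize(&v);

	v.spsr = 0x1d3;			/* safe: edits the memory copy */
	post_fault_synchronize(&v, loaded);
	printf("spsr=%#x loaded=%d\n", v.spsr, v.sysregs_loaded_on_cpu);
	return 0;
}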