Commit 81867b75 authored by Marc Zyngier
Browse files

Merge branch 'kvm-arm64/nvhe-hyp-context' into kvmarm-master/next



Signed-off-by: Marc Zyngier <maz@kernel.org>
parents ae8bd85c a071261d
Loading
Loading
Loading
Loading
+94 −6
Original line number Diff line number Diff line
@@ -38,6 +38,30 @@

#define __SMCCC_WORKAROUND_1_SMC_SZ 36

/*
 * Build a 64-bit SMCCC fast-call function ID in the vendor-specific
 * hypervisor service range for call number @id.
 */
#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

/* Map a hypervisor function name to its SMCCC function ID. */
#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

/*
 * Call numbers for host-to-hypervisor calls, issued by name through
 * KVM_HOST_SMCCC_FUNC() (see kvm_call_hyp_nvhe(), which passes the
 * resulting ID to arm_smccc_1_1_hvc()).
 */
#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run			1
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid	5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2		8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr		9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr		10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs		11
#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2		12
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs		13
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs		14

#ifndef __ASSEMBLY__

#include <linux/mm.h>
@@ -60,10 +84,24 @@
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)
#define CHOOSE_NVHE_SYM(sym)	sym
/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)	__nvhe_undefined_symbol

#elif defined(__KVM_VHE_HYPERVISOR)

#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)
#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)	__vhe_undefined_symbol

#else

#ifndef __KVM_NVHE_HYPERVISOR__
/*
 * BIG FAT WARNINGS:
 *
@@ -77,10 +115,9 @@
 */
#define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
					   : CHOOSE_NVHE_SYM(sym))
#else
/* The nVHE hypervisor shouldn't even try to access anything */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_HYP_SYM(sym)	__nvhe_undefined_symbol
#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif

/* Translate a kernel address @ptr into its equivalent linear mapping */
@@ -98,8 +135,10 @@ struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_host_vector	CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
@@ -221,6 +260,16 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

// Load the currently loaded vcpu pointer into \vcpu, and the address of
// the per-CPU hyp context into \ctxt. \vcpu doubles as the scratch
// register for hyp_adr_this_cpu before being overwritten by the load.
.macro get_loaded_vcpu vcpu, ctxt
	hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

// Record \vcpu as the currently loaded vcpu in the per-CPU hyp context.
// \ctxt and \tmp are clobbered; \vcpu is preserved.
.macro set_loaded_vcpu vcpu, ctxt, tmp
	hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * In the same format _asm_extable, but output to a different section so that
@@ -236,6 +285,45 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
	.popsection
.endm

/*
 * Byte offsets into struct kvm_cpu_context for the general-purpose
 * register area at CPU_USER_PT_REGS (8 bytes per x register).
 * Note: (x) is parenthesized so expression arguments expand correctly.
 */
#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*(x))
/* lr is x30 */
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
/* sp_el0 is stored immediately after the 31 GP registers */
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
// Store the callee-saved GP registers (x18-x28, x29 and lr) into the
// context at \ctxt. x18 is included for the reason given above.
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

// Reload the callee-saved GP registers (x18-x28, x29 and lr) from the
// context at \ctxt.
.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

// Save the current SP_EL0 value into the context at \ctxt. \tmp is
// clobbered.
.macro save_sp_el0 ctxt, tmp
	mrs	\tmp,	sp_el0
	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
.endm

// Restore SP_EL0 from the context at \ctxt. \tmp is clobbered.
.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp,	  [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */
+11 −15
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
@@ -262,8 +263,6 @@ struct kvm_host_data {
	struct kvm_pmu_events pmu_events;
};

typedef struct kvm_host_data kvm_host_data_t;

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
@@ -480,18 +479,15 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

u64 __kvm_call_hyp(void *hypfn, ...);

#define kvm_call_hyp_nvhe(f, ...)						\
	do {								\
		DECLARE_KVM_NVHE_SYM(f);				\
		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
	} while(0)

#define kvm_call_hyp_nvhe_ret(f, ...)					\
	({								\
		DECLARE_KVM_NVHE_SYM(f);				\
		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
@@ -517,7 +513,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__);	\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
@@ -565,7 +561,7 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
DECLARE_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
+6 −3
Original line number Diff line number Diff line
@@ -12,6 +12,9 @@
#include <asm/alternative.h>
#include <asm/sysreg.h>

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
@@ -87,11 +90,11 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);
#endif

u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
u64 __guest_enter(struct kvm_vcpu *vcpu);

void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt);
void __noreturn hyp_panic(void);
#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(unsigned long, ...);
void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
#endif

#endif /* __ARM64_KVM_HYP_H__ */
+3 −3
Original line number Diff line number Diff line
@@ -60,7 +60,7 @@
.endm

/*
 * Both ptrauth_switch_to_guest and ptrauth_switch_to_host macros will
 * Both ptrauth_switch_to_guest and ptrauth_switch_to_hyp macros will
 * check for the presence ARM64_HAS_ADDRESS_AUTH, which is defined as
 * (ARM64_HAS_ADDRESS_AUTH_ARCH || ARM64_HAS_ADDRESS_AUTH_IMP_DEF) and
 * then proceed ahead with the save/restore of Pointer Authentication
@@ -78,7 +78,7 @@ alternative_else_nop_endif
.L__skip_switch\@:
.endm

.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
.macro ptrauth_switch_to_hyp g_ctxt, h_ctxt, reg1, reg2, reg3
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	.L__skip_switch\@
alternative_else_nop_endif
@@ -96,7 +96,7 @@ alternative_else_nop_endif
#else /* !CONFIG_ARM64_PTR_AUTH */
.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
.endm
.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
.macro ptrauth_switch_to_hyp g_ctxt, h_ctxt, reg1, reg2, reg3
.endm
#endif /* CONFIG_ARM64_PTR_AUTH */
#endif /* __ASSEMBLY__ */
+2 −0
Original line number Diff line number Diff line
@@ -71,6 +71,8 @@ KVM_NVHE_ALIAS(kvm_update_va_mask);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
KVM_NVHE_ALIAS(kvm_host_data);
KVM_NVHE_ALIAS(kvm_hyp_ctxt);
KVM_NVHE_ALIAS(kvm_hyp_vector);
KVM_NVHE_ALIAS(kvm_vgic_global_state);

/* Kernel constant needed to compute idmap addresses. */
Loading