Commit 9ef2b48b authored by Will Deacon

KVM: arm64: Allow patching EL2 vectors even when KASLR is not enabled

Patching the EL2 exception vectors is integral to the Spectre-v2
workaround, where it can be necessary to execute CPU-specific sequences
to nobble the branch predictor before running the hypervisor text proper.

Remove the dependency on CONFIG_RANDOMIZE_BASE and allow the EL2 vectors
to be patched even when KASLR is not enabled.
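
As background for the diff below: a vector "slot" is one complete 2KB
exception-vector table (16 entries of 128 bytes each), and rerouting a CPU
means pointing its EL2 vector base at a different slot. The following
stand-alone C sketch is illustrative only, not kernel code: SLOT_SIZE,
NR_SLOTS, harden_vecs and select_hyp_vector are hypothetical stand-ins
that loosely mirror __bp_harden_hyp_vecs and the slot arithmetic relied on
by kvm_get_hyp_vector() in the diff.

#include <stdint.h>
#include <stdio.h>

#define SLOT_SIZE	2048	/* one full vector table: 16 vectors x 128 bytes */
#define NR_SLOTS	4	/* stand-in for BP_HARDEN_EL2_SLOTS */

/* Mock of the __bp_harden_hyp_vecs section: one patched table per slot. */
static uint8_t harden_vecs[NR_SLOTS * SLOT_SIZE];

/* slot < 0 means "no hardening required": keep the default vectors. */
static void *select_hyp_vector(void *default_vectors, int slot)
{
	if (slot < 0)
		return default_vectors;
	return &harden_vecs[slot * SLOT_SIZE];
}

int main(void)
{
	static uint8_t default_vectors[SLOT_SIZE];

	/* A CPU assigned slot 1 lands at byte offset 2048 of the section. */
	printf("offset = %td\n",
	       (uint8_t *)select_hyp_vector(default_vectors, 1) - harden_vecs);
	return 0;
}

In the kernel itself the slot index comes from
atomic_inc_return(&arm64_el2_vector_last_slot), bounded by
BP_HARDEN_EL2_SLOTS, as kvm_map_vectors() in the diff shows.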

Fixes: 7a132017e7a5 ("KVM: arm64: Replace CONFIG_KVM_INDIRECT_VECTORS with CONFIG_RANDOMIZE_BASE")
Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/r/202009221053.Jv1XsQUZ%lkp@intel.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 31c84d6c
arch/arm64/include/asm/kvm_asm.h  +0 −2
@@ -99,11 +99,9 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
 #define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
-#ifdef CONFIG_RANDOMIZE_BASE
 extern atomic_t arm64_el2_vector_last_slot;
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
-#endif
 
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
arch/arm64/include/asm/kvm_mmu.h  +1 −50
@@ -9,6 +9,7 @@
 
 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/cpufeature.h>
 
 /*
@@ -430,7 +431,6 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 	return ret;
 }
 
-#ifdef CONFIG_RANDOMIZE_BASE
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
  * depending on the kernel configuration and CPU present:
@@ -451,12 +451,9 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
  * VHE, as we don't have hypervisor-specific mappings. If the system
  * is VHE and yet selects this capability, it will be ignored.
  */
-#include <asm/mmu.h>
-
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
-/*  This is called on both VHE and !VHE systems */
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
@@ -480,52 +477,6 @@ static inline void *kvm_get_hyp_vector(void)
 	return vect;
 }
 
-/*  This is only called on a !VHE system */
-static inline int kvm_map_vectors(void)
-{
-	/*
-	 * SV2  = ARM64_SPECTRE_V2
-	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
-	 *
-	 * !SV2 + !HEL2 -> use direct vectors
-	 *  SV2 + !HEL2 -> use hardened vectors in place
-	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
-	 *  SV2 +  HEL2 -> use hardened vertors and use exec mapping
-	 */
-	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
-		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
-		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
-	}
-
-	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
-		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-
-		/*
-		 * Always allocate a spare vector slot, as we don't
-		 * know yet which CPUs have a BP hardening slot that
-		 * we can reuse.
-		 */
-		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
-		return create_hyp_exec_mappings(vect_pa, size,
-						&__kvm_bp_vect_base);
-	}
-
-	return 0;
-}
-#else
-static inline void *kvm_get_hyp_vector(void)
-{
-	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
-}
-
-static inline int kvm_map_vectors(void)
-{
-	return 0;
-}
-#endif
-
 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
 
 /*
arch/arm64/kernel/proton-pack.c  +0 −2
@@ -177,7 +177,6 @@ enum mitigation_state arm64_get_spectre_v2_state(void)
 }
 
 #ifdef CONFIG_KVM
-#ifdef CONFIG_RANDOMIZE_BASE
 #include <asm/cacheflush.h>
 #include <asm/kvm_asm.h>
 
@@ -235,7 +234,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
 {
 	__this_cpu_write(bp_hardening_data.fn, fn);
 }
-#endif	/* CONFIG_RANDOMIZE_BASE */
 #endif	/* CONFIG_KVM */
 
 static void call_smc_arch_workaround_1(void)
arch/arm64/kvm/arm.c  +34 −0
@@ -1256,6 +1256,40 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }
 
+static int kvm_map_vectors(void)
+{
+	/*
+	 * SV2  = ARM64_SPECTRE_V2
+	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
+	 *
+	 * !SV2 + !HEL2 -> use direct vectors
+	 *  SV2 + !HEL2 -> use hardened vectors in place
+	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+	 */
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+	}
+
+	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+
+		/*
+		 * Always allocate a spare vector slot, as we don't
+		 * know yet which CPUs have a BP hardening slot that
+		 * we can reuse.
+		 */
+		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+		return create_hyp_exec_mappings(vect_pa, size,
+						&__kvm_bp_vect_base);
+	}
+
+	return 0;
+}
+
 static void cpu_init_hyp_mode(void)
 {
 	phys_addr_t pgd_ptr;
arch/arm64/kvm/hyp/Makefile  +1 −2
@@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
 		    -DDISABLE_BRANCH_PROFILING		\
 		    $(DISABLE_STACKLEAK_PLUGIN)
 
-obj-$(CONFIG_KVM) += vhe/ nvhe/
-obj-$(CONFIG_RANDOMIZE_BASE) += smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o