Commit 688f1e4b authored by Will Deacon
Browse files

arm64: Rename ARM64_HARDEN_BRANCH_PREDICTOR to ARM64_SPECTRE_V2



For better or worse, the world knows about "Spectre" and not about
"Branch predictor hardening". Rename ARM64_HARDEN_BRANCH_PREDICTOR to
ARM64_SPECTRE_V2 as part of moving all of the Spectre mitigations into
their own little corner.

Signed-off-by: Will Deacon <will@kernel.org>
parent b181048f
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -31,7 +31,7 @@
#define ARM64_HAS_DCPOP				21
#define ARM64_SVE				22
#define ARM64_UNMAP_KERNEL_AT_EL0		23
#define ARM64_HARDEN_BRANCH_PREDICTOR		24
#define ARM64_SPECTRE_V2			24
#define ARM64_HAS_RAS_EXTN			25
#define ARM64_WORKAROUND_843419			26
#define ARM64_HAS_CACHE_IDC			27
+13 −14
Original line number Diff line number Diff line
@@ -435,14 +435,13 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
 *   hardening sequence is placed in one of the vector slots, which is
 *   executed before jumping to the real vectors.
 * - If the CPU is affected by Spectre-v2, the hardening sequence is
 *   placed in one of the vector slots, which is executed before jumping
 *   to the real vectors.
 *
 * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
 *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
 *   hardening sequence is mapped next to the idmap page, and executed
 *   before jumping to the real vectors.
 * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
 *   containing the hardening sequence is mapped next to the idmap page,
 *   and executed before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
@@ -464,7 +463,7 @@ static inline void *kvm_get_hyp_vector(void)
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
		slot = data->hyp_vectors_slot;
	}
@@ -485,15 +484,15 @@ static inline void *kvm_get_hyp_vector(void)
static inline int kvm_map_vectors(void)
{
	/*
	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
	 * SV2  = ARM64_SPECTRE_V2
	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
	 *
	 * !HBP + !HEL2 -> use direct vectors
	 *  HBP + !HEL2 -> use hardened vectors in place
	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
	 *  HBP +  HEL2 -> use hardened vertors and use exec mapping
	 * !SV2 + !HEL2 -> use direct vectors
	 *  SV2 + !HEL2 -> use hardened vectors in place
	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
	 */
	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
	}
+1 −1
Original line number Diff line number Diff line
@@ -56,7 +56,7 @@ static inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
		return;

	d = arm64_get_bp_hardening_data();
+1 −1
Original line number Diff line number Diff line
@@ -877,7 +877,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
#endif
	{
		.desc = "Branch predictor hardening",
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
		.cpu_enable = cpu_enable_branch_predictor_hardening,