Commit e6abef61 authored by Jason A. Donenfeld, committed by Masahiro Yamada
Browse files

x86: update AS_* macros to binutils >=2.23, supporting ADX and AVX2



Now that the kernel specifies binutils 2.23 as the minimum version, we
can remove ifdefs for AVX2 and ADX throughout.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
parent d7e40ea8
Loading
Loading
Loading
Loading
+0 −10
Original line number Diff line number Diff line
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.

config AS_AVX2
	def_bool $(as-instr,vpbroadcastb %xmm0$(comma)%ymm1)
	help
	  Supported by binutils >= 2.22 and LLVM integrated assembler

config AS_AVX512
	def_bool $(as-instr,vpmovm2b %k1$(comma)%zmm5)
	help
@@ -20,8 +15,3 @@ config AS_SHA256_NI
	def_bool $(as-instr,sha256msg1 %xmm0$(comma)%xmm1)
	help
	  Supported by binutils >= 2.24 and LLVM integrated assembler

config AS_ADX
	def_bool $(as-instr,adox %eax$(comma)%eax)
	help
	  Supported by binutils >= 2.23 and LLVM integrated assembler
+2 −4
Original line number Diff line number Diff line
@@ -47,8 +47,7 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o

obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o
chacha-x86_64-y := chacha-ssse3-x86_64.o chacha_glue.o
chacha-x86_64-$(CONFIG_AS_AVX2) += chacha-avx2-x86_64.o
chacha-x86_64-y := chacha-avx2-x86_64.o chacha-ssse3-x86_64.o chacha_glue.o
chacha-x86_64-$(CONFIG_AS_AVX512) += chacha-avx512vl-x86_64.o

obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
@@ -56,8 +55,7 @@ aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o

obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
sha1-ssse3-$(CONFIG_AS_AVX2) += sha1_avx2_x86_64_asm.o
sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ssse3_glue.o
sha1-ssse3-$(CONFIG_AS_SHA1_NI) += sha1_ni_asm.o

obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
+0 −3
Original line number Diff line number Diff line
@@ -1868,7 +1868,6 @@ key_256_finalize:
        ret
SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)

#ifdef CONFIG_AS_AVX2
###############################################################################
# GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
# Input: A and B (128-bits each, bit-reflected)
@@ -2836,5 +2835,3 @@ key_256_finalize4:
        FUNC_RESTORE
        ret
SYM_FUNC_END(aesni_gcm_finalize_avx_gen4)

#endif /* CONFIG_AS_AVX2 */
+0 −7
Original line number Diff line number Diff line
@@ -233,7 +233,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
	.finalize = &aesni_gcm_finalize_avx_gen2,
};

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_init_avx_gen4()
 * gcm_data *my_ctx_data, context data
@@ -276,8 +275,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
	.finalize = &aesni_gcm_finalize_avx_gen4,
};

#endif

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
@@ -706,10 +703,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
	if (!enc)
		left -= auth_tag_len;

#ifdef CONFIG_AS_AVX2
	if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
		gcm_tfm = &aesni_gcm_tfm_avx_gen2;
#endif
	if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
		gcm_tfm = &aesni_gcm_tfm_sse;

@@ -1069,12 +1064,10 @@ static int __init aesni_init(void)
	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
	} else
#endif
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
+2 −4
Original line number Diff line number Diff line
@@ -79,8 +79,7 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
		}
	}

	if (IS_ENABLED(CONFIG_AS_AVX2) &&
	    static_branch_likely(&chacha_use_avx2)) {
	if (static_branch_likely(&chacha_use_avx2)) {
		while (bytes >= CHACHA_BLOCK_SIZE * 8) {
			chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
			bytes -= CHACHA_BLOCK_SIZE * 8;
@@ -288,8 +287,7 @@ static int __init chacha_simd_mod_init(void)

	static_branch_enable(&chacha_use_simd);

	if (IS_ENABLED(CONFIG_AS_AVX2) &&
	    boot_cpu_has(X86_FEATURE_AVX) &&
	if (boot_cpu_has(X86_FEATURE_AVX) &&
	    boot_cpu_has(X86_FEATURE_AVX2) &&
	    cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
		static_branch_enable(&chacha_use_avx2);
Loading