Commit 74d8b90a authored by Jiri Slaby's avatar Jiri Slaby Committed by Borislav Petkov
Browse files

x86/asm/crypto: Annotate local functions



Use the newly added SYM_FUNC_START_LOCAL macro to annotate the beginnings
of all functions that do not have a ".globl" annotation but whose endings
are annotated by ENDPROC. This is needed to balance ENDPROC for tools that
generate debuginfo.

These function names are not prefixed with ".L", because they might appear
in call traces and would no longer be visible there after such a change.

For symmetry, the functions' ENDPROC annotations are converted to the new
SYM_FUNC_END.

Signed-off-by: default avatarJiri Slaby <jslaby@suse.cz>
Signed-off-by: default avatarBorislav Petkov <bp@suse.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-crypto@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20191011115108.12392-7-jslaby@suse.cz
parent ef77e688
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -71,7 +71,7 @@
 *   %r8
 *   %r9
 */
__load_partial:
SYM_FUNC_START_LOCAL(__load_partial)
	xor %r9d, %r9d
	pxor MSG, MSG

@@ -123,7 +123,7 @@ __load_partial:

.Lld_partial_8:
	ret
ENDPROC(__load_partial)
SYM_FUNC_END(__load_partial)

/*
 * __store_partial: internal ABI
@@ -137,7 +137,7 @@ ENDPROC(__load_partial)
 *   %r9
 *   %r10
 */
__store_partial:
SYM_FUNC_START_LOCAL(__store_partial)
	mov LEN, %r8
	mov DST, %r9

@@ -181,7 +181,7 @@ __store_partial:

.Lst_partial_1:
	ret
ENDPROC(__store_partial)
SYM_FUNC_END(__store_partial)

/*
 * void crypto_aegis128_aesni_init(void *state, const void *key, const void *iv);
+20 −29
Original line number Diff line number Diff line
@@ -1759,7 +1759,7 @@ ENDPROC(aesni_gcm_finalize)

.align 4
_key_expansion_128:
_key_expansion_256a:
SYM_FUNC_START_LOCAL(_key_expansion_256a)
	pshufd $0b11111111, %xmm1, %xmm1
	shufps $0b00010000, %xmm0, %xmm4
	pxor %xmm4, %xmm0
@@ -1770,10 +1770,9 @@ _key_expansion_256a:
	add $0x10, TKEYP
	ret
ENDPROC(_key_expansion_128)
ENDPROC(_key_expansion_256a)
SYM_FUNC_END(_key_expansion_256a)

.align 4
_key_expansion_192a:
SYM_FUNC_START_LOCAL(_key_expansion_192a)
	pshufd $0b01010101, %xmm1, %xmm1
	shufps $0b00010000, %xmm0, %xmm4
	pxor %xmm4, %xmm0
@@ -1795,10 +1794,9 @@ _key_expansion_192a:
	movaps %xmm1, 0x10(TKEYP)
	add $0x20, TKEYP
	ret
ENDPROC(_key_expansion_192a)
SYM_FUNC_END(_key_expansion_192a)

.align 4
_key_expansion_192b:
SYM_FUNC_START_LOCAL(_key_expansion_192b)
	pshufd $0b01010101, %xmm1, %xmm1
	shufps $0b00010000, %xmm0, %xmm4
	pxor %xmm4, %xmm0
@@ -1815,10 +1813,9 @@ _key_expansion_192b:
	movaps %xmm0, (TKEYP)
	add $0x10, TKEYP
	ret
ENDPROC(_key_expansion_192b)
SYM_FUNC_END(_key_expansion_192b)

.align 4
_key_expansion_256b:
SYM_FUNC_START_LOCAL(_key_expansion_256b)
	pshufd $0b10101010, %xmm1, %xmm1
	shufps $0b00010000, %xmm2, %xmm4
	pxor %xmm4, %xmm2
@@ -1828,7 +1825,7 @@ _key_expansion_256b:
	movaps %xmm2, (TKEYP)
	add $0x10, TKEYP
	ret
ENDPROC(_key_expansion_256b)
SYM_FUNC_END(_key_expansion_256b)

/*
 * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -1981,8 +1978,7 @@ ENDPROC(aesni_enc)
 *	KEY
 *	TKEYP (T1)
 */
.align 4
_aesni_enc1:
SYM_FUNC_START_LOCAL(_aesni_enc1)
	movaps (KEYP), KEY		# key
	mov KEYP, TKEYP
	pxor KEY, STATE		# round 0
@@ -2025,7 +2021,7 @@ _aesni_enc1:
	movaps 0x70(TKEYP), KEY
	AESENCLAST KEY STATE
	ret
ENDPROC(_aesni_enc1)
SYM_FUNC_END(_aesni_enc1)

/*
 * _aesni_enc4:	internal ABI
@@ -2045,8 +2041,7 @@ ENDPROC(_aesni_enc1)
 *	KEY
 *	TKEYP (T1)
 */
.align 4
_aesni_enc4:
SYM_FUNC_START_LOCAL(_aesni_enc4)
	movaps (KEYP), KEY		# key
	mov KEYP, TKEYP
	pxor KEY, STATE1		# round 0
@@ -2134,7 +2129,7 @@ _aesni_enc4:
	AESENCLAST KEY STATE3
	AESENCLAST KEY STATE4
	ret
ENDPROC(_aesni_enc4)
SYM_FUNC_END(_aesni_enc4)

/*
 * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
@@ -2173,8 +2168,7 @@ ENDPROC(aesni_dec)
 *	KEY
 *	TKEYP (T1)
 */
.align 4
_aesni_dec1:
SYM_FUNC_START_LOCAL(_aesni_dec1)
	movaps (KEYP), KEY		# key
	mov KEYP, TKEYP
	pxor KEY, STATE		# round 0
@@ -2217,7 +2211,7 @@ _aesni_dec1:
	movaps 0x70(TKEYP), KEY
	AESDECLAST KEY STATE
	ret
ENDPROC(_aesni_dec1)
SYM_FUNC_END(_aesni_dec1)

/*
 * _aesni_dec4:	internal ABI
@@ -2237,8 +2231,7 @@ ENDPROC(_aesni_dec1)
 *	KEY
 *	TKEYP (T1)
 */
.align 4
_aesni_dec4:
SYM_FUNC_START_LOCAL(_aesni_dec4)
	movaps (KEYP), KEY		# key
	mov KEYP, TKEYP
	pxor KEY, STATE1		# round 0
@@ -2326,7 +2319,7 @@ _aesni_dec4:
	AESDECLAST KEY STATE3
	AESDECLAST KEY STATE4
	ret
ENDPROC(_aesni_dec4)
SYM_FUNC_END(_aesni_dec4)

/*
 * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2604,8 +2597,7 @@ ENDPROC(aesni_cbc_dec)
 *	INC:	== 1, in little endian
 *	BSWAP_MASK == endian swapping mask
 */
.align 4
_aesni_inc_init:
SYM_FUNC_START_LOCAL(_aesni_inc_init)
	movaps .Lbswap_mask, BSWAP_MASK
	movaps IV, CTR
	PSHUFB_XMM BSWAP_MASK CTR
@@ -2613,7 +2605,7 @@ _aesni_inc_init:
	MOVQ_R64_XMM TCTR_LOW INC
	MOVQ_R64_XMM CTR TCTR_LOW
	ret
ENDPROC(_aesni_inc_init)
SYM_FUNC_END(_aesni_inc_init)

/*
 * _aesni_inc:		internal ABI
@@ -2630,8 +2622,7 @@ ENDPROC(_aesni_inc_init)
 *	CTR:	== output IV, in little endian
 *	TCTR_LOW: == lower qword of CTR
 */
.align 4
_aesni_inc:
SYM_FUNC_START_LOCAL(_aesni_inc)
	paddq INC, CTR
	add $1, TCTR_LOW
	jnc .Linc_low
@@ -2642,7 +2633,7 @@ _aesni_inc:
	movaps CTR, IV
	PSHUFB_XMM BSWAP_MASK IV
	ret
ENDPROC(_aesni_inc)
SYM_FUNC_END(_aesni_inc)

/*
 * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+10 −10
Original line number Diff line number Diff line
@@ -189,20 +189,20 @@
 * larger and would only be 0.5% faster (on sandy-bridge).
 */
.align 8
roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
	roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
		  %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
		  %rcx, (%r9));
	ret;
ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)

.align 8
roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
	roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
		  %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
		  %rax, (%r9));
	ret;
ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)

/*
 * IN/OUT:
@@ -722,7 +722,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
.text

.align 8
__camellia_enc_blk16:
SYM_FUNC_START_LOCAL(__camellia_enc_blk16)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rax: temporary storage, 256 bytes
@@ -806,10 +806,10 @@ __camellia_enc_blk16:
		     %xmm15, %rax, %rcx, 24);

	jmp .Lenc_done;
ENDPROC(__camellia_enc_blk16)
SYM_FUNC_END(__camellia_enc_blk16)

.align 8
__camellia_dec_blk16:
SYM_FUNC_START_LOCAL(__camellia_dec_blk16)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rax: temporary storage, 256 bytes
@@ -891,7 +891,7 @@ __camellia_dec_blk16:
	      ((key_table + (24) * 8) + 4)(CTX));

	jmp .Ldec_max24;
ENDPROC(__camellia_dec_blk16)
SYM_FUNC_END(__camellia_dec_blk16)

ENTRY(camellia_ecb_enc_16way)
	/* input:
@@ -1120,7 +1120,7 @@ ENDPROC(camellia_ctr_16way)
	vpxor tmp, iv, iv;

.align 8
camellia_xts_crypt_16way:
SYM_FUNC_START_LOCAL(camellia_xts_crypt_16way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (16 blocks)
@@ -1254,7 +1254,7 @@ camellia_xts_crypt_16way:

	FRAME_END
	ret;
ENDPROC(camellia_xts_crypt_16way)
SYM_FUNC_END(camellia_xts_crypt_16way)

ENTRY(camellia_xts_enc_16way)
	/* input:
+10 −10
Original line number Diff line number Diff line
@@ -223,20 +223,20 @@
 * larger and would only marginally faster.
 */
.align 8
roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
	roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		  %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
		  %rcx, (%r9));
	ret;
ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)

.align 8
roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
	roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
		  %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
		  %rax, (%r9));
	ret;
ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)

/*
 * IN/OUT:
@@ -760,7 +760,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
.text

.align 8
__camellia_enc_blk32:
SYM_FUNC_START_LOCAL(__camellia_enc_blk32)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rax: temporary storage, 512 bytes
@@ -844,10 +844,10 @@ __camellia_enc_blk32:
		     %ymm15, %rax, %rcx, 24);

	jmp .Lenc_done;
ENDPROC(__camellia_enc_blk32)
SYM_FUNC_END(__camellia_enc_blk32)

.align 8
__camellia_dec_blk32:
SYM_FUNC_START_LOCAL(__camellia_dec_blk32)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rax: temporary storage, 512 bytes
@@ -929,7 +929,7 @@ __camellia_dec_blk32:
	      ((key_table + (24) * 8) + 4)(CTX));

	jmp .Ldec_max24;
ENDPROC(__camellia_dec_blk32)
SYM_FUNC_END(__camellia_dec_blk32)

ENTRY(camellia_ecb_enc_32way)
	/* input:
@@ -1222,7 +1222,7 @@ ENDPROC(camellia_ctr_32way)
	vpxor tmp1, iv, iv;

.align 8
camellia_xts_crypt_32way:
SYM_FUNC_START_LOCAL(camellia_xts_crypt_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
@@ -1367,7 +1367,7 @@ camellia_xts_crypt_32way:

	FRAME_END
	ret;
ENDPROC(camellia_xts_crypt_32way)
SYM_FUNC_END(camellia_xts_crypt_32way)

ENTRY(camellia_xts_enc_32way)
	/* input:
+4 −4
Original line number Diff line number Diff line
@@ -209,7 +209,7 @@
.text

.align 16
__cast5_enc_blk16:
SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
	/* input:
	 *	%rdi: ctx
	 *	RL1: blocks 1 and 2
@@ -280,10 +280,10 @@ __cast5_enc_blk16:
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;
ENDPROC(__cast5_enc_blk16)
SYM_FUNC_END(__cast5_enc_blk16)

.align 16
__cast5_dec_blk16:
SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
	/* input:
	 *	%rdi: ctx
	 *	RL1: encrypted blocks 1 and 2
@@ -357,7 +357,7 @@ __cast5_dec_blk16:
.L__skip_dec:
	vpsrldq $4, RKR, RKR;
	jmp .L__dec_tail;
ENDPROC(__cast5_dec_blk16)
SYM_FUNC_END(__cast5_dec_blk16)

ENTRY(cast5_ecb_enc_16way)
	/* input:
Loading