Commit bd5c6b81 authored by Arnaldo Carvalho de Melo

perf bench: Update the copies of x86's mem{cpy,set}_64.S

And update linux/linkage.h, which in turn requires switching these files
from ENTRY()/ENDPROC() to SYM_FUNC_START()/SYM_FUNC_END():

  tools/perf/arch/arm64/tests/regs_load.S
  tools/perf/arch/arm/tests/regs_load.S
  tools/perf/arch/powerpc/tests/regs_load.S
  tools/perf/arch/x86/tests/regs_load.S
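
For reference, both annotation schemes boil down to the same assembler
directives. A simplified sketch of the relevant linux/linkage.h macros (an
illustration only, not the verbatim copy we carry, which goes through the
SYM_START()/SYM_END() helpers) looks roughly like this:

  /* Global, 16-byte aligned function entry, much like the old ENTRY() */
  #define SYM_FUNC_START(name)        \
          .globl name;                \
          .p2align 4, 0x90;           \
          name:

  /* Same, but without .globl, so the symbol stays file-local */
  #define SYM_FUNC_START_LOCAL(name)  \
          .p2align 4, 0x90;           \
          name:

  /* Marks the symbol as a function and records its size, like ENDPROC() */
  #define SYM_FUNC_END(name)          \
          .type name, @function;      \
          .size name, .-name

The SYM_FUNC_START_ALIAS()/SYM_FUNC_END_ALIAS() variants seen in the diffs
below additionally let a second name (e.g. __memcpy) label the same body.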

We also need to switch SYM_FUNC_START_LOCAL() to SYM_FUNC_START() for the
functions used directly by 'perf bench', and to update
tools/perf/check_headers.sh to ignore that difference when checking whether
the original kernel files have drifted from the copies we carry.
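
Conceptually, the check_headers.sh change only has to normalize that one
intentional difference away before diffing. A minimal sketch of the idea (the
helper name and sed expression are illustrative only, not the actual
check_headers.sh code):

  # Hypothetical helper: warn only when a tools/ copy differs from its kernel
  # original in ways other than the intentional SYM_FUNC_START_LOCAL() ->
  # SYM_FUNC_START() switch.
  check_asm_copy() {
          kernel_orig="$1"
          tools_copy="$2"
          # Pretend the kernel file already used SYM_FUNC_START(), then compare.
          if ! sed 's/SYM_FUNC_START_LOCAL(/SYM_FUNC_START(/' "$kernel_orig" | \
                  diff -u - "$tools_copy" > /dev/null; then
                  echo "Warning: Kernel ABI header at '$tools_copy' differs from latest version at '$kernel_orig'" >&2
          fi
  }

  check_asm_copy arch/x86/lib/memcpy_64.S tools/arch/x86/lib/memcpy_64.S
  check_asm_copy arch/x86/lib/memset_64.S tools/arch/x86/lib/memset_64.S

The real script may achieve this differently, but the effect is the same: only
genuine drift between the kernel originals and the perf copies gets reported.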

This is to get the changes from:

  6dcc5627 ("x86/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*")
  ef1e0315 ("x86/asm: Make some functions local")
  e9b9d020 ("x86/asm: Annotate aliases")

And address these tools/perf build warnings:

  Warning: Kernel ABI header at 'tools/arch/x86/lib/memcpy_64.S' differs from latest version at 'arch/x86/lib/memcpy_64.S'
  diff -u tools/arch/x86/lib/memcpy_64.S arch/x86/lib/memcpy_64.S
  Warning: Kernel ABI header at 'tools/arch/x86/lib/memset_64.S' differs from latest version at 'arch/x86/lib/memset_64.S'
  diff -u tools/arch/x86/lib/memset_64.S arch/x86/lib/memset_64.S

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-tay3l8x8k11p7y3qcpqh9qh5@git.kernel.org


Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 77b91c1a
tools/arch/x86/lib/memcpy_64.S +10 −10
@@ -28,8 +28,8 @@
 * Output:
 * rax original destination
 */
-ENTRY(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_LOCAL(memcpy)
	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memcpy_erms", X86_FEATURE_ERMS

@@ -41,8 +41,8 @@ ENTRY(memcpy)
	movl %edx, %ecx
	rep movsb
	ret
-ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END(memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)

@@ -50,14 +50,14 @@ EXPORT_SYMBOL(__memcpy)
 * memcpy_erms() - enhanced fast string memcpy. This is faster and
 * simpler than memcpy. Use memcpy_erms when possible.
 */
-ENTRY(memcpy_erms)
+SYM_FUNC_START(memcpy_erms)
	movq %rdi, %rax
	movq %rdx, %rcx
	rep movsb
	ret
-ENDPROC(memcpy_erms)
+SYM_FUNC_END(memcpy_erms)

-ENTRY(memcpy_orig)
+SYM_FUNC_START(memcpy_orig)
	movq %rdi, %rax

	cmpq $0x20, %rdx
@@ -182,7 +182,7 @@ ENTRY(memcpy_orig)

.Lend:
	retq
-ENDPROC(memcpy_orig)
+SYM_FUNC_END(memcpy_orig)

#ifndef CONFIG_UML

@@ -193,7 +193,7 @@ MCSAFE_TEST_CTL
 * Note that we only catch machine checks when reading the source addresses.
 * Writes to target are posted and don't generate machine checks.
 */
-ENTRY(__memcpy_mcsafe)
+SYM_FUNC_START(__memcpy_mcsafe)
	cmpl $8, %edx
	/* Less than 8 bytes? Go to byte copy loop */
	jb .L_no_whole_words
@@ -260,7 +260,7 @@ ENTRY(__memcpy_mcsafe)
	xorl %eax, %eax
.L_done:
	ret
-ENDPROC(__memcpy_mcsafe)
+SYM_FUNC_END(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)

	.section .fixup, "ax"
tools/arch/x86/lib/memset_64.S +8 −8
@@ -18,8 +18,8 @@
 *
 * rax   original destination
 */
-ENTRY(memset)
-ENTRY(__memset)
+SYM_FUNC_START_ALIAS(memset)
+SYM_FUNC_START(__memset)
	/*
	 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
	 * to use it when possible. If not available, use fast string instructions.
@@ -42,8 +42,8 @@ ENTRY(__memset)
	rep stosb
	movq %r9,%rax
	ret
-ENDPROC(memset)
-ENDPROC(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_END_ALIAS(memset)

/*
 * ISO C memset - set a memory block to a byte value. This function uses
@@ -56,16 +56,16 @@ ENDPROC(__memset)
 *
 * rax   original destination
 */
-ENTRY(memset_erms)
+SYM_FUNC_START(memset_erms)
	movq %rdi,%r9
	movb %sil,%al
	movq %rdx,%rcx
	rep stosb
	movq %r9,%rax
	ret
-ENDPROC(memset_erms)
+SYM_FUNC_END(memset_erms)

-ENTRY(memset_orig)
+SYM_FUNC_START(memset_orig)
	movq %rdi,%r10

	/* expand byte value  */
@@ -136,4 +136,4 @@ ENTRY(memset_orig)
	subq %r8,%rdx
	jmp .Lafter_bad_alignment
.Lfinal:
-ENDPROC(memset_orig)
+SYM_FUNC_END(memset_orig)
tools/perf/arch/arm/tests/regs_load.S +2 −2
@@ -37,7 +37,7 @@

.text
.type perf_regs_load,%function
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
	str r0, [r0, #R0]
	str r1, [r0, #R1]
	str r2, [r0, #R2]
@@ -56,4 +56,4 @@ ENTRY(perf_regs_load)
	str lr, [r0, #PC]	// store pc as lr in order to skip the call
	                        //  to this function
	mov pc, lr
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
tools/perf/arch/arm64/tests/regs_load.S +2 −2
@@ -7,7 +7,7 @@
#define LDR_REG(r)	ldr x##r, [x0, 8 * r]
#define SP	(8 * 31)
#define PC	(8 * 32)
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
	STR_REG(0)
	STR_REG(1)
	STR_REG(2)
@@ -44,4 +44,4 @@ ENTRY(perf_regs_load)
	str x30, [x0, #PC]
	LDR_REG(1)
	ret
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
tools/perf/arch/x86/tests/regs_load.S +4 −4
@@ -28,7 +28,7 @@

.text
#ifdef HAVE_ARCH_X86_64_SUPPORT
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
	movq %rax, AX(%rdi)
	movq %rbx, BX(%rdi)
	movq %rcx, CX(%rdi)
@@ -60,9 +60,9 @@ ENTRY(perf_regs_load)
	movq %r14, R14(%rdi)
	movq %r15, R15(%rdi)
	ret
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
#else
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
	push %edi
	movl 8(%esp), %edi
	movl %eax, AX(%edi)
@@ -88,7 +88,7 @@ ENTRY(perf_regs_load)
	movl $0, FS(%edi)
	movl $0, GS(%edi)
	ret
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
#endif

/*