Commit 3693ca81 authored by Peter Zijlstra, committed by Ingo Molnar

x86/uaccess: Move copy_user_handle_tail() into asm



By writing the function in asm, we avoid cross-object code flow and
objtool no longer gets confused about a 'stray' CLAC.

Also, the asm version is actually _simpler_.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8f4faed0
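The 'stray' CLAC in question comes from the pattern removed below: the
.fixup code in copy_user_64.S tail-jumped into the C
copy_user_handle_tail() in usercopy_64.c, so the CLAC closing the
user-access window executed in a different object file than the STAC
that opened it, a cross-object flow objtool's per-object static
analysis cannot follow. Condensed from the hunks below (quoted for
orientation, not new code):

	/* copy_user_64.S: fault fixup tail-jumps into another object */
	.section .fixup,"ax"
103:	addl %ecx,%edx
	jmp copy_user_handle_tail
	.previous

/* usercopy_64.c: ends a user access it never opened */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
	...
	clac();		/* the 'stray' CLAC objtool trips over */
	return len;
}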
arch/x86/include/asm/asm.h +0 −24
@@ -148,30 +148,6 @@
 	_ASM_PTR (entry);					\
 	.popsection
 
-.macro ALIGN_DESTINATION
-	/* check for bad alignment of destination */
-	movl %edi,%ecx
-	andl $7,%ecx
-	jz 102f				/* already aligned */
-	subl $8,%ecx
-	negl %ecx
-	subl %ecx,%edx
-100:	movb (%rsi),%al
-101:	movb %al,(%rdi)
-	incq %rsi
-	incq %rdi
-	decl %ecx
-	jnz 100b
-102:
-	.section .fixup,"ax"
-103:	addl %ecx,%edx			/* ecx is zerorest also */
-	jmp copy_user_handle_tail
-	.previous
-
-	_ASM_EXTABLE_UA(100b, 103b)
-	_ASM_EXTABLE_UA(101b, 103b)
-	.endm
-
 #else
 # define _EXPAND_EXTABLE_HANDLE(x) #x
 # define _ASM_EXTABLE_HANDLE(from, to, handler)			\
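The ALIGN_DESTINATION macro is moved verbatim: when the destination is
misaligned it byte-copies 8 - (dst & 7) head bytes, subtracting that
count from the remaining length in %edx so the main loop runs 8-byte
aligned, and a fault in the head copy falls into the same tail handler
via label 103. A hedged C rendering of the head-count arithmetic
(align_head_bytes is an illustrative name, not a kernel helper):

/* Bytes ALIGN_DESTINATION copies before the aligned main loop;
 * e.g. dst % 8 == 3 means 8 - 3 = 5 byte-wise copies. */
unsigned long align_head_bytes(unsigned long dst)
{
	unsigned long mis = dst & 7;	/* movl %edi,%ecx; andl $7,%ecx */

	return mis ? 8 - mis : 0;	/* jz 102f; subl $8,%ecx; negl %ecx */
}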
arch/x86/include/asm/uaccess_64.h +0 −3
@@ -207,9 +207,6 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
 	return __copy_user_flushcache(dst, src, size);
 }
 
-unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len);
-
 unsigned long
 mcsafe_handle_tail(char *to, char *from, unsigned len);
 
arch/x86/lib/copy_user_64.S +48 −0
@@ -16,6 +16,30 @@
 #include <asm/smap.h>
 #include <asm/export.h>
 
+.macro ALIGN_DESTINATION
+	/* check for bad alignment of destination */
+	movl %edi,%ecx
+	andl $7,%ecx
+	jz 102f				/* already aligned */
+	subl $8,%ecx
+	negl %ecx
+	subl %ecx,%edx
+100:	movb (%rsi),%al
+101:	movb %al,(%rdi)
+	incq %rsi
+	incq %rdi
+	decl %ecx
+	jnz 100b
+102:
+	.section .fixup,"ax"
+103:	addl %ecx,%edx			/* ecx is zerorest also */
+	jmp copy_user_handle_tail
+	.previous
+
+	_ASM_EXTABLE_UA(100b, 103b)
+	_ASM_EXTABLE_UA(101b, 103b)
+	.endm
+
 /*
  * copy_user_generic_unrolled - memory copy with exception handling.
  * This version is for CPUs like P4 that don't have efficient micro
@@ -193,6 +217,30 @@ ENTRY(copy_user_enhanced_fast_string)
 ENDPROC(copy_user_enhanced_fast_string)
 EXPORT_SYMBOL(copy_user_enhanced_fast_string)
 
+/*
+ * Try to copy last bytes and clear the rest if needed.
+ * Since protection fault in copy_from/to_user is not a normal situation,
+ * it is not necessary to optimize tail handling.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+ALIGN;
+copy_user_handle_tail:
+	movl %edx,%ecx
+1:	rep movsb
+2:	mov %ecx,%eax
+	ASM_CLAC
+	ret
+
+	_ASM_EXTABLE_UA(1b, 2b)
+ENDPROC(copy_user_handle_tail)
+
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
  * This will force destination out of cache for more performance.
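The new body can be this small because REP MOVSB decrements %rcx once
per byte moved: on success execution falls through to label 2 with
%ecx == 0, and on a fault the extable entry lands at that same label
with %ecx holding exactly the bytes not yet copied, so one
mov %ecx,%eax serves both paths, and the CLAC now sits in the same
object as the STAC-opened copy routines where objtool can follow the
flow. A hedged C model of the resulting contract (tail_copy_model and
the simulated fault_at are illustrative, not kernel API):

#include <string.h>

/* Returns the number of bytes NOT copied: 0 on success, else the
 * remainder when the source becomes inaccessible at byte fault_at
 * (the fault is simulated here; the real code faults via the extable). */
unsigned long tail_copy_model(char *to, const char *from,
			      unsigned long len, unsigned long fault_at)
{
	unsigned long done = len < fault_at ? len : fault_at;

	memcpy(to, from, done);		/* rep movsb up to the fault */
	return len - done;		/* %ecx at label 2 */
}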
arch/x86/lib/usercopy_64.c +0 −20
@@ -54,26 +54,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
 }
 EXPORT_SYMBOL(clear_user);
 
-/*
- * Try to copy last bytes and clear the rest if needed.
- * Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
- */
-__visible unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len)
-{
-	for (; len; --len, to++) {
-		char c;
-
-		if (__get_user_nocheck(c, from++, sizeof(char)))
-			break;
-		if (__put_user_nocheck(c, to, sizeof(char)))
-			break;
-	}
-	clac();
-	return len;
-}
-
 /*
  * Similar to copy_user_handle_tail, probe for the write fault point,
  * but reuse __memcpy_mcsafe in case a new read error is encountered.