Commit a2f73400 authored by Tony Luck's avatar Tony Luck Committed by Borislav Petkov
Browse files

x86/mce: Avoid tail copy when machine check terminated a copy from user



In the page fault case it is OK to try copying a few more unaligned bytes
from the source address. The worst case is that the page fault is simply
triggered again.

Machine checks are more serious. Just give up at the point where the
main copy loop triggered the #MC and return from the copy code as if
the copy succeeded. The machine check handler will use task_work_add() to
make sure that the task is sent a SIGBUS.

Signed-off-by: default avatarTony Luck <tony.luck@intel.com>
Signed-off-by: default avatarBorislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20201006210910.21062-5-tony.luck@intel.com
parent 278b917f
Loading
Loading
Loading
Loading
+15 −0
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
#include <asm/trapnr.h>

.macro ALIGN_DESTINATION
	/* check for bad alignment of destination */
@@ -221,6 +222,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
 * Try to copy last bytes and clear the rest if needed.
 * Since protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 * Don't try to copy the tail if a machine check happened.
 *
 * Input:
 * rdi destination
@@ -232,11 +234,24 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
 */
SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
	movl %edx,%ecx			/* ecx = remaining (uncopied) byte count */
	cmp $X86_TRAP_MC,%eax		/* was the fault a machine check (#MC)? */
	je 3f				/* yes: don't touch the poisoned line again */
1:	rep movsb			/* page-fault case: retry tail byte-by-byte */
2:	mov %ecx,%eax			/* return bytes still uncopied (0 on success) */
	ASM_CLAC
	ret

	/*
	 * Return zero to pretend that this copy succeeded. This
	 * is counter-intuitive, but needed to prevent the code
	 * in lib/iov_iter.c from retrying and running back into
	 * the poison cache line again. The machine check handler
	 * will ensure that a SIGBUS is sent to the task.
	 */
3:	xorl %eax,%eax			/* fake full success: eax = 0 uncopied bytes */
	ASM_CLAC
	ret

	/* if the rep movsb above faults, resume at 2: with ecx = bytes left */
	_ASM_EXTABLE_CPY(1b, 2b)
SYM_CODE_END(.Lcopy_user_handle_tail)