Commit ec6347bb authored by Dan Williams, committed by Borislav Petkov

x86, powerpc: Rename memcpy_mcsafe() to copy_mc_to_{user, kernel}()



In reaction to a proposal to introduce a memcpy_mcsafe_fast()
implementation, Linus points out that memcpy_mcsafe() is poorly named
relative to communicating the scope of the interface: specifically,
which addresses are valid to pass as source and destination, and which
faults / exceptions are handled.
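
The split below makes that scope explicit in the names and signatures.
For reference, the two powerpc prototypes introduced later in this
patch (the comments are added here for illustration; they are not in
the diff):

  /* Source may be poisoned memory; destination is a kernel address. */
  unsigned long __must_check
  copy_mc_to_kernel(void *to, const void *from, unsigned long size);

  /* Source may be poisoned memory; destination is a user address. */
  unsigned long __must_check
  copy_mc_to_user(void __user *to, const void *from, unsigned long n);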

Of particular concern is that even though x86 might be able to handle
the semantics of copy_mc_to_user() with its common copy_user_generic()
implementation, other archs likely need / want an explicit path for
this case:

  On Fri, May 1, 2020 at 11:28 AM Linus Torvalds <torvalds@linux-foundation.org> wrote:
  >
  > On Thu, Apr 30, 2020 at 6:21 PM Dan Williams <dan.j.williams@intel.com> wrote:
  > >
  > > However now I see that copy_user_generic() works for the wrong reason.
  > > It works because the exception on the source address due to poison
  > > looks no different than a write fault on the user address to the
  > > caller, it's still just a short copy. So it makes copy_to_user() work
  > > for the wrong reason relative to the name.
  >
  > Right.
  >
  > And it won't work that way on other architectures. On x86, we have a
  > generic function that can take faults on either side, and we use it
  > for both cases (and for the "in_user" case too), but that's an
  > artifact of the architecture oddity.
  >
  > In fact, it's probably wrong even on x86 - because it can hide bugs -
  > but writing those things is painful enough that everybody prefers
  > having just one function.
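
To illustrate the caller-visible contract, consider a hypothetical
pmem read path (read_pmem() below is a sketch, not part of this
patch): the interface returns the number of bytes not copied, and, per
the discussion above, a machine check on the poisoned source and a
write fault on the user destination surface the same way, as a short
copy:

  /* Hypothetical sketch; not part of this patch. */
  static ssize_t read_pmem(void __user *ubuf, const void *pmem_addr,
  			   size_t len)
  {
  	unsigned long rem;

  	/* Survives poison consumption on the source... */
  	rem = copy_mc_to_user(ubuf, pmem_addr, len);
  	/* ...and a fault on the user destination looks identical. */
  	if (rem)
  		return -EFAULT;
  	return len;
  }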

Replace the single top-level memcpy_mcsafe() with either
copy_mc_to_user() or copy_mc_to_kernel().

Introduce copy_mc_fragile() as the rename for the low-level x86
implementation formerly named memcpy_mcsafe(). It is used as the
slow / careful backend that is supplanted by a fast copy_mc_generic()
in a follow-on patch.
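
A typical caller conversion then looks like this (hypothetical
kernel-to-kernel copy, sketched for illustration):

  /* Before: one interface regardless of destination address space. */
  if (memcpy_mcsafe(dst, src, len))
  	return -EIO;

  /* After: the destination address space is explicit in the name. */
  if (copy_mc_to_kernel(dst, src, len))
  	return -EIO;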

One side effect of this reorganization is that separating copy_mc_64.S
into its own file means that perf no longer needs to track dependencies
for its memcpy_64.S benchmarks.

 [ bp: Massage a bit. ]

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Acked-by: Michael Ellerman <mpe@ellerman.id.au>
Cc: <stable@vger.kernel.org>
Link: http://lore.kernel.org/r/CAHk-=wjSqtXAqfUJxFtWNwmguFASTgB0dz1dT3V-78Quiezqbg@mail.gmail.com
Link: https://lkml.kernel.org/r/160195561680.2163339.11574962055305783722.stgit@dwillia2-desk3.amr.corp.intel.com
parent ed9705e4
arch/powerpc/Kconfig  +1 −1
@@ -136,7 +136,7 @@ config PPC
 	select ARCH_HAS_STRICT_KERNEL_RWX	if (PPC32 && !HIBERNATION)
 	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UACCESS_FLUSHCACHE
-	select ARCH_HAS_UACCESS_MCSAFE		if PPC64
+	select ARCH_HAS_COPY_MC			if PPC64
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_KEEP_MEMBLOCK
arch/powerpc/include/asm/string.h  +0 −2
@@ -53,9 +53,7 @@ void *__memmove(void *to, const void *from, __kernel_size_t n);
 #ifndef CONFIG_KASAN
 #define __HAVE_ARCH_MEMSET32
 #define __HAVE_ARCH_MEMSET64
-#define __HAVE_ARCH_MEMCPY_MCSAFE
 
-extern int memcpy_mcsafe(void *dst, const void *src, __kernel_size_t sz);
 extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t);
 extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
 extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t);
arch/powerpc/include/asm/uaccess.h  +26 −14
@@ -435,6 +435,32 @@ do { \
 extern unsigned long __copy_tofrom_user(void __user *to,
 		const void __user *from, unsigned long size);
 
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+unsigned long __must_check
+copy_mc_generic(void *to, const void *from, unsigned long size);
+
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned long size)
+{
+	return copy_mc_generic(to, from, size);
+}
+#define copy_mc_to_kernel copy_mc_to_kernel
+
+static inline unsigned long __must_check
+copy_mc_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (likely(check_copy_size(from, n, true))) {
+		if (access_ok(to, n)) {
+			allow_write_to_user(to, n);
+			n = copy_mc_generic((void *)to, from, n);
+			prevent_write_to_user(to, n);
+		}
+	}
+
+	return n;
+}
+#endif
+
 #ifdef __powerpc64__
 static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
@@ -523,20 +549,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 	return ret;
 }
 
-static __always_inline unsigned long __must_check
-copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
-{
-	if (likely(check_copy_size(from, n, true))) {
-		if (access_ok(to, n)) {
-			allow_write_to_user(to, n);
-			n = memcpy_mcsafe((void *)to, from, n);
-			prevent_write_to_user(to, n);
-		}
-	}
-
-	return n;
-}
-
 unsigned long __arch_clear_user(void __user *addr, unsigned long size);
 
 static inline unsigned long clear_user(void __user *addr, unsigned long size)
arch/powerpc/lib/Makefile  +1 −1
@@ -39,7 +39,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
 			       memcpy_power7.o
 
 obj64-y	+= copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
-	   memcpy_64.o memcpy_mcsafe_64.o
+	   memcpy_64.o copy_mc_64.o
 
 ifndef CONFIG_PPC_QUEUED_SPINLOCKS
 obj64-$(CONFIG_SMP)	+= locks.o
arch/powerpc/lib/{memcpy_mcsafe_64.S → copy_mc_64.S}  +2 −2
@@ -50,7 +50,7 @@ err3; stb r0,0(r3)
 	blr
 
 
-_GLOBAL(memcpy_mcsafe)
+_GLOBAL(copy_mc_generic)
 	mr	r7,r5
 	cmpldi	r5,16
 	blt	.Lshort_copy
@@ -239,4 +239,4 @@ err1; stb r0,0(r3)
 15:	li	r3,0
 	blr
 
-EXPORT_SYMBOL_GPL(memcpy_mcsafe);
+EXPORT_SYMBOL_GPL(copy_mc_generic);