Commit beba3a20 authored by Al Viro

x86: switch to RAW_COPY_USER

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent a41e0d75
arch/x86/Kconfig  +1 −0
@@ -175,6 +175,7 @@ config X86
 	select USER_STACKTRACE_SUPPORT
 	select VIRT_TO_BUS
 	select X86_FEATURE_NAMES		if PROC_FS
+	select ARCH_HAS_RAW_COPY_USER

config INSTRUCTION_DECODER
 	def_bool y
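With ARCH_HAS_RAW_COPY_USER selected, the architecture supplies only raw_copy_to_user()/raw_copy_from_user() (plus, on 64-bit, raw_copy_in_user()); each returns the number of bytes left uncopied, and the access_ok() checks, KASAN annotations and object-size hardening live once in the generic layer. A minimal sketch of that consumer, paraphrased from the v4.12-era include/linux/uaccess.h rather than quoted verbatim:

static __always_inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);	/* arch hook added by this commit */
}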
arch/x86/include/asm/uaccess.h  +0 −53
@@ -682,59 +682,6 @@ extern struct movsl_mask {
 # include <asm/uaccess_64.h>
 #endif

-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
-					   unsigned n);
-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
-					 unsigned n);
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static __always_inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(to);
-
-	might_fault();
-
-	kasan_check_write(to, n);
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(to, n, false);
-		n = _copy_from_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
-
-static __always_inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(from);
-
-	kasan_check_read(from, n);
-
-	might_fault();
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(from, n, true);
-		n = _copy_to_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
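The checked copy_from_user()/copy_to_user() removed above are not lost: the same compile-time buffer-size hardening now exists once in include/linux/uaccess.h for every RAW_COPY_USER architecture. Roughly, as a from-memory sketch of the v4.12-era generic copy_from_user(), not a verbatim quote:

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	if (likely(sz < 0 || sz >= n))
		n = _copy_from_user(to, from, n);	/* access_ok + raw copy + zero-pad */
	else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}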
arch/x86/include/asm/uaccess_32.h  +15 −80
@@ -8,113 +8,48 @@
 #include <asm/asm.h>
 #include <asm/page.h>

-unsigned long __must_check __copy_to_user_ll
-		(void __user *to, const void *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll
-		(void *to, const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nozero
-		(void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_user_ll
+		(void *to, const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache_nozero
 		(void *to, const void __user *from, unsigned long n);

-/**
- * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- * The caller should also make sure he pins the user space address
- * so that we don't result in page fault and sleep.
- */
 static __always_inline unsigned long __must_check
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	check_object_size(from, n, true);
-	return __copy_to_user_ll(to, from, n);
+	return __copy_user_ll((__force void *)to, from, n);
 }

-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static __always_inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	might_fault();
-	return __copy_to_user_inatomic(to, from, n);
-}
-
-static __always_inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
-{
-	return __copy_from_user_ll_nozero(to, from, n);
-}
-
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - may be called from
- * atomic context and will fail rather than sleep.  In this case the
- * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
- * for explanation of why this is needed.
- */
 static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_fault();
-	check_object_size(to, n, false);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;

 		switch (n) {
 		case 1:
 			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			__get_user_asm_nozero(*(u8 *)to, from, ret,
+					      "b", "b", "=q", 1);
 			__uaccess_end();
 			return ret;
 		case 2:
 			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			__get_user_asm_nozero(*(u16 *)to, from, ret,
+					      "w", "w", "=r", 2);
 			__uaccess_end();
 			return ret;
 		case 4:
 			ret = 0;
 			__uaccess_begin();
-			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			__get_user_asm_nozero(*(u32 *)to, from, ret,
+					      "l", "k", "=r", 4);
 			__uaccess_end();
 			return ret;
 		}
 	}
-	return __copy_from_user_ll(to, from, n);
+	return __copy_user_ll(to, (__force const void *)from, n);
 }

 static __always_inline unsigned long
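raw_copy_from_user() keeps the constant-size 1/2/4-byte fast paths but switches them to the _nozero forms of the get-user asm: under the raw-copy contract the arch helper must not zero-pad on a fault, it only reports how many bytes were left uncopied. A hypothetical caller (names invented for illustration) showing that contract:

/* raw_copy_from_user() returns the number of bytes it could NOT copy;
 * zero means complete success.  Zero-filling the unread tail is the
 * caller's job at this level, not the arch helper's.
 */
static int example_fetch(void *dst, const void __user *usrc, unsigned long len)
{
	unsigned long left = raw_copy_from_user(dst, usrc, len);

	if (left) {
		memset(dst + len - left, 0, left);	/* pad what faulted */
		return -EFAULT;
	}
	return 0;
}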
arch/x86/include/asm/uaccess_64.h  +5 −40
@@ -45,15 +45,11 @@ copy_user_generic(void *to, const void *from, unsigned len)
 	return ret;
 }

-__must_check unsigned long
-copy_in_user(void __user *to, const void __user *from, unsigned len);
-
-static __always_inline __must_check
-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
+static __always_inline __must_check unsigned long
+raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
 {
 	int ret = 0;

-	check_object_size(dst, size, false);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -106,20 +102,11 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 	}
 }

-static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
-{
-	might_fault();
-	kasan_check_write(dst, size);
-	return __copy_from_user_nocheck(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
+static __always_inline __must_check unsigned long
+raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
 {
 	int ret = 0;

-	check_object_size(src, size, true);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -175,34 +162,12 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 }

 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
-{
-	might_fault();
-	kasan_check_read(src, size);
-	return __copy_to_user_nocheck(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
 {
 	return copy_user_generic((__force void *)dst,
 				 (__force void *)src, size);
 }

-static __must_check __always_inline int
-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
-{
-	kasan_check_write(dst, size);
-	return __copy_from_user_nocheck(dst, src, size);
-}
-
-static __must_check __always_inline int
-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
-{
-	kasan_check_read(src, size);
-	return __copy_to_user_nocheck(dst, src, size);
-}
-
 extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);

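raw_copy_in_user() similarly sheds its access_ok() wrapper here; the generic layer rebuilds it. Roughly, assuming the v4.12-era generic code (a paraphrase, not a quote):

static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}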
arch/x86/lib/usercopy.c  +1 −54
@@ -4,12 +4,9 @@
  *  For licencing details see kernel-base/COPYING
  */

-#include <linux/highmem.h>
+#include <linux/uaccess.h>
 #include <linux/export.h>
-
-#include <asm/word-at-a-time.h>
-#include <linux/sched.h>

 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
@@ -34,53 +31,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
-	return n;
-}
-EXPORT_SYMBOL(_copy_to_user);
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
-{
-	unsigned long res = n;
-	if (access_ok(VERIFY_READ, from, n))
-		res = __copy_from_user_inatomic(to, from, n);
-	if (unlikely(res))
-		memset(to + n - res, 0, res);
-	return res;
-}
-EXPORT_SYMBOL(_copy_from_user);
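The zero-padding behaviour documented in the deleted kdoc above is preserved, just centralized: the generic lib/usercopy.c builds _copy_from_user() on top of the arch raw copy for every RAW_COPY_USER architecture. A sketch paraphrasing the v4.12-era generic version:

unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + n - res, 0, res);	/* zero-pad the uncopied tail */
	return res;
}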