Commit c9057836 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'work.csum_and_copy' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull csum_and_copy cleanups from Al Viro:
 "Saner calling conventions for csum_and_copy_..._user() and friends"

[ Removing 800+ lines of code and cleaning stuff up is good  - Linus ]

* 'work.csum_and_copy' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  ppc: propagate the calling conventions change down to csum_partial_copy_generic()
  amd64: switch csum_partial_copy_generic() to new calling conventions
  sparc64: propagate the calling convention changes down to __csum_partial_copy_...()
  xtensa: propagate the calling conventions change down into csum_partial_copy_generic()
  mips: propagate the calling convention change down into __csum_partial_copy_..._user()
  mips: __csum_partial_copy_kernel() has no users left
  mips: csum_and_copy_{to,from}_user() are never called under KERNEL_DS
  sparc32: propagate the calling conventions change down to __csum_partial_copy_sparc_generic()
  i386: propagate the calling conventions change down to csum_partial_copy_generic()
  sh: propagate the calling conventions change down to csum_partial_copy_generic()
  m68k: get rid of zeroing destination on error in csum_and_copy_from_user()
  arm: propagate the calling convention changes down to csum_partial_copy_from_user()
  alpha: propagate the calling convention changes down to csum_partial_copy.c helpers
  saner calling conventions for csum_and_copy_..._user()
  csum_and_copy_..._user(): pass 0xffffffff instead of 0 as initial sum
  csum_partial_copy_nocheck(): drop the last argument
  unify generic instances of csum_partial_copy_nocheck()
  icmp_push_reply(): reorder adding the checksum up
  skb_copy_and_csum_bits(): don't bother with the last argument
parents 50d22834 70d65cd5
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -42,9 +42,10 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 * better 64-bit) boundary
 */
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
#define _HAVE_ARCH_CSUM_AND_COPY
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);

__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);


/*
+71 −93
Original line number Diff line number Diff line
@@ -39,12 +39,11 @@ __asm__ __volatile__("insql %1,%2,%0":"=r" (z):"r" (x),"r" (y))
#define insqh(x,y,z) \
__asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))


#define __get_user_u(x,ptr)				\
#define __get_word(insn,x,ptr)				\
({							\
	long __guu_err;					\
	__asm__ __volatile__(				\
	"1:	ldq_u %0,%2\n"				\
	"1:	"#insn" %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(x), "=r"(__guu_err)		\
@@ -52,19 +51,6 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
	__guu_err;					\
})

#define __put_user_u(x,ptr)				\
({							\
	long __puu_err;					\
	__asm__ __volatile__(				\
	"1:	stq_u %2,%1\n"				\
	"2:\n"						\
	EXC(1b,2b,$31,%0)				\
		: "=r"(__puu_err)			\
		: "m"(__m(addr)), "rJ"(x), "0"(0));	\
	__puu_err;					\
})


static inline unsigned short from64to16(unsigned long x)
{
	/* Using extract instructions is a bit more efficient
@@ -95,15 +81,15 @@ static inline unsigned short from64to16(unsigned long x)
 */
static inline unsigned long
csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
			 long len, unsigned long checksum,
			 int *errp)
			 long len)
{
	unsigned long checksum = ~0U;
	unsigned long carry = 0;
	int err = 0;

	while (len >= 0) {
		unsigned long word;
		err |= __get_user(word, src);
		if (__get_word(ldq, word, src))
			return 0;
		checksum += carry;
		src++;
		checksum += word;
@@ -116,7 +102,8 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
	checksum += carry;
	if (len) {
		unsigned long word, tmp;
		err |= __get_user(word, src);
		if (__get_word(ldq, word, src))
			return 0;
		tmp = *dst;
		mskql(word, len, word);
		checksum += word;
@@ -125,7 +112,6 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
		*dst = word | tmp;
		checksum += carry;
	}
	if (err && errp) *errp = err;
	return checksum;
}

@@ -137,20 +123,21 @@ static inline unsigned long
csum_partial_cfu_dest_aligned(const unsigned long __user *src,
			      unsigned long *dst,
			      unsigned long soff,
			      long len, unsigned long checksum,
			      int *errp)
			      long len)
{
	unsigned long first;
	unsigned long word, carry;
	unsigned long lastsrc = 7+len+(unsigned long)src;
	int err = 0;
	unsigned long checksum = ~0U;

	err |= __get_user_u(first,src);
	if (__get_word(ldq_u, first,src))
		return 0;
	carry = 0;
	while (len >= 0) {
		unsigned long second;

		err |= __get_user_u(second, src+1);
		if (__get_word(ldq_u, second, src+1))
			return 0;
		extql(first, soff, word);
		len -= 8;
		src++;
@@ -168,7 +155,8 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
	if (len) {
		unsigned long tmp;
		unsigned long second;
		err |= __get_user_u(second, lastsrc);
		if (__get_word(ldq_u, second, lastsrc))
			return 0;
		tmp = *dst;
		extql(first, soff, word);
		extqh(second, soff, first);
@@ -180,7 +168,6 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
		*dst = word | tmp;
		checksum += carry;
	}
	if (err && errp) *errp = err;
	return checksum;
}

@@ -191,18 +178,18 @@ static inline unsigned long
csum_partial_cfu_src_aligned(const unsigned long __user *src,
			     unsigned long *dst,
			     unsigned long doff,
			     long len, unsigned long checksum,
			     unsigned long partial_dest,
			     int *errp)
			     long len,
			     unsigned long partial_dest)
{
	unsigned long carry = 0;
	unsigned long word;
	unsigned long second_dest;
	int err = 0;
	unsigned long checksum = ~0U;

	mskql(partial_dest, doff, partial_dest);
	while (len >= 0) {
		err |= __get_user(word, src);
		if (__get_word(ldq, word, src))
			return 0;
		len -= 8;
		insql(word, doff, second_dest);
		checksum += carry;
@@ -216,7 +203,8 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
	len += 8;
	if (len) {
		checksum += carry;
		err |= __get_user(word, src);
		if (__get_word(ldq, word, src))
			return 0;
		mskql(word, len, word);
		len -= 8;
		checksum += word;
@@ -237,7 +225,6 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
	stq_u(partial_dest | second_dest, dst);
out:
	checksum += carry;
	if (err && errp) *errp = err;
	return checksum;
}

@@ -249,23 +236,23 @@ static inline unsigned long
csum_partial_cfu_unaligned(const unsigned long __user * src,
			   unsigned long * dst,
			   unsigned long soff, unsigned long doff,
			   long len, unsigned long checksum,
			   unsigned long partial_dest,
			   int *errp)
			   long len, unsigned long partial_dest)
{
	unsigned long carry = 0;
	unsigned long first;
	unsigned long lastsrc;
	int err = 0;
	unsigned long checksum = ~0U;

	err |= __get_user_u(first, src);
	if (__get_word(ldq_u, first, src))
		return 0;
	lastsrc = 7+len+(unsigned long)src;
	mskql(partial_dest, doff, partial_dest);
	while (len >= 0) {
		unsigned long second, word;
		unsigned long second_dest;

		err |= __get_user_u(second, src+1);
		if (__get_word(ldq_u, second, src+1))
			return 0;
		extql(first, soff, word);
		checksum += carry;
		len -= 8;
@@ -286,7 +273,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
		unsigned long second, word;
		unsigned long second_dest;

		err |= __get_user_u(second, lastsrc);
		if (__get_word(ldq_u, second, lastsrc))
			return 0;
		extql(first, soff, word);
		extqh(second, soff, first);
		word |= first;
@@ -307,7 +295,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
		unsigned long second, word;
		unsigned long second_dest;

		err |= __get_user_u(second, lastsrc);
		if (__get_word(ldq_u, second, lastsrc))
			return 0;
		extql(first, soff, word);
		extqh(second, soff, first);
		word |= first;
@@ -320,35 +309,25 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
		stq_u(partial_dest | word | second_dest, dst);
		checksum += carry;
	}
	if (err && errp) *errp = err;
	return checksum;
}

__wsum
csum_and_copy_from_user(const void __user *src, void *dst, int len,
			       __wsum sum, int *errp)
static __wsum __csum_and_copy(const void __user *src, void *dst, int len)
{
	unsigned long checksum = (__force u32) sum;
	unsigned long soff = 7 & (unsigned long) src;
	unsigned long doff = 7 & (unsigned long) dst;
	unsigned long checksum;

	if (len) {
		if (!access_ok(src, len)) {
			if (errp) *errp = -EFAULT;
			memset(dst, 0, len);
			return sum;
		}
	if (!doff) {
		if (!soff)
			checksum = csum_partial_cfu_aligned(
				(const unsigned long __user *) src,
					(unsigned long *) dst,
					len-8, checksum, errp);
				(unsigned long *) dst, len-8);
		else
			checksum = csum_partial_cfu_dest_aligned(
				(const unsigned long __user *) src,
				(unsigned long *) dst,
					soff, len-8, checksum, errp);
				soff, len-8);
	} else {
		unsigned long partial_dest;
		ldq_u(partial_dest, dst);
@@ -356,30 +335,29 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len,
			checksum = csum_partial_cfu_src_aligned(
				(const unsigned long __user *) src,
				(unsigned long *) dst,
					doff, len-8, checksum,
					partial_dest, errp);
				doff, len-8, partial_dest);
		else
			checksum = csum_partial_cfu_unaligned(
				(const unsigned long __user *) src,
				(unsigned long *) dst,
					soff, doff, len-8, checksum,
					partial_dest, errp);
				soff, doff, len-8, partial_dest);
	}
		checksum = from64to16 (checksum);
	return (__force __wsum)from64to16 (checksum);
}
	return (__force __wsum)checksum;

__wsum
csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	if (!access_ok(src, len))
		return 0;
	return __csum_and_copy(src, dst, len);
}
EXPORT_SYMBOL(csum_and_copy_from_user);

__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	__wsum checksum;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	checksum = csum_and_copy_from_user((__force const void __user *)src,
						dst, len, sum, NULL);
	set_fs(oldfs);
	return checksum;
	return __csum_and_copy((__force const void __user *)src,
						dst, len);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
+7 −10
Original line number Diff line number Diff line
@@ -35,23 +35,20 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
 */

__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
csum_partial_copy_nocheck(const void *src, void *dst, int len);

__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
csum_partial_copy_from_user(const void __user *src, void *dst, int len);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
#define _HAVE_ARCH_CSUM_AND_COPY
static inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
				      int len, __wsum sum, int *err_ptr)
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	if (access_ok(src, len))
		return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
	if (!access_ok(src, len))
		return 0;

	if (len)
		*err_ptr = -EFAULT;

	return sum;
	return csum_partial_copy_from_user(src, dst, len);
}

/*
+2 −2
Original line number Diff line number Diff line
@@ -9,8 +9,8 @@

		.text

/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len, __u32 sum)
 * Params  : r0 = src, r1 = dst, r2 = len, r3 = checksum
/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len)
 * Params  : r0 = src, r1 = dst, r2 = len
 * Returns : r0 = new checksum
 */

+1 −0
Original line number Diff line number Diff line
@@ -86,6 +86,7 @@ sum .req r3

FN_ENTRY
		save_regs
		mov	sum, #-1

		cmp	len, #8			@ Ensure that we have at least
		blo	.Lless8			@ 8 bytes to copy.
Loading