author     Linus Torvalds <torvalds@linux-foundation.org>  2020-10-12 16:24:13 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-10-12 16:24:13 -0700
commit     c90578360c92c71189308ebc71087197080e94c3 (patch)
tree       15cccf727f6fe35ffd81922461996c1c2ca1ebfd /arch
parent     Merge tag 'docs-5.10' of git://git.lwn.net/linux (diff)
parent     ppc: propagate the calling conventions change down to csum_partial_copy_generic() (diff)
Merge branch 'work.csum_and_copy' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull copy_and_csum cleanups from Al Viro:
 "Saner calling conventions for csum_and_copy_..._user() and friends"

[ Removing 800+ lines of code and cleaning stuff up is good  - Linus ]

* 'work.csum_and_copy' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  ppc: propagate the calling conventions change down to csum_partial_copy_generic()
  amd64: switch csum_partial_copy_generic() to new calling conventions
  sparc64: propagate the calling convention changes down to __csum_partial_copy_...()
  xtensa: propagate the calling conventions change down into csum_partial_copy_generic()
  mips: propagate the calling convention change down into __csum_partial_copy_..._user()
  mips: __csum_partial_copy_kernel() has no users left
  mips: csum_and_copy_{to,from}_user() are never called under KERNEL_DS
  sparc32: propagate the calling conventions change down to __csum_partial_copy_sparc_generic()
  i386: propagate the calling conventions change down to csum_partial_copy_generic()
  sh: propage the calling conventions change down to csum_partial_copy_generic()
  m68k: get rid of zeroing destination on error in csum_and_copy_from_user()
  arm: propagate the calling convention changes down to csum_partial_copy_from_user()
  alpha: propagate the calling convention changes down to csum_partial_copy.c helpers
  saner calling conventions for csum_and_copy_..._user()
  csum_and_copy_..._user(): pass 0xffffffff instead of 0 as initial sum
  csum_partial_copy_nocheck(): drop the last argument
  unify generic instances of csum_partial_copy_nocheck()
  icmp_push_reply(): reorder adding the checksum up
  skb_copy_and_csum_bits(): don't bother with the last argument
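[ Editor's note: in rough C terms the interface change looks like this -- an
  illustrative sketch distilled from the alpha and arm hunks below, not code
  taken from the patch itself: ]

	/* Before: caller passes an initial sum and an error out-parameter. */
	__wsum csum_and_copy_from_user(const void __user *src, void *dst,
				       int len, __wsum sum, int *errp);

	/* After: the helper seeds its running sum with 0xffffffff and
	 * reports a fault by returning 0 -- a value a genuine checksum
	 * can never take once the sum starts out as ~0U. */
	__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);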
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/checksum.h      |    5
-rw-r--r--  arch/alpha/lib/csum_partial_copy.c     |  164
-rw-r--r--  arch/arm/include/asm/checksum.h        |   17
-rw-r--r--  arch/arm/lib/csumpartialcopy.S         |    4
-rw-r--r--  arch/arm/lib/csumpartialcopygeneric.S  |    1
-rw-r--r--  arch/arm/lib/csumpartialcopyuser.S     |   26
-rw-r--r--  arch/c6x/include/asm/checksum.h        |    3
-rw-r--r--  arch/c6x/lib/csum_64plus.S             |    4
-rw-r--r--  arch/hexagon/include/asm/checksum.h    |   11
-rw-r--r--  arch/hexagon/lib/checksum.c            |   11
-rw-r--r--  arch/ia64/include/asm/checksum.h       |    3
-rw-r--r--  arch/ia64/lib/csum_partial_copy.c      |   15
-rw-r--r--  arch/m68k/include/asm/checksum.h       |    7
-rw-r--r--  arch/m68k/lib/checksum.c               |   88
-rw-r--r--  arch/mips/include/asm/checksum.h       |   68
-rw-r--r--  arch/mips/lib/csum_partial.S           |  261
-rw-r--r--  arch/nios2/include/asm/checksum.h      |    4
-rw-r--r--  arch/parisc/include/asm/checksum.h     |   28
-rw-r--r--  arch/parisc/lib/checksum.c             |   17
-rw-r--r--  arch/powerpc/include/asm/checksum.h    |   13
-rw-r--r--  arch/powerpc/lib/checksum_32.S         |   74
-rw-r--r--  arch/powerpc/lib/checksum_64.S         |   37
-rw-r--r--  arch/powerpc/lib/checksum_wrappers.c   |   74
-rw-r--r--  arch/s390/include/asm/checksum.h       |    7
-rw-r--r--  arch/sh/include/asm/checksum_32.h      |   36
-rw-r--r--  arch/sh/lib/checksum.S                 |  119
-rw-r--r--  arch/sparc/include/asm/checksum.h      |    2
-rw-r--r--  arch/sparc/include/asm/checksum_32.h   |   70
-rw-r--r--  arch/sparc/include/asm/checksum_64.h   |   39
-rw-r--r--  arch/sparc/lib/checksum_32.S           |  202
-rw-r--r--  arch/sparc/lib/csum_copy.S             |    3
-rw-r--r--  arch/sparc/lib/csum_copy_from_user.S   |    4
-rw-r--r--  arch/sparc/lib/csum_copy_to_user.S     |    4
-rw-r--r--  arch/sparc/mm/fault_32.c               |    6
-rw-r--r--  arch/x86/include/asm/checksum.h        |    1
-rw-r--r--  arch/x86/include/asm/checksum_32.h     |   40
-rw-r--r--  arch/x86/include/asm/checksum_64.h     |   14
-rw-r--r--  arch/x86/lib/checksum_32.S             |  117
-rw-r--r--  arch/x86/lib/csum-copy_64.S            |  140
-rw-r--r--  arch/x86/lib/csum-wrappers_64.c        |   86
-rw-r--r--  arch/x86/um/asm/checksum.h             |   16
-rw-r--r--  arch/x86/um/asm/checksum_32.h          |   23
-rw-r--r--  arch/xtensa/include/asm/checksum.h     |   34
-rw-r--r--  arch/xtensa/lib/checksum.S             |   67
44 files changed, 566 insertions, 1399 deletions
diff --git a/arch/alpha/include/asm/checksum.h b/arch/alpha/include/asm/checksum.h
index 0eac81624d01..99d631e146b2 100644
--- a/arch/alpha/include/asm/checksum.h
+++ b/arch/alpha/include/asm/checksum.h
@@ -42,9 +42,10 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
-__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
/*
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index af1dad74e933..dc68efbe9367 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -39,12 +39,11 @@ __asm__ __volatile__("insql %1,%2,%0":"=r" (z):"r" (x),"r" (y))
#define insqh(x,y,z) \
__asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
-
-#define __get_user_u(x,ptr) \
+#define __get_word(insn,x,ptr) \
({ \
long __guu_err; \
__asm__ __volatile__( \
- "1: ldq_u %0,%2\n" \
+ "1: "#insn" %0,%2\n" \
"2:\n" \
EXC(1b,2b,%0,%1) \
: "=r"(x), "=r"(__guu_err) \
@@ -52,19 +51,6 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
__guu_err; \
})
-#define __put_user_u(x,ptr) \
-({ \
- long __puu_err; \
- __asm__ __volatile__( \
- "1: stq_u %2,%1\n" \
- "2:\n" \
- EXC(1b,2b,$31,%0) \
- : "=r"(__puu_err) \
- : "m"(__m(addr)), "rJ"(x), "0"(0)); \
- __puu_err; \
-})
-
-
static inline unsigned short from64to16(unsigned long x)
{
/* Using extract instructions is a bit more efficient
@@ -95,15 +81,15 @@ static inline unsigned short from64to16(unsigned long x)
*/
static inline unsigned long
csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
- long len, unsigned long checksum,
- int *errp)
+ long len)
{
+ unsigned long checksum = ~0U;
unsigned long carry = 0;
- int err = 0;
while (len >= 0) {
unsigned long word;
- err |= __get_user(word, src);
+ if (__get_word(ldq, word, src))
+ return 0;
checksum += carry;
src++;
checksum += word;
@@ -116,7 +102,8 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
checksum += carry;
if (len) {
unsigned long word, tmp;
- err |= __get_user(word, src);
+ if (__get_word(ldq, word, src))
+ return 0;
tmp = *dst;
mskql(word, len, word);
checksum += word;
@@ -125,7 +112,6 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
*dst = word | tmp;
checksum += carry;
}
- if (err && errp) *errp = err;
return checksum;
}
@@ -137,20 +123,21 @@ static inline unsigned long
csum_partial_cfu_dest_aligned(const unsigned long __user *src,
unsigned long *dst,
unsigned long soff,
- long len, unsigned long checksum,
- int *errp)
+ long len)
{
unsigned long first;
unsigned long word, carry;
unsigned long lastsrc = 7+len+(unsigned long)src;
- int err = 0;
+ unsigned long checksum = ~0U;
- err |= __get_user_u(first,src);
+ if (__get_word(ldq_u, first,src))
+ return 0;
carry = 0;
while (len >= 0) {
unsigned long second;
- err |= __get_user_u(second, src+1);
+ if (__get_word(ldq_u, second, src+1))
+ return 0;
extql(first, soff, word);
len -= 8;
src++;
@@ -168,7 +155,8 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
if (len) {
unsigned long tmp;
unsigned long second;
- err |= __get_user_u(second, lastsrc);
+ if (__get_word(ldq_u, second, lastsrc))
+ return 0;
tmp = *dst;
extql(first, soff, word);
extqh(second, soff, first);
@@ -180,7 +168,6 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
*dst = word | tmp;
checksum += carry;
}
- if (err && errp) *errp = err;
return checksum;
}
@@ -191,18 +178,18 @@ static inline unsigned long
csum_partial_cfu_src_aligned(const unsigned long __user *src,
unsigned long *dst,
unsigned long doff,
- long len, unsigned long checksum,
- unsigned long partial_dest,
- int *errp)
+ long len,
+ unsigned long partial_dest)
{
unsigned long carry = 0;
unsigned long word;
unsigned long second_dest;
- int err = 0;
+ unsigned long checksum = ~0U;
mskql(partial_dest, doff, partial_dest);
while (len >= 0) {
- err |= __get_user(word, src);
+ if (__get_word(ldq, word, src))
+ return 0;
len -= 8;
insql(word, doff, second_dest);
checksum += carry;
@@ -216,7 +203,8 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
len += 8;
if (len) {
checksum += carry;
- err |= __get_user(word, src);
+ if (__get_word(ldq, word, src))
+ return 0;
mskql(word, len, word);
len -= 8;
checksum += word;
@@ -237,7 +225,6 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
stq_u(partial_dest | second_dest, dst);
out:
checksum += carry;
- if (err && errp) *errp = err;
return checksum;
}
@@ -249,23 +236,23 @@ static inline unsigned long
csum_partial_cfu_unaligned(const unsigned long __user * src,
unsigned long * dst,
unsigned long soff, unsigned long doff,
- long len, unsigned long checksum,
- unsigned long partial_dest,
- int *errp)
+ long len, unsigned long partial_dest)
{
unsigned long carry = 0;
unsigned long first;
unsigned long lastsrc;
- int err = 0;
+ unsigned long checksum = ~0U;
- err |= __get_user_u(first, src);
+ if (__get_word(ldq_u, first, src))
+ return 0;
lastsrc = 7+len+(unsigned long)src;
mskql(partial_dest, doff, partial_dest);
while (len >= 0) {
unsigned long second, word;
unsigned long second_dest;
- err |= __get_user_u(second, src+1);
+ if (__get_word(ldq_u, second, src+1))
+ return 0;
extql(first, soff, word);
checksum += carry;
len -= 8;
@@ -286,7 +273,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
unsigned long second, word;
unsigned long second_dest;
- err |= __get_user_u(second, lastsrc);
+ if (__get_word(ldq_u, second, lastsrc))
+ return 0;
extql(first, soff, word);
extqh(second, soff, first);
word |= first;
@@ -307,7 +295,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
unsigned long second, word;
unsigned long second_dest;
- err |= __get_user_u(second, lastsrc);
+ if (__get_word(ldq_u, second, lastsrc))
+ return 0;
extql(first, soff, word);
extqh(second, soff, first);
word |= first;
@@ -320,66 +309,55 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
stq_u(partial_dest | word | second_dest, dst);
checksum += carry;
}
- if (err && errp) *errp = err;
return checksum;
}
-__wsum
-csum_and_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *errp)
+static __wsum __csum_and_copy(const void __user *src, void *dst, int len)
{
- unsigned long checksum = (__force u32) sum;
unsigned long soff = 7 & (unsigned long) src;
unsigned long doff = 7 & (unsigned long) dst;
-
- if (len) {
- if (!access_ok(src, len)) {
- if (errp) *errp = -EFAULT;
- memset(dst, 0, len);
- return sum;
- }
- if (!doff) {
- if (!soff)
- checksum = csum_partial_cfu_aligned(
- (const unsigned long __user *) src,
- (unsigned long *) dst,
- len-8, checksum, errp);
- else
- checksum = csum_partial_cfu_dest_aligned(
- (const unsigned long __user *) src,
- (unsigned long *) dst,
- soff, len-8, checksum, errp);
- } else {
- unsigned long partial_dest;
- ldq_u(partial_dest, dst);
- if (!soff)
- checksum = csum_partial_cfu_src_aligned(
- (const unsigned long __user *) src,
- (unsigned long *) dst,
- doff, len-8, checksum,
- partial_dest, errp);
- else
- checksum = csum_partial_cfu_unaligned(
- (const unsigned long __user *) src,
- (unsigned long *) dst,
- soff, doff, len-8, checksum,
- partial_dest, errp);
- }
- checksum = from64to16 (checksum);
+ unsigned long checksum;
+
+ if (!doff) {
+ if (!soff)
+ checksum = csum_partial_cfu_aligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst, len-8);
+ else
+ checksum = csum_partial_cfu_dest_aligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst,
+ soff, len-8);
+ } else {
+ unsigned long partial_dest;
+ ldq_u(partial_dest, dst);
+ if (!soff)
+ checksum = csum_partial_cfu_src_aligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst,
+ doff, len-8, partial_dest);
+ else
+ checksum = csum_partial_cfu_unaligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst,
+ soff, doff, len-8, partial_dest);
}
- return (__force __wsum)checksum;
+ return (__force __wsum)from64to16 (checksum);
+}
+
+__wsum
+csum_and_copy_from_user(const void __user *src, void *dst, int len)
+{
+ if (!access_ok(src, len))
+ return 0;
+ return __csum_and_copy(src, dst, len);
}
EXPORT_SYMBOL(csum_and_copy_from_user);
__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
- __wsum checksum;
- mm_segment_t oldfs = get_fs();
- set_fs(KERNEL_DS);
- checksum = csum_and_copy_from_user((__force const void __user *)src,
- dst, len, sum, NULL);
- set_fs(oldfs);
- return checksum;
+ return __csum_and_copy((__force const void __user *)src,
+ dst, len);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
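[ Editor's note: under the new convention a caller no longer threads an error
  pointer through; it only tests the returned sum. A minimal, hypothetical
  caller (copy_and_checksum is an invented name, not part of this patch)
  might look like: ]

	static int copy_and_checksum(const void __user *src, void *dst, int len)
	{
		__wsum csum = csum_and_copy_from_user(src, dst, len);

		if (!csum)		/* 0 is unreachable for a genuine sum */
			return -EFAULT;
		/* ... fold csum into the protocol checksum ... */
		return 0;
	}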
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index ed6073fee338..f0f54aef3724 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -35,23 +35,20 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+csum_partial_copy_nocheck(const void *src, void *dst, int len);
__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+csum_partial_copy_from_user(const void __user *src, void *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
static inline
-__wsum csum_and_copy_from_user (const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
- if (access_ok(src, len))
- return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ if (!access_ok(src, len))
+ return 0;
- if (len)
- *err_ptr = -EFAULT;
-
- return sum;
+ return csum_partial_copy_from_user(src, dst, len);
}
/*
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S
index 184d97254a7a..1ca6aadd649c 100644
--- a/arch/arm/lib/csumpartialcopy.S
+++ b/arch/arm/lib/csumpartialcopy.S
@@ -9,8 +9,8 @@
.text
-/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len, __u32 sum)
- * Params : r0 = src, r1 = dst, r2 = len, r3 = checksum
+/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len)
+ * Params : r0 = src, r1 = dst, r2 = len
* Returns : r0 = new checksum
*/
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 0b706a39a677..0fd5c10e90a7 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -86,6 +86,7 @@ sum .req r3
FN_ENTRY
save_regs
+ mov sum, #-1
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 6bd3a93eaa3c..6928781e6bee 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -62,9 +62,9 @@
/*
* unsigned int
- * csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *err_ptr)
- * r0 = src, r1 = dst, r2 = len, r3 = sum, [sp] = *err_ptr
- * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
+ * csum_partial_copy_from_user(const char *src, char *dst, int len)
+ * r0 = src, r1 = dst, r2 = len
+ * Returns : r0 = checksum or 0
*/
#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
@@ -73,25 +73,11 @@
#include "csumpartialcopygeneric.S"
/*
- * FIXME: minor buglet here
- * We don't return the checksum for the data present in the buffer. To do
- * so properly, we would have to add in whatever registers were loaded before
- * the fault, which, with the current asm above is not predictable.
+ * We report fault by returning 0 csum - impossible in normal case, since
+ * we start with 0xffffffff for initial sum.
*/
.pushsection .text.fixup,"ax"
.align 4
-9001: mov r4, #-EFAULT
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- ldr r5, [sp, #9*4] @ *err_ptr
-#else
- ldr r5, [sp, #8*4] @ *err_ptr
-#endif
- str r4, [r5]
- ldmia sp, {r1, r2} @ retrieve dst, len
- add r2, r2, r1
- mov r0, #0 @ zero the buffer
-9002: teq r2, r1
- strbne r0, [r1], #1
- bne 9002b
+9001: mov r0, #0
load_regs
.popsection
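[ Editor's note: the rewritten fixup path relies on a property of
  ones'-complement arithmetic: a running sum seeded with 0xffffffff (the
  "mov sum, #-1" added above) can never come out as 0, so 0 is free to mean
  "fault". A small stand-alone demonstration of the invariant -- csum32_add
  and csum_fold32 are illustrative names, not kernel functions: ]

	#include <stdint.h>

	/* 32-bit ones'-complement add with end-around carry, as the
	 * checksum loops do it. The result is 0 only when both inputs
	 * are 0, so a sum that starts at 0xffffffff stays nonzero no
	 * matter what data is added to it. */
	static uint32_t csum32_add(uint32_t a, uint32_t b)
	{
		uint64_t s = (uint64_t)a + b;
		return (uint32_t)((s & 0xffffffff) + (s >> 32));
	}

	/* Folding to 16 bits preserves the property: a nonzero 32-bit
	 * sum folds to a nonzero 16-bit sum by the same argument. */
	static uint16_t csum_fold32(uint32_t x)
	{
		x = (x & 0xffff) + (x >> 16);
		x = (x & 0xffff) + (x >> 16);
		return (uint16_t)x;
	}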
diff --git a/arch/c6x/include/asm/checksum.h b/arch/c6x/include/asm/checksum.h
index 36770b8308d9..934918def632 100644
--- a/arch/c6x/include/asm/checksum.h
+++ b/arch/c6x/include/asm/checksum.h
@@ -26,6 +26,9 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
}
#define csum_tcpudp_nofold csum_tcpudp_nofold
+#define _HAVE_ARCH_CSUM_AND_COPY
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
+
#include <asm-generic/checksum.h>
#endif /* _ASM_C6X_CHECKSUM_H */
diff --git a/arch/c6x/lib/csum_64plus.S b/arch/c6x/lib/csum_64plus.S
index 9c07127485d1..57148866d8d3 100644
--- a/arch/c6x/lib/csum_64plus.S
+++ b/arch/c6x/lib/csum_64plus.S
@@ -24,7 +24,6 @@
ENTRY(csum_partial_copy_nocheck)
MVC .S2 ILC,B30
- MV .D1X B6,A31 ; given csum
ZERO .D1 A9 ; csum (a side)
|| ZERO .D2 B9 ; csum (b side)
|| SHRU .S2X A6,2,B5 ; len / 4
@@ -144,8 +143,7 @@ L91: SHRU .S2X A9,16,B4
SHRU .S1 A9,16,A0
[A0] BNOP .S1 L91,5
-L10: ADD .D1 A31,A9,A9
- MV .D1 A9,A4
+L10: MV .D1 A9,A4
BNOP .S2 B3,4
MVC .S2 B30,ILC
diff --git a/arch/hexagon/include/asm/checksum.h b/arch/hexagon/include/asm/checksum.h
index a5c42f4614c1..4bc6ad96c4c5 100644
--- a/arch/hexagon/include/asm/checksum.h
+++ b/arch/hexagon/include/asm/checksum.h
@@ -10,17 +10,6 @@
unsigned int do_csum(const void *voidptr, int len);
/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-#define csum_partial_copy_nocheck csum_partial_copy_nocheck
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-
-/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c
index c4a6b72d97de..ba50822a0800 100644
--- a/arch/hexagon/lib/checksum.c
+++ b/arch/hexagon/lib/checksum.c
@@ -176,14 +176,3 @@ unsigned int do_csum(const void *voidptr, int len)
return 0xFFFF & sum0;
}
-
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
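[ Editor's note: hexagon (like ia64, parisc and s390 below) can drop its
  trivial memcpy()-plus-csum_partial() helper because the shortlog's "unify
  generic instances of csum_partial_copy_nocheck()" commit adds one shared
  fallback in generic code. From memory of that commit, the fallback is
  roughly: ]

	/* Generic fallback (sketch), used when an architecture does not
	 * define _HAVE_ARCH_CSUM_AND_COPY: copy first, then checksum the
	 * destination copy. */
	static __always_inline
	__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
	{
		memcpy(dst, src, len);
		return csum_partial(dst, len, 0);
	}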
diff --git a/arch/ia64/include/asm/checksum.h b/arch/ia64/include/asm/checksum.h
index 2a1c64629cdc..f3026213aa32 100644
--- a/arch/ia64/include/asm/checksum.h
+++ b/arch/ia64/include/asm/checksum.h
@@ -37,9 +37,6 @@ extern __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-
/*
* This routine is used for miscellaneous IP-like checksums, mainly in
* icmp.c
diff --git a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c
index 6e82e0be8040..917e3138b277 100644
--- a/arch/ia64/lib/csum_partial_copy.c
+++ b/arch/ia64/lib/csum_partial_copy.c
@@ -96,18 +96,3 @@ unsigned long do_csum_c(const unsigned char * buff, int len, unsigned int psum)
out:
return result;
}
-
-/*
- * XXX Fixme
- *
- * This is very ugly but temporary. THIS NEEDS SERIOUS ENHANCEMENTS.
- * But it's very tricky to get right even in C.
- */
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
-
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h
index 3f2c15d6f18c..692e7b6cc042 100644
--- a/arch/m68k/include/asm/checksum.h
+++ b/arch/m68k/include/asm/checksum.h
@@ -31,14 +31,13 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
extern __wsum csum_and_copy_from_user(const void __user *src,
void *dst,
- int len, __wsum sum,
- int *csum_err);
+ int len);
extern __wsum csum_partial_copy_nocheck(const void *src,
- void *dst, int len,
- __wsum sum);
+ void *dst, int len);
/*
* This is a version of ip_fast_csum() optimized for IP headers,
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index 31797be9a3dc..7e6afeae6217 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -129,8 +129,7 @@ EXPORT_SYMBOL(csum_partial);
*/
__wsum
-csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *csum_err)
+csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
/*
* GCC doesn't like more than 10 operands for the asm
@@ -138,6 +137,7 @@ csum_and_copy_from_user(const void __user *src, void *dst,
* code.
*/
unsigned long tmp1, tmp2;
+ __wsum sum = ~0U;
__asm__("movel %2,%4\n\t"
"btst #1,%4\n\t" /* Check alignment */
@@ -236,84 +236,33 @@ csum_and_copy_from_user(const void __user *src, void *dst,
"clrl %5\n\t"
"addxl %5,%0\n\t" /* add X bit */
"7:\t"
- "clrl %5\n" /* no error - clear return value */
- "8:\n"
".section .fixup,\"ax\"\n"
".even\n"
- /* If any exception occurs zero out the rest.
- Similarities with the code above are intentional :-) */
+ /* If any exception occurs, return 0 */
"90:\t"
- "clrw %3@+\n\t"
- "movel %1,%4\n\t"
- "lsrl #5,%1\n\t"
- "jeq 1f\n\t"
- "subql #1,%1\n"
- "91:\t"
- "clrl %3@+\n"
- "92:\t"
- "clrl %3@+\n"
- "93:\t"
- "clrl %3@+\n"
- "94:\t"
- "clrl %3@+\n"
- "95:\t"
- "clrl %3@+\n"
- "96:\t"
- "clrl %3@+\n"
- "97:\t"
- "clrl %3@+\n"
- "98:\t"
- "clrl %3@+\n\t"
- "dbra %1,91b\n\t"
- "clrw %1\n\t"
- "subql #1,%1\n\t"
- "jcc 91b\n"
- "1:\t"
- "movel %4,%1\n\t"
- "andw #0x1c,%4\n\t"
- "jeq 1f\n\t"
- "lsrw #2,%4\n\t"
- "subqw #1,%4\n"
- "99:\t"
- "clrl %3@+\n\t"
- "dbra %4,99b\n\t"
- "1:\t"
- "andw #3,%1\n\t"
- "jeq 9f\n"
- "100:\t"
- "clrw %3@+\n\t"
- "tstw %1\n\t"
- "jeq 9f\n"
- "101:\t"
- "clrb %3@+\n"
- "9:\t"
-#define STR(X) STR1(X)
-#define STR1(X) #X
- "moveq #-" STR(EFAULT) ",%5\n\t"
- "jra 8b\n"
+ "clrl %0\n"
+ "jra 7b\n"
".previous\n"
".section __ex_table,\"a\"\n"
".long 10b,90b\n"
- ".long 11b,91b\n"
- ".long 12b,92b\n"
- ".long 13b,93b\n"
- ".long 14b,94b\n"
- ".long 15b,95b\n"
- ".long 16b,96b\n"
- ".long 17b,97b\n"
- ".long 18b,98b\n"
- ".long 19b,99b\n"
- ".long 20b,100b\n"
- ".long 21b,101b\n"
+ ".long 11b,90b\n"
+ ".long 12b,90b\n"
+ ".long 13b,90b\n"
+ ".long 14b,90b\n"
+ ".long 15b,90b\n"
+ ".long 16b,90b\n"
+ ".long 17b,90b\n"
+ ".long 18b,90b\n"
+ ".long 19b,90b\n"
+ ".long 20b,90b\n"
+ ".long 21b,90b\n"
".previous"
: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
"=&d" (tmp1), "=d" (tmp2)
: "0" (sum), "1" (len), "2" (src), "3" (dst)
);
- *csum_err = tmp2;
-
- return(sum);
+ return sum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -324,9 +273,10 @@ EXPORT_SYMBOL(csum_and_copy_from_user);
*/
__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
unsigned long tmp1, tmp2;
+ __wsum sum = 0;
__asm__("movel %2,%4\n\t"
"btst #1,%4\n\t" /* Check alignment */
"jeq 2f\n\t"
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 181f7d14efb9..5f80c28f5253 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -34,42 +34,17 @@
*/
__wsum csum_partial(const void *buff, int len, __wsum sum);
-__wsum __csum_partial_copy_kernel(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
-
-__wsum __csum_partial_copy_from_user(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
-__wsum __csum_partial_copy_to_user(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
-/*
- * this is a new version of the above that records errors it finds in *errp,
- * but continues and zeros the rest of the buffer.
- */
-static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *err_ptr)
-{
- might_fault();
- if (uaccess_kernel())
- return __csum_partial_copy_kernel((__force void *)src, dst,
- len, sum, err_ptr);
- else
- return __csum_partial_copy_from_user((__force void *)src, dst,
- len, sum, err_ptr);
-}
+__wsum __csum_partial_copy_from_user(const void __user *src, void *dst, int len);
+__wsum __csum_partial_copy_to_user(const void *src, void __user *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
-__wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
- if (access_ok(src, len))
- return csum_partial_copy_from_user(src, dst, len, sum,
- err_ptr);
- if (len)
- *err_ptr = -EFAULT;
-
- return sum;
+ might_fault();
+ if (!access_ok(src, len))
+ return 0;
+ return __csum_partial_copy_from_user(src, dst, len);
}
/*
@@ -77,33 +52,24 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
*/
#define HAVE_CSUM_COPY_USER
static inline
-__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
- __wsum sum, int *err_ptr)
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
might_fault();
- if (access_ok(dst, len)) {
- if (uaccess_kernel())
- return __csum_partial_copy_kernel(src,
- (__force void *)dst,
- len, sum, err_ptr);
- else
- return __csum_partial_copy_to_user(src,
- (__force void *)dst,
- len, sum, err_ptr);
- }
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
+ if (!access_ok(dst, len))
+ return 0;
+ return __csum_partial_copy_to_user(src, dst, len);
}
/*
* the same as csum_partial, but copies from user space (but on MIPS
* we have just one address space, so this is identical to the above)
*/
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-#define csum_partial_copy_nocheck csum_partial_copy_nocheck
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len);
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+ return __csum_partial_copy_nocheck(src, dst, len);
+}
/*
* Fold a partial checksum without adding pseudo headers
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 87fda0713b84..a46db0807195 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -308,8 +308,8 @@ EXPORT_SYMBOL(csum_partial)
/*
* checksum and copy routines based on memcpy.S
*
- * csum_partial_copy_nocheck(src, dst, len, sum)
- * __csum_partial_copy_kernel(src, dst, len, sum, errp)
+ * csum_partial_copy_nocheck(src, dst, len)
+ * __csum_partial_copy_kernel(src, dst, len)
*
* See "Spec" in memcpy.S for details. Unlike __copy_user, all
* function in this file use the standard calling convention.
@@ -318,26 +318,11 @@ EXPORT_SYMBOL(csum_partial)
#define src a0
#define dst a1
#define len a2
-#define psum a3
#define sum v0
#define odd t8
-#define errptr t9
/*
- * The exception handler for loads requires that:
- * 1- AT contain the address of the byte just past the end of the source
- * of the copy,
- * 2- src_entry <= src < AT, and
- * 3- (dst - src) == (dst_entry - src_entry),
- * The _entry suffix denotes values when __copy_user was called.
- *
- * (1) is set up up by __csum_partial_copy_from_user and maintained by
- * not writing AT in __csum_partial_copy
- * (2) is met by incrementing src by the number of bytes copied
- * (3) is met by not doing loads between a pair of increments of dst and src
- *
- * The exception handlers for stores stores -EFAULT to errptr and return.
- * These handlers do not need to overwrite any data.
+ * All exception handlers simply return 0.
*/
/* Instruction type */
@@ -358,11 +343,11 @@ EXPORT_SYMBOL(csum_partial)
* addr : Address
* handler : Exception handler
*/
-#define EXC(insn, type, reg, addr, handler) \
+#define EXC(insn, type, reg, addr) \
.if \mode == LEGACY_MODE; \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR 9b, .L_exc; \
.previous; \
/* This is enabled in EVA mode */ \
.else; \
@@ -371,7 +356,7 @@ EXPORT_SYMBOL(csum_partial)
((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR 9b, .L_exc; \
.previous; \
.else; \
/* EVA without exception */ \
@@ -384,14 +369,14 @@ EXPORT_SYMBOL(csum_partial)
#ifdef USE_DOUBLE
#define LOADK ld /* No exception */
-#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr) EXC(ld, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(ldl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(ldr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(sdl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(sdr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sd, ST_INSN, reg, addr)
#define ADD daddu
#define SUB dsubu
#define SRL dsrl
@@ -404,14 +389,14 @@ EXPORT_SYMBOL(csum_partial)
#else
#define LOADK lw /* No exception */
-#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr) EXC(lw, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(lwl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(lwr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(swl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(swr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sw, ST_INSN, reg, addr)
#define ADD addu
#define SUB subu
#define SRL srl
@@ -450,22 +435,9 @@ EXPORT_SYMBOL(csum_partial)
.set at=v1
#endif
- .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
+ .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to
- PTR_ADDU AT, src, len /* See (1) above. */
- /* initialize __nocheck if this the first time we execute this
- * macro
- */
-#ifdef CONFIG_64BIT
- move errptr, a4
-#else
- lw errptr, 16(sp)
-#endif
- .if \__nocheck == 1
- FEXPORT(csum_partial_copy_nocheck)
- EXPORT_SYMBOL(csum_partial_copy_nocheck)
- .endif
- move sum, zero
+ li sum, -1
move odd, zero
/*
* Note: dst & src may be unaligned, len may be 0
@@ -497,31 +469,31 @@ EXPORT_SYMBOL(csum_partial)
SUB len, 8*NBYTES # subtract here for bgez loop
.align 4
1:
- LOAD(t0, UNIT(0)(src), .Ll_exc\@)
- LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
- LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
- LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
- LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
- LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
- LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
- LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
+ LOAD(t4, UNIT(4)(src))
+ LOAD(t5, UNIT(5)(src))
+ LOAD(t6, UNIT(6)(src))
+ LOAD(t7, UNIT(7)(src))
SUB len, len, 8*NBYTES
ADD src, src, 8*NBYTES
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
- STORE(t4, UNIT(4)(dst), .Ls_exc\@)
+ STORE(t4, UNIT(4)(dst))
ADDC(t4, t5)
- STORE(t5, UNIT(5)(dst), .Ls_exc\@)
+ STORE(t5, UNIT(5)(dst))
ADDC(sum, t4)
- STORE(t6, UNIT(6)(dst), .Ls_exc\@)
+ STORE(t6, UNIT(6)(dst))
ADDC(t6, t7)
- STORE(t7, UNIT(7)(dst), .Ls_exc\@)
+ STORE(t7, UNIT(7)(dst))
ADDC(sum, t6)
.set reorder /* DADDI_WAR */
ADD dst, dst, 8*NBYTES
@@ -541,19 +513,19 @@ EXPORT_SYMBOL(csum_partial)
/*
* len >= 4*NBYTES
*/
- LOAD(t0, UNIT(0)(src), .Ll_exc\@)
- LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
- LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
- LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
SUB len, len, 4*NBYTES
ADD src, src, 4*NBYTES
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@@ -566,10 +538,10 @@ EXPORT_SYMBOL(csum_partial)
beq rem, len, .Lcopy_bytes\@
nop
1:
- LOAD(t0, 0(src), .Ll_exc\@)
+ LOAD(t0, 0(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
- STORE(t0, 0(dst), .Ls_exc\@)
+ STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@@ -592,10 +564,10 @@ EXPORT_SYMBOL(csum_partial)
ADD t1, dst, len # t1 is just past last byte of dst
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
- LOAD(t0, 0(src), .Ll_exc\@)
+ LOAD(t0, 0(src))
SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
- STREST(t0, -1(t1), .Ls_exc\@)
+ STREST(t0, -1(t1))
SHIFT_DISCARD_REVERT t0, t0, bits
.set reorder
ADDC(sum, t0)
@@ -612,12 +584,12 @@ EXPORT_SYMBOL(csum_partial)
* Set match = (src and dst have same alignment)
*/
#define match rem
- LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
+ LDFIRST(t3, FIRST(0)(src))
ADD t2, zero, NBYTES
- LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
+ LDREST(t3, REST(0)(src))
SUB t2, t2, t1 # t2 = number of bytes copied
xor match, t0, t1
- STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+ STFIRST(t3, FIRST(0)(dst))
SLL t4, t1, 3 # t4 = number of bits to discard
SHIFT_DISCARD t3, t3, t4
/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
@@ -639,26 +611,26 @@ EXPORT_SYMBOL(csum_partial)
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
- LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
- LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
+ LDFIRST(t0, FIRST(0)(src))
+ LDFIRST(t1, FIRST(1)(src))
SUB len, len, 4*NBYTES
- LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
- LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
- LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
- LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
- LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
- LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+ LDREST(t0, REST(0)(src))
+ LDREST(t1, REST(1)(src))
+ LDFIRST(t2, FIRST(2)(src))
+ LDFIRST(t3, FIRST(3)(src))
+ LDREST(t2, REST(2)(src))
+ LDREST(t3, REST(3)(src))
ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
nop # improves slotting
#endif
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@@ -671,11 +643,11 @@ EXPORT_SYMBOL(csum_partial)
beq rem, len, .Lcopy_bytes\@
nop
1:
- LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
- LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+ LDFIRST(t0, FIRST(0)(src))
+ LDREST(t0, REST(0)(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
- STORE(t0, 0(dst), .Ls_exc\@)
+ STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@@ -696,11 +668,10 @@ EXPORT_SYMBOL(csum_partial)
#endif
move t2, zero # partial word
li t3, SHIFT_START # shift
-/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N) \
- LOADBU(t0, N(src), .Ll_exc_copy\@); \
+ LOADBU(t0, N(src)); \
SUB len, len, 1; \
- STOREB(t0, N(dst), .Ls_exc\@); \
+ STOREB(t0, N(dst)); \
SLLV t0, t0, t3; \
addu t3, SHIFT_INC; \
beqz len, .Lcopy_bytes_done\@; \
@@ -714,9 +685,9 @@ EXPORT_SYMBOL(csum_partial)
COPY_BYTE(4)
COPY_BYTE(5)
#endif
- LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
+ LOADBU(t0, NBYTES-2(src))
SUB len, len, 1
- STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
+ STOREB(t0, NBYTES-2(dst))
SLLV t0, t0, t3
or t2, t0
.Lcopy_bytes_done\@:
@@ -753,97 +724,31 @@ EXPORT_SYMBOL(csum_partial)
#endif
.set pop
.set reorder
- ADDC32(sum, psum)
jr ra
.set noreorder
+ .endm
-.Ll_exc_copy\@:
- /*
- * Copy bytes from src until faulting load address (or until a
- * lb faults)
- *
- * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
- * may be more than a byte beyond the last address.
- * Hence, the lb below may get an exception.
- *
- * Assumes src < THREAD_BUADDR($28)
- */
- LOADK t0, TI_TASK($28)
- li t2, SHIFT_START
- LOADK t0, THREAD_BUADDR(t0)
-1:
- LOADBU(t1, 0(src), .Ll_exc\@)
- ADD src, src, 1
- sb t1, 0(dst) # can't fault -- we're copy_from_user
- SLLV t1, t1, t2
- addu t2, SHIFT_INC
- ADDC(sum, t1)
- .set reorder /* DADDI_WAR */
- ADD dst, dst, 1
- bne src, t0, 1b
- .set noreorder
-.Ll_exc\@:
- LOADK t0, TI_TASK($28)
- nop
- LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
- nop
- SUB len, AT, t0 # len number of uncopied bytes
- /*
- * Here's where we rely on src and dst being incremented in tandem,
- * See (3) above.
- * dst += (fault addr - src) to put dst at first byte to clear
- */
- ADD dst, t0 # compute start address in a1
- SUB dst, src
- /*
- * Clear len bytes starting at dst. Can't call __bzero because it
- * might modify len. An inefficient loop for these rare times...
- */
- .set reorder /* DADDI_WAR */
- SUB src, len, 1
- beqz len, .Ldone\@
- .set noreorder
-1: sb zero, 0(dst)
- ADD dst, dst, 1
- .set push
- .set noat
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
- bnez src, 1b
- SUB src, src, 1
-#else
- li v1, 1
- bnez src, 1b
- SUB src, src, v1
-#endif
- li v1, -EFAULT
- b .Ldone\@
- sw v1, (errptr)
-
-.Ls_exc\@:
- li v0, -1 /* invalid checksum */
- li v1, -EFAULT
+ .set noreorder
+.L_exc:
jr ra
- sw v1, (errptr)
- .set pop
- .endm
+ li v0, 0
-LEAF(__csum_partial_copy_kernel)
-EXPORT_SYMBOL(__csum_partial_copy_kernel)
+FEXPORT(__csum_partial_copy_nocheck)
+EXPORT_SYMBOL(__csum_partial_copy_nocheck)
#ifndef CONFIG_EVA
FEXPORT(__csum_partial_copy_to_user)
EXPORT_SYMBOL(__csum_partial_copy_to_user)
FEXPORT(__csum_partial_copy_from_user)
EXPORT_SYMBOL(__csum_partial_copy_from_user)
#endif
-__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
-END(__csum_partial_copy_kernel)
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP
#ifdef CONFIG_EVA
LEAF(__csum_partial_copy_to_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP
END(__csum_partial_copy_to_user)
LEAF(__csum_partial_copy_from_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP
END(__csum_partial_copy_from_user)
#endif
diff --git a/arch/nios2/include/asm/checksum.h b/arch/nios2/include/asm/checksum.h
index b4316c361729..69004e07a1ba 100644
--- a/arch/nios2/include/asm/checksum.h
+++ b/arch/nios2/include/asm/checksum.h
@@ -12,10 +12,6 @@
/* Take these from lib/checksum.c */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
- __wsum sum);
-#define csum_partial_copy_nocheck csum_partial_copy_nocheck
-
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
extern __sum16 ip_compute_csum(const void *buff, int len);
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index fe8c63b2d2c3..3c43baca7b39 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -19,14 +19,6 @@
extern __wsum csum_partial(const void *, int, __wsum);
/*
- * The same as csum_partial, but copies from src while it checksums.
- *
- * Here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);
-
-/*
* Optimized for IP headers, which always checksum on 4 octet boundaries.
*
* Written by Randolph Chung <tausq@debian.org>, and then mucked with by
@@ -181,25 +173,5 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold(sum);
}
-/*
- * Copy and checksum to user
- */
-#define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src,
- void __user *dst,
- int len, __wsum sum,
- int *err_ptr)
-{
- /* code stolen from include/asm-mips64 */
- sum = csum_partial(src, len, sum);
-
- if (copy_to_user(dst, src, len)) {
- *err_ptr = -EFAULT;
- return (__force __wsum)-1;
- }
-
- return sum;
-}
-
#endif
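[ Editor's note: with its private csum_and_copy_to_user() deleted above,
  parisc falls back to the generic helper this series reworks elsewhere.
  That helper is approximately the following -- a sketch from memory of the
  series under the new convention, not code from this hunk: ]

	static __always_inline
	__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
	{
		/* seed with ~0U so a real sum can never be 0 */
		__wsum sum = csum_partial(src, len, (__force __wsum)~0U);

		if (copy_to_user(dst, src, len) == 0)
			return sum;
		return 0;	/* fault */
	}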
diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c
index c6f161583549..4818f3db84a5 100644
--- a/arch/parisc/lib/checksum.c
+++ b/arch/parisc/lib/checksum.c
@@ -106,20 +106,3 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
}
EXPORT_SYMBOL(csum_partial);
-
-/*
- * copy while checksumming, otherwise like csum_partial
- */
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
-{
- /*
- * It's 2:30 am and I don't feel like doing it real ...
- * This is lots slower than the real thing (tm)
- */
- sum = csum_partial(src, len, sum);
- memcpy(dst, src, len);
-
- return sum;
-}
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 9cce06194dcc..82f099ba2411 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -18,19 +18,18 @@
* Like csum_partial, this must be called with even lengths,
* except for the last fragment.
*/
-extern __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err, int *dst_err);
+extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr);
+ int len);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
- int len, __wsum sum, int *err_ptr);
+ int len);
-#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
+#define _HAVE_ARCH_CSUM_AND_COPY
+#define csum_partial_copy_nocheck(src, dst, len) \
+ csum_partial_copy_generic((src), (dst), (len))
/*
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index ecd150dc3ed9..ec5cd2dede35 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -78,12 +78,10 @@ EXPORT_SYMBOL(__csum_partial)
/*
* Computes the checksum of a memory block at src, length len,
- * and adds in "sum" (32-bit), while copying the block to dst.
- * If an access exception occurs on src or dst, it stores -EFAULT
- * to *src_err or *dst_err respectively, and (for an error on
- * src) zeroes the rest of dst.
+ * and adds in 0xffffffff, while copying the block to dst.
+ * If an access exception occurs it returns zero.
*
- * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
+ * csum_partial_copy_generic(src, dst, len)
*/
#define CSUM_COPY_16_BYTES_WITHEX(n) \
8 ## n ## 0: \
@@ -108,14 +106,14 @@ EXPORT_SYMBOL(__csum_partial)
adde r12,r12,r10
#define CSUM_COPY_16_BYTES_EXCODE(n) \
- EX_TABLE(8 ## n ## 0b, src_error); \
- EX_TABLE(8 ## n ## 1b, src_error); \
- EX_TABLE(8 ## n ## 2b, src_error); \
- EX_TABLE(8 ## n ## 3b, src_error); \
- EX_TABLE(8 ## n ## 4b, dst_error); \
- EX_TABLE(8 ## n ## 5b, dst_error); \
- EX_TABLE(8 ## n ## 6b, dst_error); \
- EX_TABLE(8 ## n ## 7b, dst_error);
+ EX_TABLE(8 ## n ## 0b, fault); \
+ EX_TABLE(8 ## n ## 1b, fault); \
+ EX_TABLE(8 ## n ## 2b, fault); \
+ EX_TABLE(8 ## n ## 3b, fault); \
+ EX_TABLE(8 ## n ## 4b, fault); \
+ EX_TABLE(8 ## n ## 5b, fault); \
+ EX_TABLE(8 ## n ## 6b, fault); \
+ EX_TABLE(8 ## n ## 7b, fault);
.text
.stabs "arch/powerpc/lib/",N_SO,0,0,0f
@@ -127,11 +125,8 @@ LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
_GLOBAL(csum_partial_copy_generic)
- stwu r1,-16(r1)
- stw r7,12(r1)
- stw r8,8(r1)
-
- addic r12,r6,0
+ li r12,-1
+ addic r0,r0,0 /* clear carry */
addi r6,r4,-4
neg r0,r4
addi r4,r3,-4
@@ -246,34 +241,19 @@ _GLOBAL(csum_partial_copy_generic)
rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */
blr
-/* read fault */
-src_error:
- lwz r7,12(r1)
- addi r1,r1,16
- cmpwi cr0,r7,0
- beqlr
- li r0,-EFAULT
- stw r0,0(r7)
- blr
-/* write fault */
-dst_error:
- lwz r8,8(r1)
- addi r1,r1,16
- cmpwi cr0,r8,0
- beqlr
- li r0,-EFAULT
- stw r0,0(r8)
+fault:
+ li r3,0
blr
- EX_TABLE(70b, src_error);
- EX_TABLE(71b, dst_error);
- EX_TABLE(72b, src_error);
- EX_TABLE(73b, dst_error);
- EX_TABLE(54b, dst_error);
+ EX_TABLE(70b, fault);
+ EX_TABLE(71b, fault);
+ EX_TABLE(72b, fault);
+ EX_TABLE(73b, fault);
+ EX_TABLE(54b, fault);
/*
* this stuff handles faults in the cacheline loop and branches to either
- * src_error (if in read part) or dst_error (if in write part)
+ * fault (if in read part) or fault (if in write part)
*/
CSUM_COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
@@ -290,12 +270,12 @@ dst_error:
#endif
#endif
- EX_TABLE(30b, src_error);
- EX_TABLE(31b, dst_error);
- EX_TABLE(40b, src_error);
- EX_TABLE(41b, dst_error);
- EX_TABLE(50b, src_error);
- EX_TABLE(51b, dst_error);
+ EX_TABLE(30b, fault);
+ EX_TABLE(31b, fault);
+ EX_TABLE(40b, fault);
+ EX_TABLE(41b, fault);
+ EX_TABLE(50b, fault);
+ EX_TABLE(51b, fault);
EXPORT_SYMBOL(csum_partial_copy_generic)
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 514978f908d4..98ff51bd2f7d 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -182,34 +182,33 @@ EXPORT_SYMBOL(__csum_partial)
.macro srcnr
100:
- EX_TABLE(100b,.Lsrc_error_nr)
+ EX_TABLE(100b,.Lerror_nr)
.endm
.macro source
150:
- EX_TABLE(150b,.Lsrc_error)
+ EX_TABLE(150b,.Lerror)
.endm
.macro dstnr
200:
- EX_TABLE(200b,.Ldest_error_nr)
+ EX_TABLE(200b,.Lerror_nr)
.endm
.macro dest
250:
- EX_TABLE(250b,.Ldest_error)
+ EX_TABLE(250b,.Lerror)
.endm
/*
* Computes the checksum of a memory block at src, length len,
- * and adds in "sum" (32-bit), while copying the block to dst.
- * If an access exception occurs on src or dst, it stores -EFAULT
- * to *src_err or *dst_err respectively. The caller must take any action
- * required in this case (zeroing memory, recalculating partial checksum etc).
+ * and adds in 0xffffffff (32-bit), while copying the block to dst.
+ * If an access exception occurs, it returns 0.
*
- * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
+ * csum_partial_copy_generic(r3=src, r4=dst, r5=len)
*/
_GLOBAL(csum_partial_copy_generic)
+ li r6,-1
addic r0,r6,0 /* clear carry */
srdi. r6,r5,3 /* less than 8 bytes? */
@@ -401,29 +400,15 @@ dstnr; stb r6,0(r4)
srdi r3,r3,32
blr
-.Lsrc_error:
+.Lerror:
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
addi r1,r1,STACKFRAMESIZE
-.Lsrc_error_nr:
- cmpdi 0,r7,0
- beqlr
- li r6,-EFAULT
- stw r6,0(r7)
+.Lerror_nr:
+ li r3,0
blr
-.Ldest_error:
- ld r14,STK_REG(R14)(r1)
- ld r15,STK_REG(R15)(r1)
- ld r16,STK_REG(R16)(r1)
- addi r1,r1,STACKFRAMESIZE
-.Ldest_error_nr:
- cmpdi 0,r8,0
- beqlr
- li r6,-EFAULT
- stw r6,0(r8)
- blr
EXPORT_SYMBOL(csum_partial_copy_generic)
/*
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index fabe4db28726..b895166afc82 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -12,83 +12,37 @@
#include <linux/uaccess.h>
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len)
{
- unsigned int csum;
+ __wsum csum;
might_sleep();
- allow_read_from_user(src, len);
-
- *err_ptr = 0;
-
- if (!len) {
- csum = 0;
- goto out;
- }
- if (unlikely((len < 0) || !access_ok(src, len))) {
- *err_ptr = -EFAULT;
- csum = (__force unsigned int)sum;
- goto out;
- }
+ if (unlikely(!access_ok(src, len)))
+ return 0;
- csum = csum_partial_copy_generic((void __force *)src, dst,
- len, sum, err_ptr, NULL);
-
- if (unlikely(*err_ptr)) {
- int missing = __copy_from_user(dst, src, len);
-
- if (missing) {
- memset(dst + len - missing, 0, missing);
- *err_ptr = -EFAULT;
- } else {
- *err_ptr = 0;
- }
+ allow_read_from_user(src, len);
- csum = csum_partial(dst, len, sum);
- }
+ csum = csum_partial_copy_generic((void __force *)src, dst, len);
-out:
prevent_read_from_user(src, len);
- return (__force __wsum)csum;
+ return csum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
-__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
- __wsum sum, int *err_ptr)
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
- unsigned int csum;
+ __wsum csum;
might_sleep();
- allow_write_to_user(dst, len);
-
- *err_ptr = 0;
-
- if (!len) {
- csum = 0;
- goto out;
- }
+ if (unlikely(!access_ok(dst, len)))
+ return 0;
- if (unlikely((len < 0) || !access_ok(dst, len))) {
- *err_ptr = -EFAULT;
- csum = -1; /* invalid checksum */
- goto out;
- }
-
- csum = csum_partial_copy_generic(src, (void __force *)dst,
- len, sum, NULL, err_ptr);
-
- if (unlikely(*err_ptr)) {
- csum = csum_partial(src, len, sum);
+ allow_write_to_user(dst, len);
- if (copy_to_user(dst, src, len)) {
- *err_ptr = -EFAULT;
- csum = -1; /* invalid checksum */
- }
- }
+ csum = csum_partial_copy_generic(src, (void __force *)dst, len);
-out:
prevent_write_to_user(dst, len);
- return (__force __wsum)csum;
+ return csum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index 6d01c96aeb5c..6813bfa1eeb7 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -39,13 +39,6 @@ csum_partial(const void *buff, int len, __wsum sum)
return sum;
}
-static inline __wsum
-csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst,src,len);
- return csum_partial(dst, len, sum);
-}
-
/*
* Fold a partial checksum without adding pseudo headers
*/
diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h
index 91571a42e44e..1a391e3a7659 100644
--- a/arch/sh/include/asm/checksum_32.h
+++ b/arch/sh/include/asm/checksum_32.h
@@ -30,10 +30,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
+#define _HAVE_ARCH_CSUM_AND_COPY
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
@@ -42,23 +41,18 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* access_ok().
*/
static inline
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
- return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len);
}
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
-__wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
- if (access_ok(src, len))
- return csum_partial_copy_generic((__force const void *)src, dst,
- len, sum, err_ptr, NULL);
- if (len)
- *err_ptr = -EFAULT;
- return sum;
+ if (!access_ok(src, len))
+ return 0;
+ return csum_partial_copy_generic((__force const void *)src, dst, len);
}
/*
@@ -199,16 +193,10 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
- int len, __wsum sum,
- int *err_ptr)
+ int len)
{
- if (access_ok(dst, len))
- return csum_partial_copy_generic((__force const void *)src,
- dst, len, sum, NULL, err_ptr);
-
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
+ if (!access_ok(dst, len))
+ return 0;
+ return csum_partial_copy_generic((__force const void *)src, dst, len);
}
#endif /* __ASM_SH_CHECKSUM_H */
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index 97b5c2d9fec4..3e07074e0098 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -173,47 +173,27 @@ ENTRY(csum_partial)
mov r6, r0
/*
-unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
- int sum, int *src_err_ptr, int *dst_err_ptr)
+unsigned int csum_partial_copy_generic (const char *src, char *dst, int len)
*/
/*
- * Copy from ds while checksumming, otherwise like csum_partial
- *
- * The macros SRC and DST specify the type of access for the instruction.
- * thus we can call a custom exception handler for all access types.
- *
- * FIXME: could someone double-check whether I haven't mixed up some SRC and
- * DST definitions? It's damn hard to trigger all cases. I hope I got
- * them all but there's no guarantee.
+ * Copy from ds while checksumming, otherwise like csum_partial with initial
+ * sum being ~0U
*/
-#define SRC(...) \
+#define EXC(...) \
9999: __VA_ARGS__ ; \
.section __ex_table, "a"; \
.long 9999b, 6001f ; \
.previous
-#define DST(...) \
- 9999: __VA_ARGS__ ; \
- .section __ex_table, "a"; \
- .long 9999b, 6002f ; \
- .previous
-
!
! r4: const char *SRC
! r5: char *DST
! r6: int LEN
-! r7: int SUM
-!
-! on stack:
-! int *SRC_ERR_PTR
-! int *DST_ERR_PTR
!
ENTRY(csum_partial_copy_generic)
- mov.l r5,@-r15
- mov.l r6,@-r15
-
+ mov #-1,r7
mov #3,r0 ! Check src and dest are equally aligned
mov r4,r1
and r0,r1
@@ -243,11 +223,11 @@ ENTRY(csum_partial_copy_generic)
clrt
.align 2
5:
-SRC( mov.b @r4+,r1 )
-SRC( mov.b @r4+,r0 )
+EXC( mov.b @r4+,r1 )
+EXC( mov.b @r4+,r0 )
extu.b r1,r1
-DST( mov.b r1,@r5 )
-DST( mov.b r0,@(1,r5) )
+EXC( mov.b r1,@r5 )
+EXC( mov.b r0,@(1,r5) )
extu.b r0,r0
add #2,r5
@@ -276,8 +256,8 @@ DST( mov.b r0,@(1,r5) )
! Handle first two bytes as a special case
.align 2
1:
-SRC( mov.w @r4+,r0 )
-DST( mov.w r0,@r5 )
+EXC( mov.w @r4+,r0 )
+EXC( mov.w r0,@r5 )
add #2,r5
extu.w r0,r0
addc r0,r7
@@ -292,32 +272,32 @@ DST( mov.w r0,@r5 )
clrt
.align 2
1:
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@r5 )
-DST( mov.l r1,@(4,r5) )
+EXC( mov.l r0,@r5 )
+EXC( mov.l r1,@(4,r5) )
addc r1,r7
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@(8,r5) )
-DST( mov.l r1,@(12,r5) )
+EXC( mov.l r0,@(8,r5) )
+EXC( mov.l r1,@(12,r5) )
addc r1,r7
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@(16,r5) )
-DST( mov.l r1,@(20,r5) )
+EXC( mov.l r0,@(16,r5) )
+EXC( mov.l r1,@(20,r5) )
addc r1,r7
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@(24,r5) )
-DST( mov.l r1,@(28,r5) )
+EXC( mov.l r0,@(24,r5) )
+EXC( mov.l r1,@(28,r5) )
addc r1,r7
add #32,r5
movt r0
@@ -335,9 +315,9 @@ DST( mov.l r1,@(28,r5) )
clrt
shlr2 r6
3:
-SRC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r0 )
addc r0,r7
-DST( mov.l r0,@r5 )
+EXC( mov.l r0,@r5 )
add #4,r5
movt r0
dt r6
@@ -353,8 +333,8 @@ DST( mov.l r0,@r5 )
mov #2,r1
cmp/hs r1,r6
bf 5f
-SRC( mov.w @r4+,r0 )
-DST( mov.w r0,@r5 )
+EXC( mov.w @r4+,r0 )
+EXC( mov.w r0,@r5 )
extu.w r0,r0
add #2,r5
cmp/eq r1,r6
@@ -363,8 +343,8 @@ DST( mov.w r0,@r5 )
shll16 r0
addc r0,r7
5:
-SRC( mov.b @r4+,r0 )
-DST( mov.b r0,@r5 )
+EXC( mov.b @r4+,r0 )
+EXC( mov.b r0,@r5 )
extu.b r0,r0
#ifndef __LITTLE_ENDIAN__
shll8 r0
@@ -373,42 +353,13 @@ DST( mov.b r0,@r5 )
mov #0,r0
addc r0,r7
7:
-5000:
# Exception handler:
.section .fixup, "ax"
6001:
- mov.l @(8,r15),r0 ! src_err_ptr
- mov #-EFAULT,r1
- mov.l r1,@r0
-
- ! zero the complete destination - computing the rest
- ! is too much work
- mov.l @(4,r15),r5 ! dst
- mov.l @r15,r6 ! len
- mov #0,r7
-1: mov.b r7,@r5
- dt r6
- bf/s 1b
- add #1,r5
- mov.l 8000f,r0
- jmp @r0
- nop
- .align 2
-8000: .long 5000b
-
-6002:
- mov.l @(12,r15),r0 ! dst_err_ptr
- mov #-EFAULT,r1
- mov.l r1,@r0
- mov.l 8001f,r0
- jmp @r0
- nop
- .align 2
-8001: .long 5000b
-
+ rts
+ mov #0,r0
.previous
- add #8,r15
rts
mov r7,r0
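
The single instruction `mov #-1,r7` that replaces the stacked error pointers is what makes the 0-return convention sound: an end-around-carry (ones'-complement) accumulator that starts at all-ones can never reach zero, so 0 stays free to mean "fault". A small userspace model of that invariant, assuming nothing beyond standard C:

    #include <assert.h>
    #include <stdint.h>

    /* 32-bit ones'-complement (end-around-carry) addition */
    static uint32_t eac_add(uint32_t a, uint32_t b)
    {
            uint32_t r = a + b;
            return r + (r < b);     /* fold the carry back into bit 0 */
    }

    int main(void)
    {
            uint32_t sum = 0xffffffff;      /* the seed `mov #-1,r7` loads */
            uint32_t data[] = { 0, 0x1234, 0xffffffff, 0xdeadbeef };

            for (int i = 0; i < 4; i++) {
                    sum = eac_add(sum, data[i]);
                    assert(sum != 0);       /* 0 stays reserved for "fault" */
            }
            return 0;
    }
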
diff --git a/arch/sparc/include/asm/checksum.h b/arch/sparc/include/asm/checksum.h
index a6256cb6fc5c..f2ac13323b6d 100644
--- a/arch/sparc/include/asm/checksum.h
+++ b/arch/sparc/include/asm/checksum.h
@@ -1,7 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ___ASM_SPARC_CHECKSUM_H
#define ___ASM_SPARC_CHECKSUM_H
+#define _HAVE_ARCH_CSUM_AND_COPY
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define HAVE_CSUM_COPY_USER
#if defined(__sparc__) && defined(__arch64__)
#include <asm/checksum_64.h>
#else
diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h
index 479a0b812af5..ce11e0ad80c7 100644
--- a/arch/sparc/include/asm/checksum_32.h
+++ b/arch/sparc/include/asm/checksum_32.h
@@ -42,7 +42,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
register unsigned int ret asm("o0") = (unsigned int)src;
register char *d asm("o1") = dst;
@@ -50,9 +50,9 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
__asm__ __volatile__ (
"call __csum_partial_copy_sparc_generic\n\t"
- " mov %6, %%g7\n"
+ " mov -1, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l)
- : "0" (ret), "1" (d), "2" (l), "r" (sum)
+ : "0" (ret), "1" (d), "2" (l)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5", "g7",
"memory", "cc");
@@ -60,65 +60,19 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
}
static inline __wsum
-csum_and_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *err)
- {
- register unsigned long ret asm("o0") = (unsigned long)src;
- register char *d asm("o1") = dst;
- register int l asm("g1") = len;
- register __wsum s asm("g7") = sum;
-
- if (unlikely(!access_ok(src, len))) {
- if (len)
- *err = -EFAULT;
- return sum;
- }
-
- __asm__ __volatile__ (
- ".section __ex_table,#alloc\n\t"
- ".align 4\n\t"
- ".word 1f,2\n\t"
- ".previous\n"
- "1:\n\t"
- "call __csum_partial_copy_sparc_generic\n\t"
- " st %8, [%%sp + 64]\n"
- : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
- : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
- : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
- "cc", "memory");
- return (__force __wsum)ret;
+csum_and_copy_from_user(const void __user *src, void *dst, int len)
+{
+ if (unlikely(!access_ok(src, len)))
+ return 0;
+ return csum_partial_copy_nocheck((__force void *)src, dst, len);
}
-#define HAVE_CSUM_COPY_USER
-
static inline __wsum
-csum_and_copy_to_user(const void *src, void __user *dst, int len,
- __wsum sum, int *err)
+csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
- if (!access_ok(dst, len)) {
- *err = -EFAULT;
- return sum;
- } else {
- register unsigned long ret asm("o0") = (unsigned long)src;
- register char __user *d asm("o1") = dst;
- register int l asm("g1") = len;
- register __wsum s asm("g7") = sum;
-
- __asm__ __volatile__ (
- ".section __ex_table,#alloc\n\t"
- ".align 4\n\t"
- ".word 1f,1\n\t"
- ".previous\n"
- "1:\n\t"
- "call __csum_partial_copy_sparc_generic\n\t"
- " st %8, [%%sp + 64]\n"
- : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
- : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
- : "o2", "o3", "o4", "o5", "o7",
- "g2", "g3", "g4", "g5",
- "cc", "memory");
- return (__force __wsum)ret;
- }
+ if (!access_ok(dst, len))
+ return 0;
+ return csum_partial_copy_nocheck(src, (__force void *)dst, len);
}
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
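
Both sparc32 user-copy wrappers now collapse into an access_ok() check in front of csum_partial_copy_nocheck(). Architectures that provide no helper at all get a generic fallback that this same series unifies; its rough shape is presumably along these lines (a sketch based on the "unify generic instances" and "saner calling conventions" patches in the series, not sparc code):

    /* copy first, then checksum the kernel-side buffer, seeded with
     * ~0U so that a real sum is never 0 */
    static inline __wsum
    csum_and_copy_from_user(const void __user *src, void *dst, int len)
    {
            if (copy_from_user(dst, src, len))
                    return 0;
            return csum_partial(dst, len, ~0U);
    }
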
diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h
index 0fa4433f5662..d6b59461e064 100644
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@ -38,42 +38,9 @@ __wsum csum_partial(const void * buff, int len, __wsum sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-
-long __csum_partial_copy_from_user(const void __user *src,
- void *dst, int len,
- __wsum sum);
-
-static inline __wsum
-csum_and_copy_from_user(const void __user *src,
- void *dst, int len,
- __wsum sum, int *err)
-{
- long ret = __csum_partial_copy_from_user(src, dst, len, sum);
- if (ret < 0)
- *err = -EFAULT;
- return (__force __wsum) ret;
-}
-
-/*
- * Copy and checksum to user
- */
-#define HAVE_CSUM_COPY_USER
-long __csum_partial_copy_to_user(const void *src,
- void __user *dst, int len,
- __wsum sum);
-
-static inline __wsum
-csum_and_copy_to_user(const void *src,
- void __user *dst, int len,
- __wsum sum, int *err)
-{
- long ret = __csum_partial_copy_to_user(src, dst, len, sum);
- if (ret < 0)
- *err = -EFAULT;
- return (__force __wsum) ret;
-}
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len);
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 6a5469c97246..7488d130faf7 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -144,44 +144,21 @@ cpte: bne csum_partial_end_cruft ! yep, handle it
cpout: retl ! get outta here
mov %o2, %o0 ! return computed csum
- .globl __csum_partial_copy_start, __csum_partial_copy_end
-__csum_partial_copy_start:
-
/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
-#define EX(x,y,a,b) \
-98: x,y; \
- .section .fixup,ALLOC,EXECINSTR; \
- .align 4; \
-99: ba 30f; \
- a, b, %o3; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4
-
-#define EX2(x,y) \
-98: x,y; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 30f; \
- .text; \
- .align 4
-
-#define EX3(x,y) \
+#define EX(x,y) \
98: x,y; \
.section __ex_table,ALLOC; \
.align 4; \
- .word 98b, 96f; \
+ .word 98b, cc_fault; \
.text; \
.align 4
-#define EXT(start,end,handler) \
+#define EXT(start,end) \
.section __ex_table,ALLOC; \
.align 4; \
- .word start, 0, end, handler; \
+ .word start, 0, end, cc_fault; \
.text; \
.align 4
@@ -252,21 +229,21 @@ __csum_partial_copy_start:
cc_end_cruft:
be 1f
andcc %o3, 4, %g0
- EX(ldd [%o0 + 0x00], %g2, and %o3, 0xf)
+ EX(ldd [%o0 + 0x00], %g2)
add %o1, 8, %o1
addcc %g2, %g7, %g7
add %o0, 8, %o0
addxcc %g3, %g7, %g7
- EX2(st %g2, [%o1 - 0x08])
+ EX(st %g2, [%o1 - 0x08])
addx %g0, %g7, %g7
andcc %o3, 4, %g0
- EX2(st %g3, [%o1 - 0x04])
+ EX(st %g3, [%o1 - 0x04])
1: be 1f
andcc %o3, 3, %o3
- EX(ld [%o0 + 0x00], %g2, add %o3, 4)
+ EX(ld [%o0 + 0x00], %g2)
add %o1, 4, %o1
addcc %g2, %g7, %g7
- EX2(st %g2, [%o1 - 0x04])
+ EX(st %g2, [%o1 - 0x04])
addx %g0, %g7, %g7
andcc %o3, 3, %g0
add %o0, 4, %o0
@@ -276,14 +253,14 @@ cc_end_cruft:
subcc %o3, 2, %o3
b 4f
or %g0, %g0, %o4
-2: EX(lduh [%o0 + 0x00], %o4, add %o3, 2)
+2: EX(lduh [%o0 + 0x00], %o4)
add %o0, 2, %o0
- EX2(sth %o4, [%o1 + 0x00])
+ EX(sth %o4, [%o1 + 0x00])
be 6f
add %o1, 2, %o1
sll %o4, 16, %o4
-4: EX(ldub [%o0 + 0x00], %o5, add %g0, 1)
- EX2(stb %o5, [%o1 + 0x00])
+4: EX(ldub [%o0 + 0x00], %o5)
+ EX(stb %o5, [%o1 + 0x00])
sll %o5, 8, %o5
or %o5, %o4, %o4
6: addcc %o4, %g7, %g7
@@ -306,9 +283,9 @@ cc_dword_align:
andcc %o0, 0x2, %g0
be 1f
andcc %o0, 0x4, %g0
- EX(lduh [%o0 + 0x00], %g4, add %g1, 0)
+ EX(lduh [%o0 + 0x00], %g4)
sub %g1, 2, %g1
- EX2(sth %g4, [%o1 + 0x00])
+ EX(sth %g4, [%o1 + 0x00])
add %o0, 2, %o0
sll %g4, 16, %g4
addcc %g4, %g7, %g7
@@ -322,9 +299,9 @@ cc_dword_align:
or %g3, %g7, %g7
1: be 3f
andcc %g1, 0xffffff80, %g0
- EX(ld [%o0 + 0x00], %g4, add %g1, 0)
+ EX(ld [%o0 + 0x00], %g4)
sub %g1, 4, %g1
- EX2(st %g4, [%o1 + 0x00])
+ EX(st %g4, [%o1 + 0x00])
add %o0, 4, %o0
addcc %g4, %g7, %g7
add %o1, 4, %o1
@@ -354,7 +331,7 @@ __csum_partial_copy_sparc_generic:
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10: EXT(5b, 10b, 20f) ! note for exception handling
+10: EXT(5b, 10b) ! note for exception handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
@@ -379,7 +356,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12: EXT(cctbl, 12b, 22f) ! note for exception table handling
+12: EXT(cctbl, 12b) ! note for exception table handling
addx %g0, %g7, %g7
andcc %o3, 0xf, %g0 ! check for low bits set
ccte: bne cc_end_cruft ! something left, handle it out of band
@@ -390,7 +367,7 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11: EXT(ccdbl, 11b, 21f) ! note for exception table handling
+11: EXT(ccdbl, 11b) ! note for exception table handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
@@ -407,9 +384,9 @@ ccslow: cmp %g1, 0
be,a 1f
srl %g1, 1, %g4
sub %g1, 1, %g1
- EX(ldub [%o0], %g5, add %g1, 1)
+ EX(ldub [%o0], %g5)
add %o0, 1, %o0
- EX2(stb %g5, [%o1])
+ EX(stb %g5, [%o1])
srl %g1, 1, %g4
add %o1, 1, %o1
1: cmp %g4, 0
@@ -418,34 +395,34 @@ ccslow: cmp %g1, 0
andcc %o0, 2, %g0
be,a 1f
srl %g4, 1, %g4
- EX(lduh [%o0], %o4, add %g1, 0)
+ EX(lduh [%o0], %o4)
sub %g1, 2, %g1
srl %o4, 8, %g2
sub %g4, 1, %g4
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
add %o4, %g5, %g5
- EX2(stb %o4, [%o1 + 1])
+ EX(stb %o4, [%o1 + 1])
add %o0, 2, %o0
srl %g4, 1, %g4
add %o1, 2, %o1
1: cmp %g4, 0
be,a 2f
andcc %g1, 2, %g0
- EX3(ld [%o0], %o4)
+ EX(ld [%o0], %o4)
5: srl %o4, 24, %g2
srl %o4, 16, %g3
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
srl %o4, 8, %g2
- EX2(stb %g3, [%o1 + 1])
+ EX(stb %g3, [%o1 + 1])
add %o0, 4, %o0
- EX2(stb %g2, [%o1 + 2])
+ EX(stb %g2, [%o1 + 2])
addcc %o4, %g5, %g5
- EX2(stb %o4, [%o1 + 3])
+ EX(stb %o4, [%o1 + 3])
	addx %g5, %g0, %g5	! I am now too lazy to optimize this (question if
	add %o1, 4, %o1	! it is worth it). Maybe some day - with the sll/srl
subcc %g4, 1, %g4 ! tricks
bne,a 5b
- EX3(ld [%o0], %o4)
+ EX(ld [%o0], %o4)
sll %g5, 16, %g2
srl %g5, 16, %g5
srl %g2, 16, %g2
@@ -453,19 +430,19 @@ ccslow: cmp %g1, 0
add %g2, %g5, %g5
2: be,a 3f
andcc %g1, 1, %g0
- EX(lduh [%o0], %o4, and %g1, 3)
+ EX(lduh [%o0], %o4)
andcc %g1, 1, %g0
srl %o4, 8, %g2
add %o0, 2, %o0
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
add %g5, %o4, %g5
- EX2(stb %o4, [%o1 + 1])
+ EX(stb %o4, [%o1 + 1])
add %o1, 2, %o1
3: be,a 1f
sll %g5, 16, %o4
- EX(ldub [%o0], %g2, add %g0, 1)
+ EX(ldub [%o0], %g2)
sll %g2, 8, %o4
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
add %g5, %o4, %g5
sll %g5, 16, %o4
1: addcc %o4, %g5, %g5
@@ -481,113 +458,10 @@ ccslow: cmp %g1, 0
4: addcc %g7, %g5, %g7
retl
addx %g0, %g7, %o0
-__csum_partial_copy_end:
/* We do these strange calculations for the csum_*_from_user case only, ie.
* we only bother with faults on loads... */
-/* o2 = ((g2%20)&3)*8
- * o3 = g1 - (g2/20)*32 - o2 */
-20:
- cmp %g2, 20
- blu,a 1f
- and %g2, 3, %o2
- sub %g1, 32, %g1
- b 20b
- sub %g2, 20, %g2
-1:
- sll %o2, 3, %o2
- b 31f
- sub %g1, %o2, %o3
-
-/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
- * o3 = g1 - (g2/16)*32 - o2 */
-21:
- andcc %g2, 15, %o3
- srl %g2, 4, %g2
- be,a 1f
- clr %o2
- add %o3, 1, %o3
- and %o3, 14, %o3
- sll %o3, 3, %o2
-1:
- sll %g2, 5, %g2
- sub %g1, %g2, %o3
- b 31f
- sub %o3, %o2, %o3
-
-/* o0 += (g2/10)*16 - 0x70
- * 01 += (g2/10)*16 - 0x70
- * o2 = (g2 % 10) ? 8 : 0
- * o3 += 0x70 - (g2/10)*16 - o2 */
-22:
- cmp %g2, 10
- blu,a 1f
- sub %o0, 0x70, %o0
- add %o0, 16, %o0
- add %o1, 16, %o1
- sub %o3, 16, %o3
- b 22b
- sub %g2, 10, %g2
-1:
- sub %o1, 0x70, %o1
- add %o3, 0x70, %o3
- clr %o2
- tst %g2
- bne,a 1f
- mov 8, %o2
-1:
- b 31f
- sub %o3, %o2, %o3
-96:
- and %g1, 3, %g1
- sll %g4, 2, %g4
- add %g1, %g4, %o3
-30:
-/* %o1 is dst
- * %o3 is # bytes to zero out
- * %o4 is faulting address
- * %o5 is %pc where fault occurred */
- clr %o2
-31:
-/* %o0 is src
- * %o1 is dst
- * %o2 is # of bytes to copy from src to dst
- * %o3 is # bytes to zero out
- * %o4 is faulting address
- * %o5 is %pc where fault occurred */
- save %sp, -104, %sp
- mov %i5, %o0
- mov %i7, %o1
- mov %i4, %o2
- call lookup_fault
- mov %g7, %i4
- cmp %o0, 2
- bne 1f
- add %g0, -EFAULT, %i5
- tst %i2
- be 2f
- mov %i0, %o1
- mov %i1, %o0
-5:
- call memcpy
- mov %i2, %o2
- tst %o0
- bne,a 2f
- add %i3, %i2, %i3
- add %i1, %i2, %i1
-2:
- mov %i1, %o0
-6:
- call __bzero
- mov %i3, %o1
-1:
- ld [%sp + 168], %o2 ! struct_ptr of parent
- st %i5, [%o2]
+cc_fault:
ret
- restore
-
- .section __ex_table,#alloc
- .align 4
- .word 5b,2
- .word 6b,2
+ clr %o0
diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S
index 26c644ba3ecb..0c0268e77155 100644
--- a/arch/sparc/lib/csum_copy.S
+++ b/arch/sparc/lib/csum_copy.S
@@ -68,9 +68,10 @@
.globl FUNC_NAME
.type FUNC_NAME,#function
EXPORT_SYMBOL(FUNC_NAME)
-FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */
+FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */
LOAD(prefetch, %o0 + 0x000, #n_reads)
xor %o0, %o1, %g1
+ mov 1, %o3
clr %o4
andcc %g1, 0x3, %g0
bne,pn %icc, 95f
diff --git a/arch/sparc/lib/csum_copy_from_user.S b/arch/sparc/lib/csum_copy_from_user.S
index d20b9594f0c7..b0ba8d4dd439 100644
--- a/arch/sparc/lib/csum_copy_from_user.S
+++ b/arch/sparc/lib/csum_copy_from_user.S
@@ -9,14 +9,14 @@
.section .fixup, "ax"; \
.align 4; \
99: retl; \
- mov -1, %o0; \
+ mov 0, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define FUNC_NAME __csum_partial_copy_from_user
+#define FUNC_NAME csum_and_copy_from_user
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
#include "csum_copy.S"
diff --git a/arch/sparc/lib/csum_copy_to_user.S b/arch/sparc/lib/csum_copy_to_user.S
index d71c0c81e8ab..91ba36dbf7d2 100644
--- a/arch/sparc/lib/csum_copy_to_user.S
+++ b/arch/sparc/lib/csum_copy_to_user.S
@@ -9,14 +9,14 @@
.section .fixup,"ax"; \
.align 4; \
99: retl; \
- mov -1, %o0; \
+ mov 0, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define FUNC_NAME __csum_partial_copy_to_user
+#define FUNC_NAME csum_and_copy_to_user
#define STORE(type,src,addr) type##a src, [addr] %asi
#include "csum_copy.S"
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 8071bfd72349..40ce087dfecf 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -288,8 +288,6 @@ no_context:
if (fixup > 10) {
extern const unsigned int __memset_start[];
extern const unsigned int __memset_end[];
- extern const unsigned int __csum_partial_copy_start[];
- extern const unsigned int __csum_partial_copy_end[];
#ifdef DEBUG_EXCEPTIONS
printk("Exception: PC<%08lx> faddr<%08lx>\n",
@@ -298,9 +296,7 @@ no_context:
regs->pc, fixup, g2);
#endif
if ((regs->pc >= (unsigned long)__memset_start &&
- regs->pc < (unsigned long)__memset_end) ||
- (regs->pc >= (unsigned long)__csum_partial_copy_start &&
- regs->pc < (unsigned long)__csum_partial_copy_end)) {
+ regs->pc < (unsigned long)__memset_end)) {
regs->u_regs[UREG_I4] = address;
regs->u_regs[UREG_I5] = regs->pc;
}
diff --git a/arch/x86/include/asm/checksum.h b/arch/x86/include/asm/checksum.h
index 0ada98d5d09f..bca625a60186 100644
--- a/arch/x86/include/asm/checksum.h
+++ b/arch/x86/include/asm/checksum.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
#define HAVE_CSUM_COPY_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
#ifdef CONFIG_X86_32
# include <asm/checksum_32.h>
#else
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 11624c8a9d8d..17da95387997 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -27,9 +27,7 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
/*
* Note: when you get a NULL pointer exception here this means someone
@@ -38,26 +36,20 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* If you use these functions directly please don't forget the
* access_ok().
*/
-static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
- return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len);
}
static inline __wsum csum_and_copy_from_user(const void __user *src,
- void *dst, int len,
- __wsum sum, int *err_ptr)
+ void *dst, int len)
{
__wsum ret;
might_sleep();
- if (!user_access_begin(src, len)) {
- if (len)
- *err_ptr = -EFAULT;
- return sum;
- }
- ret = csum_partial_copy_generic((__force void *)src, dst,
- len, sum, err_ptr, NULL);
+ if (!user_access_begin(src, len))
+ return 0;
+ ret = csum_partial_copy_generic((__force void *)src, dst, len);
user_access_end();
return ret;
@@ -178,23 +170,17 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
*/
static inline __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
- int len, __wsum sum,
- int *err_ptr)
+ int len)
{
__wsum ret;
might_sleep();
- if (user_access_begin(dst, len)) {
- ret = csum_partial_copy_generic(src, (__force void *)dst,
- len, sum, NULL, err_ptr);
- user_access_end();
- return ret;
- }
+ if (!user_access_begin(dst, len))
+ return 0;
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
+ ret = csum_partial_copy_generic(src, (__force void *)dst, len);
+ user_access_end();
+ return ret;
}
#endif /* _ASM_X86_CHECKSUM_32_H */
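
Everything these helpers return is still an unfolded 32-bit sum (__wsum); callers fold it down to the final 16-bit Internet checksum separately. For reference, a userspace model of that fold, mirroring the kernel's csum_fold() (a sketch, not the x86 implementation):

    #include <stdint.h>

    static uint16_t fold_csum(uint32_t sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);     /* at most 0x1fffe */
            sum = (sum & 0xffff) + (sum >> 16);     /* fold the last carry */
            return (uint16_t)~sum;                  /* ones'-complement */
    }
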
diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
index 0a289b87e872..407beebadaf4 100644
--- a/arch/x86/include/asm/checksum_64.h
+++ b/arch/x86/include/asm/checksum_64.h
@@ -130,17 +130,11 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* Do not call this directly. Use the wrappers below */
-extern __visible __wsum csum_partial_copy_generic(const void *src, const void *dst,
- int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+extern __visible __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
-
-extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum isum, int *errp);
-extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
- int len, __wsum isum, int *errp);
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
+extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
+extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len);
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
/**
 * ip_compute_csum - Compute a 16-bit IP checksum.
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index d1d768912368..4304320e51f4 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -253,28 +253,17 @@ EXPORT_SYMBOL(csum_partial)
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
- int len, int sum, int *src_err_ptr, int *dst_err_ptr)
+ int len)
*/
/*
* Copy from ds while checksumming, otherwise like csum_partial
- *
- * The macros SRC and DST specify the type of access for the instruction.
- * thus we can call a custom exception handler for all access types.
- *
- * FIXME: could someone double-check whether I haven't mixed up some SRC and
- * DST definitions? It's damn hard to trigger all cases. I hope I got
- * them all but there's no guarantee.
*/
-#define SRC(y...) \
+#define EXC(y...) \
9999: y; \
_ASM_EXTABLE_UA(9999b, 6001f)
-#define DST(y...) \
- 9999: y; \
- _ASM_EXTABLE_UA(9999b, 6002f)
-
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
#define ARGBASE 16
@@ -285,20 +274,20 @@ SYM_FUNC_START(csum_partial_copy_generic)
pushl %edi
pushl %esi
pushl %ebx
- movl ARGBASE+16(%esp),%eax # sum
movl ARGBASE+12(%esp),%ecx # len
movl ARGBASE+4(%esp),%esi # src
movl ARGBASE+8(%esp),%edi # dst
+ movl $-1, %eax # sum
testl $2, %edi # Check alignment.
jz 2f # Jump if alignment is ok.
subl $2, %ecx # Alignment uses up two bytes.
jae 1f # Jump if we had at least two bytes.
addl $2, %ecx # ecx was < 2. Deal with it.
jmp 4f
-SRC(1: movw (%esi), %bx )
+EXC(1: movw (%esi), %bx )
addl $2, %esi
-DST( movw %bx, (%edi) )
+EXC( movw %bx, (%edi) )
addl $2, %edi
addw %bx, %ax
adcl $0, %eax
@@ -306,34 +295,34 @@ DST( movw %bx, (%edi) )
movl %ecx, FP(%esp)
shrl $5, %ecx
jz 2f
- testl %esi, %esi
-SRC(1: movl (%esi), %ebx )
-SRC( movl 4(%esi), %edx )
+ testl %esi, %esi # what's wrong with clc?
+EXC(1: movl (%esi), %ebx )
+EXC( movl 4(%esi), %edx )
adcl %ebx, %eax
-DST( movl %ebx, (%edi) )
+EXC( movl %ebx, (%edi) )
adcl %edx, %eax
-DST( movl %edx, 4(%edi) )
+EXC( movl %edx, 4(%edi) )
-SRC( movl 8(%esi), %ebx )
-SRC( movl 12(%esi), %edx )
+EXC( movl 8(%esi), %ebx )
+EXC( movl 12(%esi), %edx )
adcl %ebx, %eax
-DST( movl %ebx, 8(%edi) )
+EXC( movl %ebx, 8(%edi) )
adcl %edx, %eax
-DST( movl %edx, 12(%edi) )
+EXC( movl %edx, 12(%edi) )
-SRC( movl 16(%esi), %ebx )
-SRC( movl 20(%esi), %edx )
+EXC( movl 16(%esi), %ebx )
+EXC( movl 20(%esi), %edx )
adcl %ebx, %eax
-DST( movl %ebx, 16(%edi) )
+EXC( movl %ebx, 16(%edi) )
adcl %edx, %eax
-DST( movl %edx, 20(%edi) )
+EXC( movl %edx, 20(%edi) )
-SRC( movl 24(%esi), %ebx )
-SRC( movl 28(%esi), %edx )
+EXC( movl 24(%esi), %ebx )
+EXC( movl 28(%esi), %edx )
adcl %ebx, %eax
-DST( movl %ebx, 24(%edi) )
+EXC( movl %ebx, 24(%edi) )
adcl %edx, %eax
-DST( movl %edx, 28(%edi) )
+EXC( movl %edx, 28(%edi) )
lea 32(%esi), %esi
lea 32(%edi), %edi
@@ -345,9 +334,9 @@ DST( movl %edx, 28(%edi) )
andl $0x1c, %edx
je 4f
shrl $2, %edx # This clears CF
-SRC(3: movl (%esi), %ebx )
+EXC(3: movl (%esi), %ebx )
adcl %ebx, %eax
-DST( movl %ebx, (%edi) )
+EXC( movl %ebx, (%edi) )
lea 4(%esi), %esi
lea 4(%edi), %edi
dec %edx
@@ -357,39 +346,24 @@ DST( movl %ebx, (%edi) )
jz 7f
cmpl $2, %ecx
jb 5f
-SRC( movw (%esi), %cx )
+EXC( movw (%esi), %cx )
leal 2(%esi), %esi
-DST( movw %cx, (%edi) )
+EXC( movw %cx, (%edi) )
leal 2(%edi), %edi
je 6f
shll $16,%ecx
-SRC(5: movb (%esi), %cl )
-DST( movb %cl, (%edi) )
+EXC(5: movb (%esi), %cl )
+EXC( movb %cl, (%edi) )
6: addl %ecx, %eax
adcl $0, %eax
7:
-5000:
# Exception handler:
.section .fixup, "ax"
6001:
- movl ARGBASE+20(%esp), %ebx # src_err_ptr
- movl $-EFAULT, (%ebx)
-
- # zero the complete destination - computing the rest
- # is too much work
- movl ARGBASE+8(%esp), %edi # dst
- movl ARGBASE+12(%esp), %ecx # len
- xorl %eax,%eax
- rep ; stosb
-
- jmp 5000b
-
-6002:
- movl ARGBASE+24(%esp), %ebx # dst_err_ptr
- movl $-EFAULT,(%ebx)
- jmp 5000b
+ xorl %eax, %eax
+ jmp 7b
.previous
@@ -405,14 +379,14 @@ SYM_FUNC_END(csum_partial_copy_generic)
/* Version for PentiumII/PPro */
#define ROUND1(x) \
- SRC(movl x(%esi), %ebx ) ; \
+ EXC(movl x(%esi), %ebx ) ; \
addl %ebx, %eax ; \
- DST(movl %ebx, x(%edi) ) ;
+ EXC(movl %ebx, x(%edi) ) ;
#define ROUND(x) \
- SRC(movl x(%esi), %ebx ) ; \
+ EXC(movl x(%esi), %ebx ) ; \
adcl %ebx, %eax ; \
- DST(movl %ebx, x(%edi) ) ;
+ EXC(movl %ebx, x(%edi) ) ;
#define ARGBASE 12
@@ -423,7 +397,7 @@ SYM_FUNC_START(csum_partial_copy_generic)
movl ARGBASE+4(%esp),%esi #src
movl ARGBASE+8(%esp),%edi #dst
movl ARGBASE+12(%esp),%ecx #len
- movl ARGBASE+16(%esp),%eax #sum
+ movl $-1, %eax #sum
# movl %ecx, %edx
movl %ecx, %ebx
movl %esi, %edx
@@ -439,7 +413,7 @@ SYM_FUNC_START(csum_partial_copy_generic)
JMP_NOSPEC ebx
1: addl $64,%esi
addl $64,%edi
- SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
+ EXC(movb -32(%edx),%bl) ; EXC(movb (%edx),%bl)
ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
@@ -453,29 +427,20 @@ SYM_FUNC_START(csum_partial_copy_generic)
jz 7f
cmpl $2, %edx
jb 5f
-SRC( movw (%esi), %dx )
+EXC( movw (%esi), %dx )
leal 2(%esi), %esi
-DST( movw %dx, (%edi) )
+EXC( movw %dx, (%edi) )
leal 2(%edi), %edi
je 6f
shll $16,%edx
5:
-SRC( movb (%esi), %dl )
-DST( movb %dl, (%edi) )
+EXC( movb (%esi), %dl )
+EXC( movb %dl, (%edi) )
6: addl %edx, %eax
adcl $0, %eax
7:
.section .fixup, "ax"
-6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
- movl $-EFAULT, (%ebx)
- # zero the complete destination (computing the rest is too much work)
- movl ARGBASE+8(%esp),%edi # dst
- movl ARGBASE+12(%esp),%ecx # len
- xorl %eax,%eax
- rep; stosb
- jmp 7b
-6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
- movl $-EFAULT, (%ebx)
+6001: xorl %eax, %eax
jmp 7b
.previous
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 3394a8ff7fd0..1fbd8ee9642d 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -18,9 +18,6 @@
* rdi source
* rsi destination
* edx len (32bit)
- * ecx sum (32bit)
- * r8 src_err_ptr (int)
- * r9 dst_err_ptr (int)
*
* Output
* eax 64bit sum. undefined in case of exception.
@@ -31,44 +28,32 @@
.macro source
10:
- _ASM_EXTABLE_UA(10b, .Lbad_source)
+ _ASM_EXTABLE_UA(10b, .Lfault)
.endm
.macro dest
20:
- _ASM_EXTABLE_UA(20b, .Lbad_dest)
+ _ASM_EXTABLE_UA(20b, .Lfault)
.endm
- /*
- * No _ASM_EXTABLE_UA; this is used for intentional prefetch on a
- * potentially unmapped kernel address.
- */
- .macro ignore L=.Lignore
-30:
- _ASM_EXTABLE(30b, \L)
- .endm
-
-
SYM_FUNC_START(csum_partial_copy_generic)
- cmpl $3*64, %edx
- jle .Lignore
-
-.Lignore:
- subq $7*8, %rsp
- movq %rbx, 2*8(%rsp)
- movq %r12, 3*8(%rsp)
- movq %r14, 4*8(%rsp)
- movq %r13, 5*8(%rsp)
- movq %r15, 6*8(%rsp)
+ subq $5*8, %rsp
+ movq %rbx, 0*8(%rsp)
+ movq %r12, 1*8(%rsp)
+ movq %r14, 2*8(%rsp)
+ movq %r13, 3*8(%rsp)
+ movq %r15, 4*8(%rsp)
- movq %r8, (%rsp)
- movq %r9, 1*8(%rsp)
-
- movl %ecx, %eax
+ movl $-1, %eax
+ xorl %r9d, %r9d
movl %edx, %ecx
+ cmpl $8, %ecx
+ jb .Lshort
- xorl %r9d, %r9d
- movq %rcx, %r12
+ testb $7, %sil
+ jne .Lunaligned
+.Laligned:
+ movl %ecx, %r12d
shrq $6, %r12
jz .Lhandle_tail /* < 64 */
@@ -99,7 +84,12 @@ SYM_FUNC_START(csum_partial_copy_generic)
source
movq 56(%rdi), %r13
- ignore 2f
+30:
+ /*
+ * No _ASM_EXTABLE_UA; this is used for intentional prefetch on a
+ * potentially unmapped kernel address.
+ */
+ _ASM_EXTABLE(30b, 2f)
prefetcht0 5*64(%rdi)
2:
adcq %rbx, %rax
@@ -131,8 +121,6 @@ SYM_FUNC_START(csum_partial_copy_generic)
dest
movq %r13, 56(%rsi)
-3:
-
leaq 64(%rdi), %rdi
leaq 64(%rsi), %rsi
@@ -142,8 +130,8 @@ SYM_FUNC_START(csum_partial_copy_generic)
/* do last up to 56 bytes */
.Lhandle_tail:
- /* ecx: count */
- movl %ecx, %r10d
+ /* ecx: count, rcx.63: the end result needs to be rol8 */
+ movq %rcx, %r10
andl $63, %ecx
shrl $3, %ecx
jz .Lfold
@@ -172,6 +160,7 @@ SYM_FUNC_START(csum_partial_copy_generic)
.Lhandle_7:
movl %r10d, %ecx
andl $7, %ecx
+.L1: /* .Lshort rejoins the common path here */
shrl $1, %ecx
jz .Lhandle_1
movl $2, %edx
@@ -203,26 +192,65 @@ SYM_FUNC_START(csum_partial_copy_generic)
adcl %r9d, %eax /* carry */
.Lende:
- movq 2*8(%rsp), %rbx
- movq 3*8(%rsp), %r12
- movq 4*8(%rsp), %r14
- movq 5*8(%rsp), %r13
- movq 6*8(%rsp), %r15
- addq $7*8, %rsp
+ testq %r10, %r10
+ js .Lwas_odd
+.Lout:
+ movq 0*8(%rsp), %rbx
+ movq 1*8(%rsp), %r12
+ movq 2*8(%rsp), %r14
+ movq 3*8(%rsp), %r13
+ movq 4*8(%rsp), %r15
+ addq $5*8, %rsp
ret
+.Lshort:
+ movl %ecx, %r10d
+ jmp .L1
+.Lunaligned:
+ xorl %ebx, %ebx
+ testb $1, %sil
+ jne .Lodd
+1: testb $2, %sil
+ je 2f
+ source
+ movw (%rdi), %bx
+ dest
+ movw %bx, (%rsi)
+ leaq 2(%rdi), %rdi
+ subq $2, %rcx
+ leaq 2(%rsi), %rsi
+ addq %rbx, %rax
+2: testb $4, %sil
+ je .Laligned
+ source
+ movl (%rdi), %ebx
+ dest
+ movl %ebx, (%rsi)
+ leaq 4(%rdi), %rdi
+ subq $4, %rcx
+ leaq 4(%rsi), %rsi
+ addq %rbx, %rax
+ jmp .Laligned
+
+.Lodd:
+ source
+ movb (%rdi), %bl
+ dest
+ movb %bl, (%rsi)
+ leaq 1(%rdi), %rdi
+ leaq 1(%rsi), %rsi
+ /* decrement, set MSB */
+ leaq -1(%rcx, %rcx), %rcx
+ rorq $1, %rcx
+ shll $8, %ebx
+ addq %rbx, %rax
+ jmp 1b
+
+.Lwas_odd:
+ roll $8, %eax
+ jmp .Lout
- /* Exception handlers. Very simple, zeroing is done in the wrappers */
-.Lbad_source:
- movq (%rsp), %rax
- testq %rax, %rax
- jz .Lende
- movl $-EFAULT, (%rax)
- jmp .Lende
-
-.Lbad_dest:
- movq 8(%rsp), %rax
- testq %rax, %rax
- jz .Lende
- movl $-EFAULT, (%rax)
- jmp .Lende
+ /* Exception: just return 0 */
+.Lfault:
+ xorl %eax, %eax
+ jmp .Lout
SYM_FUNC_END(csum_partial_copy_generic)
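
The rewritten 64-bit routine now absorbs the alignment handling that used to live in the C wrapper. For an odd start address it copies one byte, accumulates it shifted into the high lane, records the fact in bit 63 of %rcx (the `leaq -1(%rcx, %rcx)` / `rorq $1` pair), and compensates at the very end with `roll $8, %eax`. That compensation is valid because rotating by 8 is multiplication by 256 modulo 2^32 - 1, which distributes over end-around-carry addition. A small model demonstrating the identity, assuming nothing beyond standard C:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t eac_add(uint32_t a, uint32_t b) /* end-around-carry add */
    {
            uint32_t r = a + b;
            return r + (r < b);
    }

    static uint32_t rol8(uint32_t v) { return (v << 8) | (v >> 24); }

    int main(void)
    {
            uint32_t w[] = { 0xdeadbeef, 0x00c0ffee, 0x12345678 };
            uint32_t sum = 0, rsum = 0;

            for (int i = 0; i < 3; i++) {
                    sum  = eac_add(sum, w[i]);
                    rsum = eac_add(rsum, rol8(w[i]));
            }
            /* rotating every addend equals rotating the final sum */
            assert(rol8(sum) == rsum);
            return 0;
    }
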
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index ee63d7576fd2..189344924a2b 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -21,52 +21,16 @@
* src and dst are best aligned to 64bits.
*/
__wsum
-csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum isum, int *errp)
+csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
- might_sleep();
- *errp = 0;
+ __wsum sum;
+ might_sleep();
if (!user_access_begin(src, len))
- goto out_err;
-
- /*
- * Why 6, not 7? To handle odd addresses aligned we
- * would need to do considerable complications to fix the
- * checksum which is defined as an 16bit accumulator. The
- * fix alignment code is primarily for performance
- * compatibility with 32bit and that will handle odd
- * addresses slowly too.
- */
- if (unlikely((unsigned long)src & 6)) {
- while (((unsigned long)src & 6) && len >= 2) {
- __u16 val16;
-
- unsafe_get_user(val16, (const __u16 __user *)src, out);
-
- *(__u16 *)dst = val16;
- isum = (__force __wsum)add32_with_carry(
- (__force unsigned)isum, val16);
- src += 2;
- dst += 2;
- len -= 2;
- }
- }
- isum = csum_partial_copy_generic((__force const void *)src,
- dst, len, isum, errp, NULL);
- user_access_end();
- if (unlikely(*errp))
- goto out_err;
-
- return isum;
-
-out:
+ return 0;
+ sum = csum_partial_copy_generic((__force const void *)src, dst, len);
user_access_end();
-out_err:
- *errp = -EFAULT;
- memset(dst, 0, len);
-
- return isum;
+ return sum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -82,40 +46,16 @@ EXPORT_SYMBOL(csum_and_copy_from_user);
* src and dst are best aligned to 64bits.
*/
__wsum
-csum_and_copy_to_user(const void *src, void __user *dst,
- int len, __wsum isum, int *errp)
+csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
- __wsum ret;
+ __wsum sum;
might_sleep();
-
- if (!user_access_begin(dst, len)) {
- *errp = -EFAULT;
+ if (!user_access_begin(dst, len))
return 0;
- }
-
- if (unlikely((unsigned long)dst & 6)) {
- while (((unsigned long)dst & 6) && len >= 2) {
- __u16 val16 = *(__u16 *)src;
-
- isum = (__force __wsum)add32_with_carry(
- (__force unsigned)isum, val16);
- unsafe_put_user(val16, (__u16 __user *)dst, out);
- src += 2;
- dst += 2;
- len -= 2;
- }
- }
-
- *errp = 0;
- ret = csum_partial_copy_generic(src, (void __force *)dst,
- len, isum, NULL, errp);
- user_access_end();
- return ret;
-out:
+ sum = csum_partial_copy_generic(src, (void __force *)dst, len);
user_access_end();
- *errp = -EFAULT;
- return isum;
+ return sum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);
@@ -129,9 +69,9 @@ EXPORT_SYMBOL(csum_and_copy_to_user);
 * Returns a 32-bit unfolded checksum of the buffer.
*/
__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
- return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
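
With the odd-address pre-loops gone (the asm's .Lunaligned path above handles them now), the wrappers shrink to the bare access_ok/copy/csum sequence. The caller-visible change across the whole series can be summarized as follows (hypothetical call site, illustrative names):

    /* before: sum threaded through, error reported via a pointer */
    int err = 0;
    __wsum csum = csum_and_copy_from_user(src, dst, len, 0, &err);
    if (err)
            return -EFAULT;

    /* after: initial sum is ~0U inside the helper; 0 return means fault */
    csum = csum_and_copy_from_user(src, dst, len);
    if (!csum)
            return -EFAULT;
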
diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h
index ff6bba2c8ab6..b07824500363 100644
--- a/arch/x86/um/asm/checksum.h
+++ b/arch/x86/um/asm/checksum.h
@@ -20,22 +20,6 @@
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-/*
- * Note: when you get a NULL pointer exception here this means someone
- * passed in an incorrect kernel address to one of these functions.
- *
- * If you use these functions directly please don't forget the
- * access_ok().
- */
-
-static __inline__
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
-
/**
* csum_fold - Fold and invert a 32bit checksum.
* sum: 32bit unfolded sum
diff --git a/arch/x86/um/asm/checksum_32.h b/arch/x86/um/asm/checksum_32.h
index b9ac7c9eb72c..0b13c2947ad1 100644
--- a/arch/x86/um/asm/checksum_32.h
+++ b/arch/x86/um/asm/checksum_32.h
@@ -35,27 +35,4 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold(sum);
}
-/*
- * Copy and checksum to user
- */
-#define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src,
- void __user *dst,
- int len, __wsum sum, int *err_ptr)
-{
- if (access_ok(dst, len)) {
- if (copy_to_user(dst, src, len)) {
- *err_ptr = -EFAULT;
- return (__force __wsum)-1;
- }
-
- return csum_partial(src, len, sum);
- }
-
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
-}
-
#endif
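
Both UML helpers could simply be deleted because the generic versions introduced earlier in this series cover them; the removed bodies (memcpy()/copy_to_user() followed by csum_partial()) are essentially what the unified fallback does. Presumably the generic csum_partial_copy_nocheck() is gated on the macro the other headers in this diff define, along these lines (a sketch; the guard name _HAVE_ARCH_CSUM_AND_COPY is the one added above for x86 and xtensa):

    #ifndef _HAVE_ARCH_CSUM_AND_COPY
    static inline __wsum
    csum_partial_copy_nocheck(const void *src, void *dst, int len)
    {
            memcpy(dst, src, len);
            return csum_partial(dst, len, 0);
    }
    #endif
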
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index 243a5fe79d3c..44ec1d0b2a35 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -37,32 +37,27 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
+#define _HAVE_ARCH_CSUM_AND_COPY
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*/
static inline
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
- return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len);
}
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len)
{
- if (access_ok(src, len))
- return csum_partial_copy_generic((__force const void *)src, dst,
- len, sum, err_ptr, NULL);
- if (len)
- *err_ptr = -EFAULT;
- return sum;
+ if (!access_ok(src, len))
+ return 0;
+ return csum_partial_copy_generic((__force const void *)src, dst, len);
}
/*
@@ -243,15 +238,10 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
*/
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
- void __user *dst, int len,
- __wsum sum, int *err_ptr)
+ void __user *dst, int len)
{
- if (access_ok(dst, len))
- return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
-
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
+ if (!access_ok(dst, len))
+ return 0;
+ return csum_partial_copy_generic(src, (__force void *)dst, len);
}
#endif
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index 4cb9ca58d9ad..cf1bed1a5bd6 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -175,19 +175,14 @@ ENDPROC(csum_partial)
*/
/*
-unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
- int sum, int *src_err_ptr, int *dst_err_ptr)
+unsigned int csum_partial_copy_generic (const char *src, char *dst, int len)
a2 = src
a3 = dst
a4 = len
a5 = sum
- a6 = src_err_ptr
- a7 = dst_err_ptr
a8 = temp
a9 = temp
a10 = temp
- a11 = original len for exception handling
- a12 = original dst for exception handling
This function is optimized for 4-byte aligned addresses. Other
alignments work, but not nearly as efficiently.
@@ -196,8 +191,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
ENTRY(csum_partial_copy_generic)
abi_entry_default
- mov a12, a3
- mov a11, a4
+ movi a5, -1
or a10, a2, a3
/* We optimize the following alignment tests for the 4-byte
@@ -228,26 +222,26 @@ ENTRY(csum_partial_copy_generic)
#endif
EX(10f) l32i a9, a2, 0
EX(10f) l32i a8, a2, 4
-EX(11f) s32i a9, a3, 0
-EX(11f) s32i a8, a3, 4
+EX(10f) s32i a9, a3, 0
+EX(10f) s32i a8, a3, 4
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
EX(10f) l32i a9, a2, 8
EX(10f) l32i a8, a2, 12
-EX(11f) s32i a9, a3, 8
-EX(11f) s32i a8, a3, 12
+EX(10f) s32i a9, a3, 8
+EX(10f) s32i a8, a3, 12
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
EX(10f) l32i a9, a2, 16
EX(10f) l32i a8, a2, 20
-EX(11f) s32i a9, a3, 16
-EX(11f) s32i a8, a3, 20
+EX(10f) s32i a9, a3, 16
+EX(10f) s32i a8, a3, 20
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
EX(10f) l32i a9, a2, 24
EX(10f) l32i a8, a2, 28
-EX(11f) s32i a9, a3, 24
-EX(11f) s32i a8, a3, 28
+EX(10f) s32i a9, a3, 24
+EX(10f) s32i a8, a3, 28
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
addi a2, a2, 32
@@ -267,7 +261,7 @@ EX(11f) s32i a8, a3, 28
.Loop6:
#endif
EX(10f) l32i a9, a2, 0
-EX(11f) s32i a9, a3, 0
+EX(10f) s32i a9, a3, 0
ONES_ADD(a5, a9)
addi a2, a2, 4
addi a3, a3, 4
@@ -298,7 +292,7 @@ EX(11f) s32i a9, a3, 0
.Loop7:
#endif
EX(10f) l16ui a9, a2, 0
-EX(11f) s16i a9, a3, 0
+EX(10f) s16i a9, a3, 0
ONES_ADD(a5, a9)
addi a2, a2, 2
addi a3, a3, 2
@@ -309,7 +303,7 @@ EX(11f) s16i a9, a3, 0
/* This section processes a possible trailing odd byte. */
_bbci.l a4, 0, 8f /* 1-byte chunk */
EX(10f) l8ui a9, a2, 0
-EX(11f) s8i a9, a3, 0
+EX(10f) s8i a9, a3, 0
#ifdef __XTENSA_EB__
slli a9, a9, 8 /* shift byte to bits 8..15 */
#endif
@@ -334,8 +328,8 @@ EX(11f) s8i a9, a3, 0
#endif
EX(10f) l8ui a9, a2, 0
EX(10f) l8ui a8, a2, 1
-EX(11f) s8i a9, a3, 0
-EX(11f) s8i a8, a3, 1
+EX(10f) s8i a9, a3, 0
+EX(10f) s8i a8, a3, 1
#ifdef __XTENSA_EB__
slli a9, a9, 8 /* combine into a single 16-bit value */
#else /* for checksum computation */
@@ -356,38 +350,7 @@ ENDPROC(csum_partial_copy_generic)
# Exception handler:
.section .fixup, "ax"
-/*
- a6 = src_err_ptr
- a7 = dst_err_ptr
- a11 = original len for exception handling
- a12 = original dst for exception handling
-*/
-
10:
- _movi a2, -EFAULT
- s32i a2, a6, 0 /* src_err_ptr */
-
- # clear the complete destination - computing the rest
- # is too much work
- movi a2, 0
-#if XCHAL_HAVE_LOOPS
- loopgtz a11, 2f
-#else
- beqz a11, 2f
- add a11, a11, a12 /* a11 = ending address */
-.Leloop:
-#endif
- s8i a2, a12, 0
- addi a12, a12, 1
-#if !XCHAL_HAVE_LOOPS
- blt a12, a11, .Leloop
-#endif
-2:
- abi_ret_default
-
-11:
- movi a2, -EFAULT
- s32i a2, a7, 0 /* dst_err_ptr */
movi a2, 0
abi_ret_default