Diffstat (limited to 'arch/mips/lib')
-rw-r--r--	arch/mips/lib/csum_partial.S	| 282
-rw-r--r--	arch/mips/lib/memcpy.S		| 416
-rw-r--r--	arch/mips/lib/memset.S		| 146
-rw-r--r--	arch/mips/lib/strlen_user.S	| 36
-rw-r--r--	arch/mips/lib/strncpy_user.S	| 40
-rw-r--r--	arch/mips/lib/strnlen_user.S	| 36
6 files changed, 638 insertions(+), 318 deletions(-)
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index a6adffbb4e5f..2e4825e48388 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -8,6 +8,7 @@
* Copyright (C) 1998, 1999 Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
*/
#include <linux/errno.h>
#include <asm/asm.h>
@@ -296,7 +297,7 @@ LEAF(csum_partial)
* checksum and copy routines based on memcpy.S
*
* csum_partial_copy_nocheck(src, dst, len, sum)
- * __csum_partial_copy_user(src, dst, len, sum, errp)
+ * __csum_partial_copy_kernel(src, dst, len, sum, errp)
*
* See "Spec" in memcpy.S for details. Unlike __copy_user, all
* functions in this file use the standard calling convention.
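
For orientation, this is roughly how the errp argument is consumed by the C-side wrappers (a sketch modeled on the checksum helpers in asm/checksum.h; names and details here are illustrative, not part of this patch):

	/* hedged sketch: err_ptr receives -EFAULT if the copy faults */
	static inline __wsum
	csum_and_copy_from_user(const void __user *src, void *dst,
				int len, __wsum sum, int *err_ptr)
	{
		if (access_ok(VERIFY_READ, src, len))
			return __csum_partial_copy_from_user(src, dst, len,
							     sum, err_ptr);
		if (len)
			*err_ptr = -EFAULT;
		return sum;
	}

On success the partial checksum is returned and *err_ptr is untouched; on a fault the assembly below stores -EFAULT through errptr and returns an invalid checksum.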
@@ -327,20 +328,58 @@ LEAF(csum_partial)
* These handlers do not need to overwrite any data.
*/
-#define EXC(inst_reg,addr,handler) \
-9: inst_reg, addr; \
- .section __ex_table,"a"; \
- PTR 9b, handler; \
- .previous
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+#define LEGACY_MODE 1
+#define EVA_MODE 2
+#define USEROP 1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn : Load/store instruction
+ * type : Instruction type
+ * reg : Register
+ * addr : Address
+ * handler : Exception handler
+ */
+#define EXC(insn, type, reg, addr, handler) \
+ .if \mode == LEGACY_MODE; \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous; \
+ /* This is assembled in EVA mode */ \
+ .else; \
+ /* If loading from user or storing to user */ \
+ .if ((\from == USEROP) && (type == LD_INSN)) || \
+ ((\to == USEROP) && (type == ST_INSN)); \
+9: __BUILD_EVA_INSN(insn##e, reg, addr); \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous; \
+ .else; \
+ /* EVA without exception */ \
+ insn reg, addr; \
+ .endif; \
+ .endif
+
+#undef LOAD
#ifdef USE_DOUBLE
-#define LOAD ld
-#define LOADL ldl
-#define LOADR ldr
-#define STOREL sdl
-#define STORER sdr
-#define STORE sd
+#define LOADK ld /* No exception */
+#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
+#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
#define ADD daddu
#define SUB dsubu
#define SRL dsrl
@@ -352,12 +391,15 @@ LEAF(csum_partial)
#else
-#define LOAD lw
-#define LOADL lwl
-#define LOADR lwr
-#define STOREL swl
-#define STORER swr
-#define STORE sw
+#define LOADK lw /* No exception */
+#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
+#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
#define ADD addu
#define SUB subu
#define SRL srl
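
To make the EXC() machinery concrete, here is a hand-written sketch of what one LOAD(t0, 0(src), handler) site assembles to in each mode, using the 32-bit lw variant (expansions reconstructed by hand from the macro above; the handler label name is invented):

	# LEGACY_MODE: a plain load, plus an __ex_table entry for the fixup
	9:	lw	t0, 0(src)
		.section __ex_table, "a"
		PTR	9b, .Ll_exc_1		# handler label (\@-suffixed)
		.previous

	# EVA_MODE, loading from user (\from == USEROP): the EVA form is used
	9:	lwe	t0, 0(src)		# lw##e, built by __BUILD_EVA_INSN
		.section __ex_table, "a"
		PTR	9b, .Ll_exc_1
		.previous

	# EVA_MODE, but a kernel-side access: no EVA insn, no fixup entry
		lw	t0, 0(src)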
@@ -396,14 +438,20 @@ LEAF(csum_partial)
.set at=v1
#endif
-LEAF(__csum_partial_copy_user)
+ .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
+
PTR_ADDU AT, src, len /* See (1) above. */
+ /* initialize __nocheck if this is the first time we execute this
+ * macro
+ */
#ifdef CONFIG_64BIT
move errptr, a4
#else
lw errptr, 16(sp)
#endif
-FEXPORT(csum_partial_copy_nocheck)
+ .if \__nocheck == 1
+ FEXPORT(csum_partial_copy_nocheck)
+ .endif
move sum, zero
move odd, zero
/*
@@ -419,48 +467,48 @@ FEXPORT(csum_partial_copy_nocheck)
*/
sltu t2, len, NBYTES
and t1, dst, ADDRMASK
- bnez t2, .Lcopy_bytes_checklen
+ bnez t2, .Lcopy_bytes_checklen\@
and t0, src, ADDRMASK
andi odd, dst, 0x1 /* odd buffer? */
- bnez t1, .Ldst_unaligned
+ bnez t1, .Ldst_unaligned\@
nop
- bnez t0, .Lsrc_unaligned_dst_aligned
+ bnez t0, .Lsrc_unaligned_dst_aligned\@
/*
* use delay slot for fall-through
* src and dst are aligned; need to compute rem
*/
-.Lboth_aligned:
+.Lboth_aligned\@:
SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
- beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
+ beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
nop
SUB len, 8*NBYTES # subtract here for bgez loop
.align 4
1:
-EXC( LOAD t0, UNIT(0)(src), .Ll_exc)
-EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy)
-EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy)
-EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy)
-EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy)
-EXC( LOAD t5, UNIT(5)(src), .Ll_exc_copy)
-EXC( LOAD t6, UNIT(6)(src), .Ll_exc_copy)
-EXC( LOAD t7, UNIT(7)(src), .Ll_exc_copy)
+ LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+ LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+ LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+ LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+ LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
+ LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
+ LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
+ LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
SUB len, len, 8*NBYTES
ADD src, src, 8*NBYTES
-EXC( STORE t0, UNIT(0)(dst), .Ls_exc)
+ STORE(t0, UNIT(0)(dst), .Ls_exc\@)
ADDC(sum, t0)
-EXC( STORE t1, UNIT(1)(dst), .Ls_exc)
+ STORE(t1, UNIT(1)(dst), .Ls_exc\@)
ADDC(sum, t1)
-EXC( STORE t2, UNIT(2)(dst), .Ls_exc)
+ STORE(t2, UNIT(2)(dst), .Ls_exc\@)
ADDC(sum, t2)
-EXC( STORE t3, UNIT(3)(dst), .Ls_exc)
+ STORE(t3, UNIT(3)(dst), .Ls_exc\@)
ADDC(sum, t3)
-EXC( STORE t4, UNIT(4)(dst), .Ls_exc)
+ STORE(t4, UNIT(4)(dst), .Ls_exc\@)
ADDC(sum, t4)
-EXC( STORE t5, UNIT(5)(dst), .Ls_exc)
+ STORE(t5, UNIT(5)(dst), .Ls_exc\@)
ADDC(sum, t5)
-EXC( STORE t6, UNIT(6)(dst), .Ls_exc)
+ STORE(t6, UNIT(6)(dst), .Ls_exc\@)
ADDC(sum, t6)
-EXC( STORE t7, UNIT(7)(dst), .Ls_exc)
+ STORE(t7, UNIT(7)(dst), .Ls_exc\@)
ADDC(sum, t7)
.set reorder /* DADDI_WAR */
ADD dst, dst, 8*NBYTES
@@ -471,44 +519,44 @@ EXC( STORE t7, UNIT(7)(dst), .Ls_exc)
/*
* len == the number of bytes left to copy < 8*NBYTES
*/
-.Lcleanup_both_aligned:
+.Lcleanup_both_aligned\@:
#define rem t7
- beqz len, .Ldone
+ beqz len, .Ldone\@
sltu t0, len, 4*NBYTES
- bnez t0, .Lless_than_4units
+ bnez t0, .Lless_than_4units\@
and rem, len, (NBYTES-1) # rem = len % NBYTES
/*
* len >= 4*NBYTES
*/
-EXC( LOAD t0, UNIT(0)(src), .Ll_exc)
-EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy)
-EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy)
-EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy)
+ LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+ LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+ LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+ LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
SUB len, len, 4*NBYTES
ADD src, src, 4*NBYTES
-EXC( STORE t0, UNIT(0)(dst), .Ls_exc)
+ STORE(t0, UNIT(0)(dst), .Ls_exc\@)
ADDC(sum, t0)
-EXC( STORE t1, UNIT(1)(dst), .Ls_exc)
+ STORE(t1, UNIT(1)(dst), .Ls_exc\@)
ADDC(sum, t1)
-EXC( STORE t2, UNIT(2)(dst), .Ls_exc)
+ STORE(t2, UNIT(2)(dst), .Ls_exc\@)
ADDC(sum, t2)
-EXC( STORE t3, UNIT(3)(dst), .Ls_exc)
+ STORE(t3, UNIT(3)(dst), .Ls_exc\@)
ADDC(sum, t3)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
- beqz len, .Ldone
+ beqz len, .Ldone\@
.set noreorder
-.Lless_than_4units:
+.Lless_than_4units\@:
/*
* rem = len % NBYTES
*/
- beq rem, len, .Lcopy_bytes
+ beq rem, len, .Lcopy_bytes\@
nop
1:
-EXC( LOAD t0, 0(src), .Ll_exc)
+ LOAD(t0, 0(src), .Ll_exc\@)
ADD src, src, NBYTES
SUB len, len, NBYTES
-EXC( STORE t0, 0(dst), .Ls_exc)
+ STORE(t0, 0(dst), .Ls_exc\@)
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@@ -527,20 +575,20 @@ EXC( STORE t0, 0(dst), .Ls_exc)
* more instruction-level parallelism.
*/
#define bits t2
- beqz len, .Ldone
+ beqz len, .Ldone\@
ADD t1, dst, len # t1 is just past last byte of dst
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
-EXC( LOAD t0, 0(src), .Ll_exc)
+ LOAD(t0, 0(src), .Ll_exc\@)
SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
-EXC( STREST t0, -1(t1), .Ls_exc)
+ STREST(t0, -1(t1), .Ls_exc\@)
SHIFT_DISCARD_REVERT t0, t0, bits
.set reorder
ADDC(sum, t0)
- b .Ldone
+ b .Ldone\@
.set noreorder
-.Ldst_unaligned:
+.Ldst_unaligned\@:
/*
* dst is unaligned
* t0 = src & ADDRMASK
@@ -551,25 +599,25 @@ EXC( STREST t0, -1(t1), .Ls_exc)
* Set match = (src and dst have same alignment)
*/
#define match rem
-EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
+ LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
ADD t2, zero, NBYTES
-EXC( LDREST t3, REST(0)(src), .Ll_exc_copy)
+ LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
SUB t2, t2, t1 # t2 = number of bytes copied
xor match, t0, t1
-EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
+ STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
SLL t4, t1, 3 # t4 = number of bits to discard
SHIFT_DISCARD t3, t3, t4
/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
ADDC(sum, t3)
- beq len, t2, .Ldone
+ beq len, t2, .Ldone\@
SUB len, len, t2
ADD dst, dst, t2
- beqz match, .Lboth_aligned
+ beqz match, .Lboth_aligned\@
ADD src, src, t2
-.Lsrc_unaligned_dst_aligned:
+.Lsrc_unaligned_dst_aligned\@:
SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
- beqz t0, .Lcleanup_src_unaligned
+ beqz t0, .Lcleanup_src_unaligned\@
and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
1:
/*
@@ -578,53 +626,53 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
+ LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+ LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
SUB len, len, 4*NBYTES
-EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
-EXC( LDREST t1, REST(1)(src), .Ll_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
-EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
-EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
+ LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+ LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
+ LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
+ LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
+ LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
+ LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
nop # improves slotting
#endif
-EXC( STORE t0, UNIT(0)(dst), .Ls_exc)
+ STORE(t0, UNIT(0)(dst), .Ls_exc\@)
ADDC(sum, t0)
-EXC( STORE t1, UNIT(1)(dst), .Ls_exc)
+ STORE(t1, UNIT(1)(dst), .Ls_exc\@)
ADDC(sum, t1)
-EXC( STORE t2, UNIT(2)(dst), .Ls_exc)
+ STORE(t2, UNIT(2)(dst), .Ls_exc\@)
ADDC(sum, t2)
-EXC( STORE t3, UNIT(3)(dst), .Ls_exc)
+ STORE(t3, UNIT(3)(dst), .Ls_exc\@)
ADDC(sum, t3)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
bne len, rem, 1b
.set noreorder
-.Lcleanup_src_unaligned:
- beqz len, .Ldone
+.Lcleanup_src_unaligned\@:
+ beqz len, .Ldone\@
and rem, len, NBYTES-1 # rem = len % NBYTES
- beq rem, len, .Lcopy_bytes
+ beq rem, len, .Lcopy_bytes\@
nop
1:
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
+ LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+ LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
ADD src, src, NBYTES
SUB len, len, NBYTES
-EXC( STORE t0, 0(dst), .Ls_exc)
+ STORE(t0, 0(dst), .Ls_exc\@)
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
bne len, rem, 1b
.set noreorder
-.Lcopy_bytes_checklen:
- beqz len, .Ldone
+.Lcopy_bytes_checklen\@:
+ beqz len, .Ldone\@
nop
-.Lcopy_bytes:
+.Lcopy_bytes\@:
/* 0 < len < NBYTES */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
@@ -637,12 +685,12 @@ EXC( STORE t0, 0(dst), .Ls_exc)
li t3, SHIFT_START # shift
/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N) \
-EXC( lbu t0, N(src), .Ll_exc_copy); \
+ LOADBU(t0, N(src), .Ll_exc_copy\@); \
SUB len, len, 1; \
-EXC( sb t0, N(dst), .Ls_exc); \
+ STOREB(t0, N(dst), .Ls_exc\@); \
SLLV t0, t0, t3; \
addu t3, SHIFT_INC; \
- beqz len, .Lcopy_bytes_done; \
+ beqz len, .Lcopy_bytes_done\@; \
or t2, t0
COPY_BYTE(0)
@@ -653,14 +701,14 @@ EXC( sb t0, N(dst), .Ls_exc); \
COPY_BYTE(4)
COPY_BYTE(5)
#endif
-EXC( lbu t0, NBYTES-2(src), .Ll_exc_copy)
+ LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
SUB len, len, 1
-EXC( sb t0, NBYTES-2(dst), .Ls_exc)
+ STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
SLLV t0, t0, t3
or t2, t0
-.Lcopy_bytes_done:
+.Lcopy_bytes_done\@:
ADDC(sum, t2)
-.Ldone:
+.Ldone\@:
/* fold checksum */
#ifdef USE_DOUBLE
dsll32 v1, sum, 0
@@ -689,7 +737,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
jr ra
.set noreorder
-.Ll_exc_copy:
+.Ll_exc_copy\@:
/*
* Copy bytes from src until faulting load address (or until a
* lb faults)
@@ -700,11 +748,11 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
*
* Assumes src < THREAD_BUADDR($28)
*/
- LOAD t0, TI_TASK($28)
+ LOADK t0, TI_TASK($28)
li t2, SHIFT_START
- LOAD t0, THREAD_BUADDR(t0)
+ LOADK t0, THREAD_BUADDR(t0)
1:
-EXC( lbu t1, 0(src), .Ll_exc)
+ LOADBU(t1, 0(src), .Ll_exc\@)
ADD src, src, 1
sb t1, 0(dst) # can't fault -- we're copy_from_user
SLLV t1, t1, t2
@@ -714,10 +762,10 @@ EXC( lbu t1, 0(src), .Ll_exc)
ADD dst, dst, 1
bne src, t0, 1b
.set noreorder
-.Ll_exc:
- LOAD t0, TI_TASK($28)
+.Ll_exc\@:
+ LOADK t0, TI_TASK($28)
nop
- LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
+ LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
nop
SUB len, AT, t0 # len number of uncopied bytes
/*
@@ -733,7 +781,7 @@ EXC( lbu t1, 0(src), .Ll_exc)
*/
.set reorder /* DADDI_WAR */
SUB src, len, 1
- beqz len, .Ldone
+ beqz len, .Ldone\@
.set noreorder
1: sb zero, 0(dst)
ADD dst, dst, 1
@@ -748,13 +796,31 @@ EXC( lbu t1, 0(src), .Ll_exc)
SUB src, src, v1
#endif
li v1, -EFAULT
- b .Ldone
+ b .Ldone\@
sw v1, (errptr)
-.Ls_exc:
+.Ls_exc\@:
li v0, -1 /* invalid checksum */
li v1, -EFAULT
jr ra
sw v1, (errptr)
.set pop
- END(__csum_partial_copy_user)
+ .endm
+
+LEAF(__csum_partial_copy_kernel)
+#ifndef CONFIG_EVA
+FEXPORT(__csum_partial_copy_to_user)
+FEXPORT(__csum_partial_copy_from_user)
+#endif
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
+END(__csum_partial_copy_kernel)
+
+#ifdef CONFIG_EVA
+LEAF(__csum_partial_copy_to_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
+END(__csum_partial_copy_to_user)
+
+LEAF(__csum_partial_copy_from_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
+END(__csum_partial_copy_from_user)
+#endif
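
The duplicate-label problem that would otherwise come from expanding the body three times is solved by the \@ suffixes: gas substitutes \@ with a counter that is unique per macro execution. A minimal standalone illustration of the mechanism (not from this patch):

	.macro	count_demo
	.Lskip\@:			# becomes .Lskip0, .Lskip1, ... per expansion
		nop
	.endm

	count_demo			# defines .Lskip0
	count_demo			# defines .Lskip1 -- no duplicate label error

With that, the three instantiations above yield __csum_partial_copy_kernel plus, under CONFIG_EVA, separate to-user and from-user entry points, each carrying its own private copies of every .L label.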
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c5c40dad0bbf..c17ef80cf65a 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -10,6 +10,7 @@
* Copyright (C) 2002 Broadcom, Inc.
* memcpy/copy_user author: Mark Vandevoorde
* Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
*
* Mnemonic names for arguments to memcpy/__copy_user
*/
@@ -85,11 +86,51 @@
* they're not protected.
*/
-#define EXC(inst_reg,addr,handler) \
-9: inst_reg, addr; \
- .section __ex_table,"a"; \
- PTR 9b, handler; \
- .previous
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+/* Prefetch type */
+#define SRC_PREFETCH 1
+#define DST_PREFETCH 2
+#define LEGACY_MODE 1
+#define EVA_MODE 2
+#define USEROP 1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn : Load/store instruction
+ * type : Instruction type
+ * reg : Register
+ * addr : Address
+ * handler : Exception handler
+ */
+
+#define EXC(insn, type, reg, addr, handler) \
+ .if \mode == LEGACY_MODE; \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous; \
+ /* This is assembled in EVA mode */ \
+ .else; \
+ /* If loading from user or storing to user */ \
+ .if ((\from == USEROP) && (type == LD_INSN)) || \
+ ((\to == USEROP) && (type == ST_INSN)); \
+9: __BUILD_EVA_INSN(insn##e, reg, addr); \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous; \
+ .else; \
+ /* \
+ * Still in EVA, but no need for \
+ * exception handler or EVA insn \
+ */ \
+ insn reg, addr; \
+ .endif; \
+ .endif
/*
* Only on the 64-bit kernel can we make use of 64-bit registers.
@@ -100,12 +141,13 @@
#ifdef USE_DOUBLE
-#define LOAD ld
-#define LOADL ldl
-#define LOADR ldr
-#define STOREL sdl
-#define STORER sdr
-#define STORE sd
+#define LOADK ld /* No exception */
+#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
#define ADD daddu
#define SUB dsubu
#define SRL dsrl
@@ -136,12 +178,13 @@
#else
-#define LOAD lw
-#define LOADL lwl
-#define LOADR lwr
-#define STOREL swl
-#define STORER swr
-#define STORE sw
+#define LOADK lw /* No exception */
+#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
#define ADD addu
#define SUB subu
#define SRL srl
@@ -154,6 +197,33 @@
#endif /* USE_DOUBLE */
+#define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
+
+#define _PREF(hint, addr, type) \
+ .if \mode == LEGACY_MODE; \
+ PREF(hint, addr); \
+ .else; \
+ .if ((\from == USEROP) && (type == SRC_PREFETCH)) || \
+ ((\to == USEROP) && (type == DST_PREFETCH)); \
+ /* \
+ * PREFE has only 9 bits for the offset \
+ * compared to PREF which has 16, so it may \
+ * need to use the $at register but this \
+ * register should remain intact because it's \
+ * used later on. Therefore use $v1. \
+ */ \
+ .set at=v1; \
+ PREFE(hint, addr); \
+ .set noat; \
+ .else; \
+ PREF(hint, addr); \
+ .endif; \
+ .endif
+
+#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
+#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
+
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST LOADL
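
LDFIRST/LDREST pair the partial-word loads so that one unaligned word can be fetched with two instructions and no alignment trap. A worked little-endian example for a word at src with src & 3 == 1, destination aligned (a sketch; registers chosen arbitrarily):

	lwr	t0, 0(a1)	# FIRST: bytes from a1 up to the next word boundary
				#        land in the low-order part of t0
	lwl	t0, 3(a1)	# REST:  the rest of the word fills the high-order part
	sw	t0, 0(a0)	# dst is word-aligned, so a plain store completes the copy

On big-endian the roles swap (LDFIRST is lwl, LDREST is lwr), which is what the big-endian branch of this conditional selects.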
@@ -182,27 +252,23 @@
.set at=v1
#endif
-/*
- * t6 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
- b __copy_user_common
- li t6, 1
- END(__copy_user_inatomic)
-
-/*
- * A combined memcpy/__copy_user
- * __copy_user sets len to 0 for success; else to an upper bound of
- * the number of uncopied bytes.
- * memcpy sets v0 to dst.
- */
.align 5
-LEAF(memcpy) /* a0=dst a1=src a2=len */
- move v0, dst /* return value */
-.L__memcpy:
-FEXPORT(__copy_user)
- li t6, 0 /* not inatomic */
-__copy_user_common:
+
+ /*
+ * Macro to build the __copy_user common code
+ * Arguments:
+ * mode : LEGACY_MODE or EVA_MODE
+ * from : Source operand. USEROP or KERNELOP
+ * to : Destination operand. USEROP or KERNELOP
+ */
+ .macro __BUILD_COPY_USER mode, from, to
+
+ /* initialize __memcpy if this is the first time we execute this macro */
+ .ifnotdef __memcpy
+ .set __memcpy, 1
+ .hidden __memcpy /* make sure it does not leak */
+ .endif
+
/*
* Note: dst & src may be unaligned, len may be 0
* Temps
@@ -217,94 +283,94 @@ __copy_user_common:
*
* If len < NBYTES use byte operations.
*/
- PREF( 0, 0(src) )
- PREF( 1, 0(dst) )
+ PREFS( 0, 0(src) )
+ PREFD( 1, 0(dst) )
sltu t2, len, NBYTES
and t1, dst, ADDRMASK
- PREF( 0, 1*32(src) )
- PREF( 1, 1*32(dst) )
- bnez t2, .Lcopy_bytes_checklen
+ PREFS( 0, 1*32(src) )
+ PREFD( 1, 1*32(dst) )
+ bnez t2, .Lcopy_bytes_checklen\@
and t0, src, ADDRMASK
- PREF( 0, 2*32(src) )
- PREF( 1, 2*32(dst) )
- bnez t1, .Ldst_unaligned
+ PREFS( 0, 2*32(src) )
+ PREFD( 1, 2*32(dst) )
+ bnez t1, .Ldst_unaligned\@
nop
- bnez t0, .Lsrc_unaligned_dst_aligned
+ bnez t0, .Lsrc_unaligned_dst_aligned\@
/*
* use delay slot for fall-through
* src and dst are aligned; need to compute rem
*/
-.Lboth_aligned:
+.Lboth_aligned\@:
SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
- beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
+ beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
- PREF( 0, 3*32(src) )
- PREF( 1, 3*32(dst) )
+ PREFS( 0, 3*32(src) )
+ PREFD( 1, 3*32(dst) )
.align 4
1:
R10KCBARRIER(0(ra))
-EXC( LOAD t0, UNIT(0)(src), .Ll_exc)
-EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy)
-EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy)
-EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy)
+ LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+ LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+ LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+ LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
SUB len, len, 8*NBYTES
-EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy)
-EXC( LOAD t7, UNIT(5)(src), .Ll_exc_copy)
-EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p8u)
-EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p7u)
-EXC( LOAD t0, UNIT(6)(src), .Ll_exc_copy)
-EXC( LOAD t1, UNIT(7)(src), .Ll_exc_copy)
+ LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
+ LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
+ STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@)
+ STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@)
+ LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
+ LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
ADD src, src, 8*NBYTES
ADD dst, dst, 8*NBYTES
-EXC( STORE t2, UNIT(-6)(dst), .Ls_exc_p6u)
-EXC( STORE t3, UNIT(-5)(dst), .Ls_exc_p5u)
-EXC( STORE t4, UNIT(-4)(dst), .Ls_exc_p4u)
-EXC( STORE t7, UNIT(-3)(dst), .Ls_exc_p3u)
-EXC( STORE t0, UNIT(-2)(dst), .Ls_exc_p2u)
-EXC( STORE t1, UNIT(-1)(dst), .Ls_exc_p1u)
- PREF( 0, 8*32(src) )
- PREF( 1, 8*32(dst) )
+ STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
+ STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
+ STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
+ STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
+ STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
+ STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
+ PREFS( 0, 8*32(src) )
+ PREFD( 1, 8*32(dst) )
bne len, rem, 1b
nop
/*
* len == rem == the number of bytes left to copy < 8*NBYTES
*/
-.Lcleanup_both_aligned:
- beqz len, .Ldone
+.Lcleanup_both_aligned\@:
+ beqz len, .Ldone\@
sltu t0, len, 4*NBYTES
- bnez t0, .Lless_than_4units
+ bnez t0, .Lless_than_4units\@
and rem, len, (NBYTES-1) # rem = len % NBYTES
/*
* len >= 4*NBYTES
*/
-EXC( LOAD t0, UNIT(0)(src), .Ll_exc)
-EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy)
-EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy)
-EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy)
+ LOAD( t0, UNIT(0)(src), .Ll_exc\@)
+ LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@)
+ LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@)
+ LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@)
SUB len, len, 4*NBYTES
ADD src, src, 4*NBYTES
R10KCBARRIER(0(ra))
-EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u)
-EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u)
-EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u)
-EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u)
+ STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
+ STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
+ STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
+ STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
- beqz len, .Ldone
+ beqz len, .Ldone\@
.set noreorder
-.Lless_than_4units:
+.Lless_than_4units\@:
/*
* rem = len % NBYTES
*/
- beq rem, len, .Lcopy_bytes
+ beq rem, len, .Lcopy_bytes\@
nop
1:
R10KCBARRIER(0(ra))
-EXC( LOAD t0, 0(src), .Ll_exc)
+ LOAD(t0, 0(src), .Ll_exc\@)
ADD src, src, NBYTES
SUB len, len, NBYTES
-EXC( STORE t0, 0(dst), .Ls_exc_p1u)
+ STORE(t0, 0(dst), .Ls_exc_p1u\@)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
bne rem, len, 1b
@@ -322,17 +388,17 @@ EXC( STORE t0, 0(dst), .Ls_exc_p1u)
* more instruction-level parallelism.
*/
#define bits t2
- beqz len, .Ldone
+ beqz len, .Ldone\@
ADD t1, dst, len # t1 is just past last byte of dst
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
-EXC( LOAD t0, 0(src), .Ll_exc)
+ LOAD(t0, 0(src), .Ll_exc\@)
SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
-EXC( STREST t0, -1(t1), .Ls_exc)
+ STREST(t0, -1(t1), .Ls_exc\@)
jr ra
move len, zero
-.Ldst_unaligned:
+.Ldst_unaligned\@:
/*
* dst is unaligned
* t0 = src & ADDRMASK
@@ -343,25 +409,25 @@ EXC( STREST t0, -1(t1), .Ls_exc)
* Set match = (src and dst have same alignment)
*/
#define match rem
-EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
+ LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
ADD t2, zero, NBYTES
-EXC( LDREST t3, REST(0)(src), .Ll_exc_copy)
+ LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
SUB t2, t2, t1 # t2 = number of bytes copied
xor match, t0, t1
R10KCBARRIER(0(ra))
-EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
- beq len, t2, .Ldone
+ STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+ beq len, t2, .Ldone\@
SUB len, len, t2
ADD dst, dst, t2
- beqz match, .Lboth_aligned
+ beqz match, .Lboth_aligned\@
ADD src, src, t2
-.Lsrc_unaligned_dst_aligned:
+.Lsrc_unaligned_dst_aligned\@:
SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
- PREF( 0, 3*32(src) )
- beqz t0, .Lcleanup_src_unaligned
+ PREFS( 0, 3*32(src) )
+ beqz t0, .Lcleanup_src_unaligned\@
and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
- PREF( 1, 3*32(dst) )
+ PREFD( 1, 3*32(dst) )
1:
/*
* Avoid consecutive LD*'s to the same register since some mips
@@ -370,58 +436,58 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
* are to the same unit (unless src is aligned, but it's not).
*/
R10KCBARRIER(0(ra))
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
+ LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+ LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
SUB len, len, 4*NBYTES
-EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
-EXC( LDREST t1, REST(1)(src), .Ll_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
-EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
-EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
- PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
+ LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+ LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
+ LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
+ LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
+ LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
+ LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+ PREFS( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
nop # improves slotting
#endif
-EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u)
-EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u)
-EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u)
-EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u)
- PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
+ STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
+ STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
+ STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
+ PREFD( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
bne len, rem, 1b
.set noreorder
-.Lcleanup_src_unaligned:
- beqz len, .Ldone
+.Lcleanup_src_unaligned\@:
+ beqz len, .Ldone\@
and rem, len, NBYTES-1 # rem = len % NBYTES
- beq rem, len, .Lcopy_bytes
+ beq rem, len, .Lcopy_bytes\@
nop
1:
R10KCBARRIER(0(ra))
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
+ LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+ LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
ADD src, src, NBYTES
SUB len, len, NBYTES
-EXC( STORE t0, 0(dst), .Ls_exc_p1u)
+ STORE(t0, 0(dst), .Ls_exc_p1u\@)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
bne len, rem, 1b
.set noreorder
-.Lcopy_bytes_checklen:
- beqz len, .Ldone
+.Lcopy_bytes_checklen\@:
+ beqz len, .Ldone\@
nop
-.Lcopy_bytes:
+.Lcopy_bytes\@:
/* 0 < len < NBYTES */
R10KCBARRIER(0(ra))
#define COPY_BYTE(N) \
-EXC( lb t0, N(src), .Ll_exc); \
+ LOADB(t0, N(src), .Ll_exc\@); \
SUB len, len, 1; \
- beqz len, .Ldone; \
-EXC( sb t0, N(dst), .Ls_exc_p1)
+ beqz len, .Ldone\@; \
+ STOREB(t0, N(dst), .Ls_exc_p1\@)
COPY_BYTE(0)
COPY_BYTE(1)
@@ -431,16 +497,19 @@ EXC( sb t0, N(dst), .Ls_exc_p1)
COPY_BYTE(4)
COPY_BYTE(5)
#endif
-EXC( lb t0, NBYTES-2(src), .Ll_exc)
+ LOADB(t0, NBYTES-2(src), .Ll_exc\@)
SUB len, len, 1
jr ra
-EXC( sb t0, NBYTES-2(dst), .Ls_exc_p1)
-.Ldone:
+ STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
+.Ldone\@:
jr ra
- nop
+ .if __memcpy == 1
END(memcpy)
+ .set __memcpy, 0
+ .hidden __memcpy
+ .endif
-.Ll_exc_copy:
+.Ll_exc_copy\@:
/*
* Copy bytes from src until faulting load address (or until a
* lb faults)
@@ -451,24 +520,24 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc_p1)
*
* Assumes src < THREAD_BUADDR($28)
*/
- LOAD t0, TI_TASK($28)
+ LOADK t0, TI_TASK($28)
nop
- LOAD t0, THREAD_BUADDR(t0)
+ LOADK t0, THREAD_BUADDR(t0)
1:
-EXC( lb t1, 0(src), .Ll_exc)
+ LOADB(t1, 0(src), .Ll_exc\@)
ADD src, src, 1
sb t1, 0(dst) # can't fault -- we're copy_from_user
.set reorder /* DADDI_WAR */
ADD dst, dst, 1
bne src, t0, 1b
.set noreorder
-.Ll_exc:
- LOAD t0, TI_TASK($28)
+.Ll_exc\@:
+ LOADK t0, TI_TASK($28)
nop
- LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
+ LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
nop
SUB len, AT, t0 # len number of uncopied bytes
- bnez t6, .Ldone /* Skip the zeroing part if inatomic */
+ bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */
/*
* Here's where we rely on src and dst being incremented in tandem,
* See (3) above.
@@ -482,7 +551,7 @@ EXC( lb t1, 0(src), .Ll_exc)
*/
.set reorder /* DADDI_WAR */
SUB src, len, 1
- beqz len, .Ldone
+ beqz len, .Ldone\@
.set noreorder
1: sb zero, 0(dst)
ADD dst, dst, 1
@@ -503,7 +572,7 @@ EXC( lb t1, 0(src), .Ll_exc)
#define SEXC(n) \
.set reorder; /* DADDI_WAR */ \
-.Ls_exc_p ## n ## u: \
+.Ls_exc_p ## n ## u\@: \
ADD len, len, n*NBYTES; \
jr ra; \
.set noreorder
@@ -517,14 +586,15 @@ SEXC(3)
SEXC(2)
SEXC(1)
-.Ls_exc_p1:
+.Ls_exc_p1\@:
.set reorder /* DADDI_WAR */
ADD len, len, 1
jr ra
.set noreorder
-.Ls_exc:
+.Ls_exc\@:
jr ra
nop
+ .endm
.align 5
LEAF(memmove)
@@ -575,3 +645,71 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
jr ra
move a2, zero
END(__rmemcpy)
+
+/*
+ * t6 is used as a flag to note inatomic mode.
+ */
+LEAF(__copy_user_inatomic)
+ b __copy_user_common
+ li t6, 1
+ END(__copy_user_inatomic)
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+ .align 5
+LEAF(memcpy) /* a0=dst a1=src a2=len */
+ move v0, dst /* return value */
+.L__memcpy:
+FEXPORT(__copy_user)
+ li t6, 0 /* not inatomic */
+__copy_user_common:
+ /* Legacy Mode, user <-> user */
+ __BUILD_COPY_USER LEGACY_MODE USEROP USEROP
+
+#ifdef CONFIG_EVA
+
+/*
+ * For EVA we need distinct symbols for reading and writing to user space.
+ * This is because we need to use specific EVA instructions to perform the
+ * virtual <-> physical translation when a virtual address is actually in user
+ * space
+ */
+
+LEAF(__copy_user_inatomic_eva)
+ b __copy_from_user_common
+ li t6, 1
+ END(__copy_user_inatomic_eva)
+
+/*
+ * __copy_from_user (EVA)
+ */
+
+LEAF(__copy_from_user_eva)
+ li t6, 0 /* not inatomic */
+__copy_from_user_common:
+ __BUILD_COPY_USER EVA_MODE USEROP KERNELOP
+END(__copy_from_user_eva)
+
+
+
+/*
+ * __copy_to_user (EVA)
+ */
+
+LEAF(__copy_to_user_eva)
+__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
+END(__copy_to_user_eva)
+
+/*
+ * __copy_in_user (EVA)
+ */
+
+LEAF(__copy_in_user_eva)
+__BUILD_COPY_USER EVA_MODE USEROP USEROP
+END(__copy_in_user_eva)
+
+#endif
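
Selection between the legacy entry points and the *_eva ones happens at the call sites in uaccess.h; a simplified sketch of the dispatch (the real macros invoke these through inline asm with a fixed register convention, so the form below is illustrative only):

	/* hedged sketch, not the literal uaccess.h code */
	#ifdef CONFIG_EVA
	# define __invoke_copy_from_user(to, from, n)	__copy_from_user_eva(to, from, n)
	# define __invoke_copy_to_user(to, from, n)	__copy_to_user_eva(to, from, n)
	# define __invoke_copy_in_user(to, from, n)	__copy_in_user_eva(to, from, n)
	#else
	# define __invoke_copy_from_user(to, from, n)	__copy_user(to, from, n)
	# define __invoke_copy_to_user(to, from, n)	__copy_user(to, from, n)
	# define __invoke_copy_in_user(to, from, n)	__copy_user(to, from, n)
	#endif

Either way the contract is the one stated above: a0 = dst, a1 = src, a2 = len on entry, and len comes back as 0 on success or an upper bound on the uncopied bytes.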
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 0580194e7402..7b0e5462ca51 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -34,13 +34,27 @@
#define FILLPTRG t0
#endif
+#define LEGACY_MODE 1
+#define EVA_MODE 2
+
+/*
+ * No need to protect it with EVA #ifdefery. The generated block of code
+ * will never be assembled if EVA is not enabled.
+ */
+#define __EVAFY(insn, reg, addr) __BUILD_EVA_INSN(insn##e, reg, addr)
+#define ___BUILD_EVA_INSN(insn, reg, addr) __EVAFY(insn, reg, addr)
+
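+/*
+ * Aside: the two-level ___BUILD_EVA_INSN/__EVAFY indirection is the classic
+ * expand-before-paste preprocessor trick. EX() is handed LONG_S, which is
+ * itself a macro (sw or sd), and pasting "e" onto it directly would form the
+ * nonexistent token LONG_Se before LONG_S ever expands. Minimal illustration
+ * with invented names:
+ *
+ *	#define LONG_S		sw		// stand-in for the asm.h macro
+ *	#define ADD_E_(insn)	insn##e 	// pastes immediately
+ *	#define ADD_E(insn)	ADD_E_(insn)	// expands the argument first
+ *
+ *	ADD_E_(LONG_S)	// -> LONG_Se: broken, ## suppressed the expansion
+ *	ADD_E(LONG_S)	// -> swe:     the EVA store we actually want
+ */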
#define EX(insn,reg,addr,handler) \
-9: insn reg, addr; \
+ .if \mode == LEGACY_MODE; \
+9: insn reg, addr; \
+ .else; \
+9: ___BUILD_EVA_INSN(insn, reg, addr); \
+ .endif; \
.section __ex_table,"a"; \
PTR 9b, handler; \
.previous
- .macro f_fill64 dst, offset, val, fixup
+ .macro f_fill64 dst, offset, val, fixup, mode
EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup)
EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup)
EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup)
@@ -63,34 +77,24 @@
#endif
.endm
-/*
- * memset(void *s, int c, size_t n)
- *
- * a0: start of area to clear
- * a1: char to fill with
- * a2: size of area to clear
- */
.set noreorder
.align 5
-LEAF(memset)
- beqz a1, 1f
- move v0, a0 /* result */
- andi a1, 0xff /* spread fillword */
- LONG_SLL t1, a1, 8
- or a1, t1
- LONG_SLL t1, a1, 16
-#if LONGSIZE == 8
- or a1, t1
- LONG_SLL t1, a1, 32
-#endif
- or a1, t1
-1:
+ /*
+ * Macro to generate the __bzero{,_user} symbol
+ * Arguments:
+ * mode: LEGACY_MODE or EVA_MODE
+ */
+ .macro __BUILD_BZERO mode
+ /* Initialize __memset if this is the first time we call this macro */
+ .ifnotdef __memset
+ .set __memset, 1
+ .hidden __memset /* Make sure it does not leak */
+ .endif
-FEXPORT(__bzero)
sltiu t0, a2, STORSIZE /* very small region? */
- bnez t0, .Lsmall_memset
- andi t0, a0, STORMASK /* aligned? */
+ bnez t0, .Lsmall_memset\@
+ andi t0, a0, STORMASK /* aligned? */
#ifdef CONFIG_CPU_MICROMIPS
move t8, a1 /* used by 'swp' instruction */
@@ -98,39 +102,39 @@ FEXPORT(__bzero)
#endif
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
beqz t0, 1f
- PTR_SUBU t0, STORSIZE /* alignment in bytes */
+ PTR_SUBU t0, STORSIZE /* alignment in bytes */
#else
.set noat
li AT, STORSIZE
beqz t0, 1f
- PTR_SUBU t0, AT /* alignment in bytes */
+ PTR_SUBU t0, AT /* alignment in bytes */
.set at
#endif
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
- EX(LONG_S_L, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */
+ EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
#endif
#ifdef __MIPSEL__
- EX(LONG_S_R, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */
+ EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
#endif
PTR_SUBU a0, t0 /* long align ptr */
PTR_ADDU a2, t0 /* correct size */
1: ori t1, a2, 0x3f /* # of full blocks */
xori t1, 0x3f
- beqz t1, .Lmemset_partial /* no block to fill */
- andi t0, a2, 0x40-STORSIZE
+ beqz t1, .Lmemset_partial\@ /* no block to fill */
+ andi t0, a2, 0x40-STORSIZE
PTR_ADDU t1, a0 /* end address */
.set reorder
1: PTR_ADDIU a0, 64
R10KCBARRIER(0(ra))
- f_fill64 a0, -64, FILL64RG, .Lfwd_fixup
+ f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode
bne t1, a0, 1b
.set noreorder
-.Lmemset_partial:
+.Lmemset_partial\@:
R10KCBARRIER(0(ra))
PTR_LA t1, 2f /* where to start */
#ifdef CONFIG_CPU_MICROMIPS
@@ -145,60 +149,100 @@ FEXPORT(__bzero)
.set at
#endif
jr t1
- PTR_ADDU a0, t0 /* dest ptr */
+ PTR_ADDU a0, t0 /* dest ptr */
.set push
.set noreorder
.set nomacro
- f_fill64 a0, -64, FILL64RG, .Lpartial_fixup /* ... but first do longs ... */
+ /* ... but first do longs ... */
+ f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode
2: .set pop
andi a2, STORMASK /* At most one long to go */
beqz a2, 1f
- PTR_ADDU a0, a2 /* What's left */
+ PTR_ADDU a0, a2 /* What's left */
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
- EX(LONG_S_R, a1, -1(a0), .Llast_fixup)
+ EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@)
#endif
#ifdef __MIPSEL__
- EX(LONG_S_L, a1, -1(a0), .Llast_fixup)
+ EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
#endif
1: jr ra
- move a2, zero
+ move a2, zero
-.Lsmall_memset:
+.Lsmall_memset\@:
beqz a2, 2f
- PTR_ADDU t1, a0, a2
+ PTR_ADDU t1, a0, a2
1: PTR_ADDIU a0, 1 /* fill bytewise */
R10KCBARRIER(0(ra))
bne t1, a0, 1b
- sb a1, -1(a0)
+ sb a1, -1(a0)
2: jr ra /* done */
- move a2, zero
+ move a2, zero
+ .if __memset == 1
END(memset)
+ .set __memset, 0
+ .hidden __memset
+ .endif
-.Lfirst_fixup:
+.Lfirst_fixup\@:
jr ra
- nop
+ nop
-.Lfwd_fixup:
+.Lfwd_fixup\@:
PTR_L t0, TI_TASK($28)
andi a2, 0x3f
LONG_L t0, THREAD_BUADDR(t0)
LONG_ADDU a2, t1
jr ra
- LONG_SUBU a2, t0
+ LONG_SUBU a2, t0
-.Lpartial_fixup:
+.Lpartial_fixup\@:
PTR_L t0, TI_TASK($28)
andi a2, STORMASK
LONG_L t0, THREAD_BUADDR(t0)
LONG_ADDU a2, t1
jr ra
- LONG_SUBU a2, t0
+ LONG_SUBU a2, t0
-.Llast_fixup:
+.Llast_fixup\@:
jr ra
- andi v1, a2, STORMASK
+ andi v1, a2, STORMASK
+
+ .endm
+
+/*
+ * memset(void *s, int c, size_t n)
+ *
+ * a0: start of area to clear
+ * a1: char to fill with
+ * a2: size of area to clear
+ */
+
+LEAF(memset)
+ beqz a1, 1f
+ move v0, a0 /* result */
+
+ andi a1, 0xff /* spread fillword */
+ LONG_SLL t1, a1, 8
+ or a1, t1
+ LONG_SLL t1, a1, 16
+#if LONGSIZE == 8
+ or a1, t1
+ LONG_SLL t1, a1, 32
+#endif
+ or a1, t1
+1:
+#ifndef CONFIG_EVA
+FEXPORT(__bzero)
+#endif
+ __BUILD_BZERO LEGACY_MODE
+
+#ifdef CONFIG_EVA
+LEAF(__bzero)
+ __BUILD_BZERO EVA_MODE
+END(__bzero)
+#endif
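
The register contract of the generated __bzero can be read off the fixup code above; sketched here the way __clear_user() in uaccess.h is expected to invoke it (reconstructed by hand, so treat it as a sketch):

	move	a0, s0		# destination (a user pointer under EVA)
	move	a1, zero	# fill word; memset spreads a1 itself, __bzero
				# is entered with the fill already in place
	move	a2, s1		# byte count
	jal	__bzero
	# on return a2 == 0 on success; after a fault, a2 holds the
	# number of bytes that were not zeroed (see .Lfwd_fixup et al.)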
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
index e362dcdc69d1..bef65c98df59 100644
--- a/arch/mips/lib/strlen_user.S
+++ b/arch/mips/lib/strlen_user.S
@@ -22,19 +22,43 @@
*
* Return 0 for error
*/
-LEAF(__strlen_user_asm)
+ .macro __BUILD_STRLEN_ASM func
+LEAF(__strlen_\func\()_asm)
LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
and v0, a0
- bnez v0, .Lfault
+ bnez v0, .Lfault\@
-FEXPORT(__strlen_user_nocheck_asm)
+FEXPORT(__strlen_\func\()_nocheck_asm)
move v0, a0
-1: EX(lbu, v1, (v0), .Lfault)
+.ifeqs "\func", "kernel"
+1: EX(lbu, v1, (v0), .Lfault\@)
+.else
+1: EX(lbue, v1, (v0), .Lfault\@)
+.endif
PTR_ADDIU v0, 1
bnez v1, 1b
PTR_SUBU v0, a0
jr ra
- END(__strlen_user_asm)
+ END(__strlen_\func\()_asm)
-.Lfault: move v0, zero
+.Lfault\@: move v0, zero
jr ra
+ .endm
+
+#ifndef CONFIG_EVA
+ /* Set aliases */
+ .global __strlen_user_asm
+ .global __strlen_user_nocheck_asm
+ .set __strlen_user_asm, __strlen_kernel_asm
+ .set __strlen_user_nocheck_asm, __strlen_kernel_nocheck_asm
+#endif
+
+__BUILD_STRLEN_ASM kernel
+
+#ifdef CONFIG_EVA
+
+ .set push
+ .set eva
+__BUILD_STRLEN_ASM user
+ .set pop
+#endif
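
The .ifeqs "\func", "kernel" test above is a gas string comparison, which is what lets one macro body emit lbu for the kernel variant and the EVA lbue for the user variant. Distilled to its core (standalone sketch):

	.macro	pick_load func
	.ifeqs	"\func", "kernel"
	lbu	v1, 0(v0)		# normal load, kernel address space
	.else
	lbue	v1, 0(v0)		# EVA load, translated as a user access
	.endif
	.endm

	pick_load kernel		# assembles the lbu form
	pick_load user			# assembles the lbue form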
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index 92870b6b53ea..d3301cd1e9a5 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -28,16 +28,21 @@
* it happens, at most some bytes of the exception handlers will be copied.
*/
-LEAF(__strncpy_from_user_asm)
+ .macro __BUILD_STRNCPY_ASM func
+LEAF(__strncpy_from_\func\()_asm)
LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
and v0, a1
- bnez v0, .Lfault
+ bnez v0, .Lfault\@
-FEXPORT(__strncpy_from_user_nocheck_asm)
+FEXPORT(__strncpy_from_\func\()_nocheck_asm)
.set noreorder
move t0, zero
move v1, a1
-1: EX(lbu, v0, (v1), .Lfault)
+.ifeqs "\func","kernel"
+1: EX(lbu, v0, (v1), .Lfault\@)
+.else
+1: EX(lbue, v0, (v1), .Lfault\@)
+.endif
PTR_ADDIU v1, 1
R10KCBARRIER(0(ra))
beqz v0, 2f
@@ -47,15 +52,34 @@ FEXPORT(__strncpy_from_user_nocheck_asm)
PTR_ADDIU a0, 1
2: PTR_ADDU v0, a1, t0
xor v0, a1
- bltz v0, .Lfault
+ bltz v0, .Lfault\@
nop
jr ra # return n
move v0, t0
- END(__strncpy_from_user_asm)
+ END(__strncpy_from_\func\()_asm)
-.Lfault: jr ra
+.Lfault\@: jr ra
li v0, -EFAULT
.section __ex_table,"a"
- PTR 1b, .Lfault
+ PTR 1b, .Lfault\@
.previous
+
+ .endm
+
+#ifndef CONFIG_EVA
+ /* Set aliases */
+ .global __strncpy_from_user_asm
+ .global __strncpy_from_user_nocheck_asm
+ .set __strncpy_from_user_asm, __strncpy_from_kernel_asm
+ .set __strncpy_from_user_nocheck_asm, __strncpy_from_kernel_nocheck_asm
+#endif
+
+__BUILD_STRNCPY_ASM kernel
+
+#ifdef CONFIG_EVA
+ .set push
+ .set eva
+__BUILD_STRNCPY_ASM user
+ .set pop
+#endif
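
The non-EVA alias block deserves a note: .set symbol, expression with a prior .global makes the old user-facing names resolve to the kernel variants at link time, so existing callers keep working without any call-site change. The pattern in isolation:

	.global	old_entry
	.set	old_entry, new_impl	# old_entry now labels the same code as new_impl

Under CONFIG_EVA the aliases are omitted and the user variants are built as real, separate functions instead.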
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index fcacea5e61f1..f3af6995e2a6 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -25,22 +25,46 @@
* bytes. There's nothing secret there. On 64-bit accessing beyond
* the maximum is a tad hairier ...
*/
-LEAF(__strnlen_user_asm)
+ .macro __BUILD_STRNLEN_ASM func
+LEAF(__strnlen_\func\()_asm)
LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
and v0, a0
- bnez v0, .Lfault
+ bnez v0, .Lfault\@
-FEXPORT(__strnlen_user_nocheck_asm)
+FEXPORT(__strnlen_\func\()_nocheck_asm)
move v0, a0
PTR_ADDU a1, a0 # stop pointer
1: beq v0, a1, 1f # limit reached?
- EX(lb, t0, (v0), .Lfault)
+.ifeqs "\func", "kernel"
+ EX(lb, t0, (v0), .Lfault\@)
+.else
+ EX(lbe, t0, (v0), .Lfault\@)
+.endif
PTR_ADDIU v0, 1
bnez t0, 1b
1: PTR_SUBU v0, a0
jr ra
- END(__strnlen_user_asm)
+ END(__strnlen_\func\()_asm)
-.Lfault:
+.Lfault\@:
move v0, zero
jr ra
+ .endm
+
+#ifndef CONFIG_EVA
+ /* Set aliases */
+ .global __strnlen_user_asm
+ .global __strnlen_user_nocheck_asm
+ .set __strnlen_user_asm, __strnlen_kernel_asm
+ .set __strnlen_user_nocheck_asm, __strnlen_kernel_nocheck_asm
+#endif
+
+__BUILD_STRNLEN_ASM kernel
+
+#ifdef CONFIG_EVA
+
+ .set push
+ .set eva
+__BUILD_STRNLEN_ASM user
+ .set pop
+#endif
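
A closing note on the TI_ADDR_LIMIT test that opens each of these routines: it works because addr_limit is kept as a mask, not a bound. For a 32-bit user task the stored limit is 0x80000000, while KERNEL_DS stores 0, so "and v0, a0" is nonzero exactly when a user-supplied pointer has the kernel-segment bit set. Worked instances (32-bit values, quoted from memory, so treat them as a sketch):

	# a0 = 0x7fff1234 (user pointer):   0x7fff1234 & 0x80000000 == 0 -> proceed
	# a0 = 0x80001000 (kernel pointer): 0x80001000 & 0x80000000 != 0 -> .Lfault
	# addr_limit == 0 (KERNEL_DS):      anything & 0 == 0            -> all allowed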