Diffstat (limited to 'arch/arm/lib')
-rw-r--r-- | arch/arm/lib/backtrace-clang.S        | 25
-rw-r--r-- | arch/arm/lib/backtrace.S              | 24
-rw-r--r-- | arch/arm/lib/call_with_stack.S        | 35
-rw-r--r-- | arch/arm/lib/copy_from_user.S         | 16
-rw-r--r-- | arch/arm/lib/copy_template.S          | 67
-rw-r--r-- | arch/arm/lib/copy_to_user.S           | 16
-rw-r--r-- | arch/arm/lib/csumpartialcopy.S        |  4
-rw-r--r-- | arch/arm/lib/csumpartialcopygeneric.S |  1
-rw-r--r-- | arch/arm/lib/csumpartialcopyuser.S    | 26
-rw-r--r-- | arch/arm/lib/findbit.S                | 16
-rw-r--r-- | arch/arm/lib/memcpy.S                 | 17
-rw-r--r-- | arch/arm/lib/memmove.S                | 66
-rw-r--r-- | arch/arm/lib/memset.S                 | 11
-rw-r--r-- | arch/arm/lib/uaccess_with_memcpy.c    | 35
-rw-r--r-- | arch/arm/lib/xor-neon.c               | 12
15 files changed, 173 insertions(+), 198 deletions(-)
diff --git a/arch/arm/lib/backtrace-clang.S b/arch/arm/lib/backtrace-clang.S
index 2ff375144b55..290c52a60fc6 100644
--- a/arch/arm/lib/backtrace-clang.S
+++ b/arch/arm/lib/backtrace-clang.S
@@ -17,6 +17,7 @@
 #define sv_pc	r6
 #define mask	r7
 #define sv_lr	r8
+#define loglvl	r9
 
 ENTRY(c_backtrace)
 
@@ -99,6 +100,7 @@ ENDPROC(c_backtrace)
 					@ to ensure 8 byte alignment
 		movs	frame, r0		@ if frame pointer is zero
 		beq	no_frame		@ we have no stack frames
+		mov	loglvl, r2
 		tst	r1, #0x10		@ 26 or 32-bit mode?
 		moveq	mask, #0xfc000003
 		movne	mask, #0		@ mask for 32-bit
@@ -142,7 +144,7 @@ for_each_frame:	tst	frame, mask		@ Check for address exceptions
  */
 1003:		ldr	sv_lr, [sv_fp, #4]	@ get saved lr from next frame
 
-		ldr	r0, [sv_lr, #-4]	@ get call instruction
+1004:		ldr	r0, [sv_lr, #-4]	@ get call instruction
 		ldr	r3, .Lopcode+4
 		and	r2, r3, r0		@ is this a bl call
 		teq	r2, r3
@@ -162,11 +164,12 @@ finished_setup:
 /*
  * Print the function (sv_pc) and where it was called from (sv_lr).
  */
-1004:		mov	r0, sv_pc
+		mov	r0, sv_pc
 
 		mov	r1, sv_lr
 		mov	r2, frame
 		bic	r1, r1, mask		@ mask PC/LR for the mode
+		mov	r3, loglvl
 		bl	dump_backtrace_entry
 
 /*
@@ -183,6 +186,7 @@ finished_setup:
 		ldr	r0, [frame]		@ locals are stored in
 						@ the preceding frame
 		subeq	r0, r0, #4
+		mov	r2, loglvl
 		bleq	dump_backtrace_stm	@ dump saved registers
 
 /*
@@ -193,11 +197,20 @@ finished_setup:
 		cmp	sv_fp, frame		@ next frame must be
 		mov	frame, sv_fp		@ above the current frame
+#ifdef CONFIG_IRQSTACKS
+		@
+		@ Kernel stacks may be discontiguous in memory. If the next
+		@ frame is below the previous frame, accept it as long as it
+		@ lives in kernel memory.
+		@
+		cmpls	sv_fp, #PAGE_OFFSET
+#endif
 		bhi	for_each_frame
 
 1006:		adr	r0, .Lbad
-		mov	r1, frame
-		bl	printk
+		mov	r1, loglvl
+		mov	r2, frame
+		bl	_printk
 no_frame:	ldmfd	sp!, {r4 - r9, fp, pc}
 ENDPROC(c_backtrace)
 
 		.pushsection __ex_table,"a"
@@ -205,11 +218,11 @@ ENDPROC(c_backtrace)
 		.long	1001b, 1006b
 		.long	1002b, 1006b
 		.long	1003b, 1006b
-		.long	1004b, 1006b
+		.long	1004b, finished_setup
 		.long	1005b, 1006b
 		.popsection
 
-.Lbad:		.asciz	"Backtrace aborted due to bad frame pointer <%p>\n"
+.Lbad:		.asciz	"%sBacktrace aborted due to bad frame pointer <%p>\n"
 		.align
 .Lopcode:	.word	0xe92d4800 >> 11	@ stmfd sp!, {... fp, lr}
 		.word	0x0b000000		@ bl if these bits are set
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 582925238d65..293a2716bd20 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -18,6 +18,7 @@
 #define sv_pc	r6
 #define mask	r7
 #define offset	r8
+#define loglvl	r9
 
 ENTRY(c_backtrace)
 
@@ -25,9 +26,10 @@ ENTRY(c_backtrace)
 		ret	lr
 ENDPROC(c_backtrace)
 #else
-		stmfd	sp!, {r4 - r8, lr}	@ Save an extra register so we have a location...
+		stmfd	sp!, {r4 - r9, lr}	@ Save an extra register so we have a location...
 		movs	frame, r0		@ if frame pointer is zero
 		beq	no_frame		@ we have no stack frames
+		mov	loglvl, r2
 
 		tst	r1, #0x10		@ 26 or 32-bit mode?
 ARM(		moveq	mask, #0xfc000003	)
@@ -73,6 +75,7 @@ for_each_frame:	tst	frame, mask		@ Check for address exceptions
 		ldr	r1, [frame, #-4]	@ get saved lr
 		mov	r2, frame
 		bic	r1, r1, mask		@ mask PC/LR for the mode
+		mov	r3, loglvl
 		bl	dump_backtrace_entry
 
 		ldr	r1, [sv_pc, #-4]	@ if stmfd sp!, {args} exists,
@@ -80,12 +83,14 @@ for_each_frame:	tst	frame, mask		@ Check for address exceptions
 		teq	r3, r1, lsr #11
 		ldreq	r0, [frame, #-8]	@ get sp
 		subeq	r0, r0, #4		@ point at the last arg
+		mov	r2, loglvl
 		bleq	dump_backtrace_stm	@ dump saved registers
 
 1004:		ldr	r1, [sv_pc, #0]		@ if stmfd sp!, {..., fp, ip, lr, pc}
 		ldr	r3, .Ldsi		@ instruction exists,
 		teq	r3, r1, lsr #11
 		subeq	r0, frame, #16
+		mov	r2, loglvl
 		bleq	dump_backtrace_stm	@ dump saved registers
 
 		teq	sv_fp, #0		@ zero saved fp means
@@ -93,12 +98,21 @@ for_each_frame:	tst	frame, mask		@ Check for address exceptions
 		cmp	sv_fp, frame		@ next frame must be
 		mov	frame, sv_fp		@ above the current frame
+#ifdef CONFIG_IRQSTACKS
+		@
+		@ Kernel stacks may be discontiguous in memory. If the next
+		@ frame is below the previous frame, accept it as long as it
+		@ lives in kernel memory.
+		@
+		cmpls	sv_fp, #PAGE_OFFSET
+#endif
 		bhi	for_each_frame
 
 1006:		adr	r0, .Lbad
-		mov	r1, frame
-		bl	printk
-no_frame:	ldmfd	sp!, {r4 - r8, pc}
+		mov	r1, loglvl
+		mov	r2, frame
+		bl	_printk
+no_frame:	ldmfd	sp!, {r4 - r9, pc}
 ENDPROC(c_backtrace)
 
 		.pushsection __ex_table,"a"
@@ -109,7 +123,7 @@ ENDPROC(c_backtrace)
 		.long	1004b, 1006b
 		.popsection
 
-.Lbad:		.asciz	"Backtrace aborted due to bad frame pointer <%p>\n"
+.Lbad:		.asciz	"%sBacktrace aborted due to bad frame pointer <%p>\n"
 		.align
 .Ldsi:		.word	0xe92dd800 >> 11	@ stmfd sp!, {... fp, ip, lr, pc}
 		.word	0xe92d0000 >> 11	@ stmfd sp!, {}
diff --git a/arch/arm/lib/call_with_stack.S b/arch/arm/lib/call_with_stack.S
index 28b0341ae786..5030d4e8d126 100644
--- a/arch/arm/lib/call_with_stack.S
+++ b/arch/arm/lib/call_with_stack.S
@@ -8,25 +8,44 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>
 
 /*
  * void call_with_stack(void (*fn)(void *), void *arg, void *sp)
  *
  * Change the stack to that pointed at by sp, then invoke fn(arg) with
  * the new stack.
+ *
+ * The sequence below follows the APCS frame convention for frame pointer
+ * unwinding, and implements the unwinder annotations needed by the EABI
+ * unwinder.
  */
-ENTRY(call_with_stack)
-	str	sp, [r2, #-4]!
-	str	lr, [r2, #-4]!
+ENTRY(call_with_stack)
+#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
+	mov	ip, sp
+	push	{fp, ip, lr, pc}
+	sub	fp, ip, #4
+#else
+UNWIND( .fnstart	)
+UNWIND( .save	{fpreg, lr}	)
+	push	{fpreg, lr}
+UNWIND( .setfp	fpreg, sp	)
+	mov	fpreg, sp
+#endif
 
 	mov	sp, r2
 	mov	r2, r0
 	mov	r0, r1
 
-	badr	lr, 1f
-	ret	r2
+	bl_r	r2
 
-1:	ldr	lr, [sp]
-	ldr	sp, [sp, #4]
-	ret	lr
+#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
+	ldmdb	fp, {fp, sp, pc}
+#else
+	mov	sp, fpreg
+	pop	{fpreg, pc}
+UNWIND( .fnend	)
+#endif
+	.globl	call_with_stack_end
+call_with_stack_end:
 ENDPROC(call_with_stack)
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index f8016e3db65d..270de7debd0f 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -91,26 +91,22 @@
 		strb\cond \reg, [\ptr], #1
 	.endm
 
-	.macro enter reg1 reg2
+	.macro enter regs:vararg
 	mov	r3, #0
-	stmdb	sp!, {r0, r2, r3, \reg1, \reg2}
+UNWIND( .save	{r0, r2, r3, \regs}	)
+	stmdb	sp!, {r0, r2, r3, \regs}
 	.endm
 
-	.macro usave reg1 reg2
-	UNWIND(	.save {r0, r2, r3, \reg1, \reg2}	)
-	.endm
-
-	.macro exit reg1 reg2
+	.macro exit regs:vararg
 	add	sp, sp, #8
-	ldmfd	sp!, {r0, \reg1, \reg2}
+	ldmfd	sp!, {r0, \regs}
 	.endm
 
 	.text
 
 ENTRY(arm_copy_from_user)
 #ifdef CONFIG_CPU_SPECTRE
-	get_thread_info r3
-	ldr	r3, [r3, #TI_ADDR_LIMIT]
+	ldr	r3, =TASK_SIZE
 	uaccess_mask_range_ptr r1, r2, r3, ip
 #endif
 
diff --git a/arch/arm/lib/copy_template.S b/arch/arm/lib/copy_template.S
index 810a805d36dc..8fbafb074fe9 100644
--- a/arch/arm/lib/copy_template.S
+++ b/arch/arm/lib/copy_template.S
@@ -69,13 +69,10 @@
  *	than one 32bit instruction in Thumb-2)
  */
 
-
-	UNWIND(	.fnstart			)
-		enter	r4, lr
-	UNWIND(	.fnend				)
-
 	UNWIND(	.fnstart			)
-		usave	r4, lr			  @ in first stmdb block
+		enter	r4, UNWIND(fpreg,) lr
+	UNWIND(	.setfp	fpreg, sp		)
+	UNWIND(	mov	fpreg, sp		)
 
 		subs	r2, r2, #4
 		blt	8f
@@ -86,12 +83,7 @@
 		bne	10f
 
 1:		subs	r2, r2, #(28)
-		stmfd	sp!, {r5 - r8}
-	UNWIND(	.fnend				)
-
-	UNWIND(	.fnstart			)
-		usave	r4, lr
-	UNWIND(	.save	{r5 - r8}		) @ in second stmfd block
+		stmfd	sp!, {r5, r6, r8, r9}
 		blt	5f
 
 	CALGN(	ands	ip, r0, #31		)
@@ -110,9 +102,9 @@
 	PLD(	pld	[r1, #92]		)
 
 3:	PLD(	pld	[r1, #124]		)
-4:		ldr8w	r1, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
+4:		ldr8w	r1, r3, r4, r5, r6, r8, r9, ip, lr, abort=20f
 		subs	r2, r2, #32
-		str8w	r0, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
+		str8w	r0, r3, r4, r5, r6, r8, r9, ip, lr, abort=20f
 		bge	3b
 	PLD(	cmn	r2, #96			)
 	PLD(	bge	4b			)
@@ -132,8 +124,8 @@
 		ldr1w	r1, r4, abort=20f
 		ldr1w	r1, r5, abort=20f
 		ldr1w	r1, r6, abort=20f
-		ldr1w	r1, r7, abort=20f
 		ldr1w	r1, r8, abort=20f
+		ldr1w	r1, r9, abort=20f
 		ldr1w	r1, lr, abort=20f
 
 #if LDR1W_SHIFT < STR1W_SHIFT
@@ -150,17 +142,14 @@
 		str1w	r0, r4, abort=20f
 		str1w	r0, r5, abort=20f
 		str1w	r0, r6, abort=20f
-		str1w	r0, r7, abort=20f
 		str1w	r0, r8, abort=20f
+		str1w	r0, r9, abort=20f
 		str1w	r0, lr, abort=20f
 
 	CALGN(	bcs	2b			)
 
-7:		ldmfd	sp!, {r5 - r8}
-	UNWIND(	.fnend				) @ end of second stmfd block
+7:		ldmfd	sp!, {r5, r6, r8, r9}
 
-	UNWIND(	.fnstart			)
-		usave	r4, lr			  @ still in first stmdb block
 8:		movs	r2, r2, lsl #31
 		ldr1b	r1, r3, ne, abort=21f
 		ldr1b	r1, r4, cs, abort=21f
@@ -169,7 +158,7 @@
 		str1b	r0, r4, cs, abort=21f
 		str1b	r0, ip, cs, abort=21f
 
-		exit	r4, pc
+		exit	r4, UNWIND(fpreg,) pc
 
 9:		rsb	ip, ip, #4
 		cmp	ip, #2
@@ -189,13 +178,10 @@
 		ldr1w	r1, lr, abort=21f
 		beq	17f
 		bgt	18f
-	UNWIND(	.fnend				)
 
 
 	.macro	forward_copy_shift pull push
 
-	UNWIND(	.fnstart			)
-		usave	r4, lr			  @ still in first stmdb block
 
 		subs	r2, r2, #28
 		blt	14f
@@ -205,12 +191,8 @@
 	CALGN(	subcc	r2, r2, ip		)
 	CALGN(	bcc	15f			)
 
-11:		stmfd	sp!, {r5 - r9}
-	UNWIND(	.fnend				)
+11:		stmfd	sp!, {r5, r6, r8 - r10}
 
-	UNWIND(	.fnstart			)
-		usave	r4, lr
-	UNWIND(	.save	{r5 - r9}		) @ in new second stmfd block
 	PLD(	pld	[r1, #0]		)
 	PLD(	subs	r2, r2, #96		)
 	PLD(	pld	[r1, #28]		)
@@ -219,35 +201,32 @@
 	PLD(	pld	[r1, #92]		)
 
 12:	PLD(	pld	[r1, #124]		)
-13:		ldr4w	r1, r4, r5, r6, r7, abort=19f
+13:		ldr4w	r1, r4, r5, r6, r8, abort=19f
 		mov	r3, lr, lspull #\pull
 		subs	r2, r2, #32
-		ldr4w	r1, r8, r9, ip, lr, abort=19f
+		ldr4w	r1, r9, r10, ip, lr, abort=19f
 		orr	r3, r3, r4, lspush #\push
 		mov	r4, r4, lspull #\pull
 		orr	r4, r4, r5, lspush #\push
 		mov	r5, r5, lspull #\pull
 		orr	r5, r5, r6, lspush #\push
 		mov	r6, r6, lspull #\pull
-		orr	r6, r6, r7, lspush #\push
-		mov	r7, r7, lspull #\pull
-		orr	r7, r7, r8, lspush #\push
+		orr	r6, r6, r8, lspush #\push
 		mov	r8, r8, lspull #\pull
 		orr	r8, r8, r9, lspush #\push
 		mov	r9, r9, lspull #\pull
-		orr	r9, r9, ip, lspush #\push
+		orr	r9, r9, r10, lspush #\push
+		mov	r10, r10, lspull #\pull
+		orr	r10, r10, ip, lspush #\push
 		mov	ip, ip, lspull #\pull
 		orr	ip, ip, lr, lspush #\push
-		str8w	r0, r3, r4, r5, r6, r7, r8, r9, ip, abort=19f
+		str8w	r0, r3, r4, r5, r6, r8, r9, r10, ip, abort=19f
 		bge	12b
 	PLD(	cmn	r2, #96			)
 	PLD(	bge	13b			)
 
-		ldmfd	sp!, {r5 - r9}
-	UNWIND(	.fnend				) @ end of the second stmfd block
+		ldmfd	sp!, {r5, r6, r8 - r10}
 
-	UNWIND(	.fnstart			)
-		usave	r4, lr			  @ still in first stmdb block
 14:		ands	ip, r2, #28
 		beq	16f
@@ -262,7 +241,6 @@
 
 16:		sub	r1, r1, #(\push / 8)
 		b	8b
-	UNWIND(	.fnend				)
 
 	.endm
 
@@ -273,6 +251,7 @@
 
 18:		forward_copy_shift	pull=24	push=8
 
+	UNWIND(	.fnend				)
 
 /*
  * Abort preamble and completion macros.
@@ -282,13 +261,13 @@
  */
 
 	.macro	copy_abort_preamble
-19:		ldmfd	sp!, {r5 - r9}
+19:		ldmfd	sp!, {r5, r6, r8 - r10}
 		b	21f
-20:		ldmfd	sp!, {r5 - r8}
+20:		ldmfd	sp!, {r5, r6, r8, r9}
 21:
 	.endm
 
 	.macro	copy_abort_end
-		ldmfd	sp!, {r4, pc}
+		ldmfd	sp!, {r4, UNWIND(fpreg,) pc}
 	.endm
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index ebfe4cb3d912..fac49e57cc0b 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -90,18 +90,15 @@
 		strusr	\reg, \ptr, 1, \cond, abort=\abort
 	.endm
 
-	.macro enter reg1 reg2
+	.macro enter regs:vararg
 	mov	r3, #0
-	stmdb	sp!, {r0, r2, r3, \reg1, \reg2}
+UNWIND( .save	{r0, r2, r3, \regs}	)
+	stmdb	sp!, {r0, r2, r3, \regs}
 	.endm
 
-	.macro usave reg1 reg2
-	UNWIND(	.save {r0, r2, r3, \reg1, \reg2}	)
-	.endm
-
-	.macro exit reg1 reg2
+	.macro exit regs:vararg
 	add	sp, sp, #8
-	ldmfd	sp!, {r0, \reg1, \reg2}
+	ldmfd	sp!, {r0, \regs}
 	.endm
 
 	.text
@@ -109,8 +106,7 @@
 ENTRY(__copy_to_user_std)
 WEAK(arm_copy_to_user)
 #ifdef CONFIG_CPU_SPECTRE
-	get_thread_info r3
-	ldr	r3, [r3, #TI_ADDR_LIMIT]
+	ldr	r3, =TASK_SIZE
 	uaccess_mask_range_ptr r0, r2, r3, ip
 #endif
 
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S
index 184d97254a7a..1ca6aadd649c 100644
--- a/arch/arm/lib/csumpartialcopy.S
+++ b/arch/arm/lib/csumpartialcopy.S
@@ -9,8 +9,8 @@
 
 		.text
 
-/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len, __u32 sum)
- * Params  : r0 = src, r1 = dst, r2 = len, r3 = checksum
+/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len)
+ * Params  : r0 = src, r1 = dst, r2 = len
  * Returns : r0 = new checksum
  */
 
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 0b706a39a677..0fd5c10e90a7 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -86,6 +86,7 @@ sum	.req	r3
 
 FN_ENTRY
 		save_regs
+		mov	sum, #-1
 
 		cmp	len, #8			@ Ensure that we have at least
 		blo	.Lless8			@ 8 bytes to copy.
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 6bd3a93eaa3c..6928781e6bee 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -62,9 +62,9 @@
 
 /*
  * unsigned int
- * csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *err_ptr)
- *  r0 = src, r1 = dst, r2 = len, r3 = sum, [sp] = *err_ptr
- * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
+ * csum_partial_copy_from_user(const char *src, char *dst, int len)
+ *  r0 = src, r1 = dst, r2 = len
+ * Returns : r0 = checksum or 0
  */
 
 #define FN_ENTRY	ENTRY(csum_partial_copy_from_user)
@@ -73,25 +73,11 @@
 #include "csumpartialcopygeneric.S"
 
 /*
- * FIXME: minor buglet here
- * We don't return the checksum for the data present in the buffer. To do
- * so properly, we would have to add in whatever registers were loaded before
- * the fault, which, with the current asm above is not predictable.
+ * We report fault by returning 0 csum - impossible in normal case, since
+ * we start with 0xffffffff for initial sum.
  */
 		.pushsection .text.fixup,"ax"
 		.align	4
-9001:		mov	r4, #-EFAULT
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-		ldr	r5, [sp, #9*4]		@ *err_ptr
-#else
-		ldr	r5, [sp, #8*4]		@ *err_ptr
-#endif
-		str	r4, [r5]
-		ldmia	sp, {r1, r2}		@ retrieve dst, len
-		add	r2, r2, r1
-		mov	r0, #0			@ zero the buffer
-9002:		teq	r2, r1
-		strbne	r0, [r1], #1
-		bne	9002b
+9001:		mov	r0, #0
 		load_regs
 		.popsection
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index b5e8b9ae4c7d..7fd3600db8ef 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -40,8 +40,8 @@ ENDPROC(_find_first_zero_bit_le)
  * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
  */
 ENTRY(_find_next_zero_bit_le)
-		teq	r1, #0
-		beq	3b
+		cmp	r2, r1
+		bhs	3b
 		ands	ip, r2, #7
 		beq	1b			@ If new byte, goto old routine
 ARM(		ldrb	r3, [r0, r2, lsr #3]	)
@@ -81,8 +81,8 @@ ENDPROC(_find_first_bit_le)
  * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
  */
 ENTRY(_find_next_bit_le)
-		teq	r1, #0
-		beq	3b
+		cmp	r2, r1
+		bhs	3b
 		ands	ip, r2, #7
 		beq	1b			@ If new byte, goto old routine
 ARM(		ldrb	r3, [r0, r2, lsr #3]	)
@@ -115,8 +115,8 @@ ENTRY(_find_first_zero_bit_be)
 ENDPROC(_find_first_zero_bit_be)
 
 ENTRY(_find_next_zero_bit_be)
-		teq	r1, #0
-		beq	3b
+		cmp	r2, r1
+		bhs	3b
 		ands	ip, r2, #7
 		beq	1b			@ If new byte, goto old routine
 		eor	r3, r2, #0x18		@ big endian byte ordering
@@ -149,8 +149,8 @@ ENTRY(_find_first_bit_be)
 ENDPROC(_find_first_bit_be)
 
 ENTRY(_find_next_bit_be)
-		teq	r1, #0
-		beq	3b
+		cmp	r2, r1
+		bhs	3b
 		ands	ip, r2, #7
 		beq	1b			@ If new byte, goto old routine
 		eor	r3, r2, #0x18		@ big endian byte ordering
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 09a333153dc6..90f2b645aa0d 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -42,26 +42,25 @@
 		strb\cond \reg, [\ptr], #1
 	.endm
 
-	.macro enter reg1 reg2
-	stmdb sp!, {r0, \reg1, \reg2}
+	.macro enter regs:vararg
+UNWIND( .save	{r0, \regs}	)
+	stmdb sp!, {r0, \regs}
 	.endm
 
-	.macro usave reg1 reg2
-	UNWIND(	.save {r0, \reg1, \reg2}	)
-	.endm
-
-	.macro exit reg1 reg2
-	ldmfd sp!, {r0, \reg1, \reg2}
+	.macro exit regs:vararg
+	ldmfd sp!, {r0, \regs}
 	.endm
 
 	.text
 
 /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
 
+ENTRY(__memcpy)
 ENTRY(mmiocpy)
-ENTRY(memcpy)
+WEAK(memcpy)
 
 #include "copy_template.S"
 
 ENDPROC(memcpy)
 ENDPROC(mmiocpy)
+ENDPROC(__memcpy)
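The csum fixup above leans on an invariant worth spelling out: csumpartialcopygeneric.S now seeds the accumulator with "mov sum, #-1" (0xffffffff), and ones'-complement (end-around-carry) addition can never reach zero from a non-zero value, so a return value of 0 is free to mean "fault". Below is a minimal standalone C model of that argument; csum_add() and the test values are illustrative only, not kernel code.

/* csum_model.c - illustrative model, not the kernel implementation */
#include <stdint.h>
#include <stdio.h>

/* Ones'-complement accumulation with end-around carry, modelling what
 * the checksum loop does with each 32-bit word it loads. */
static uint32_t csum_add(uint32_t sum, uint32_t word)
{
	uint64_t t = (uint64_t)sum + word;

	return (uint32_t)(t + (t >> 32));	/* fold the carry back in */
}

int main(void)
{
	uint32_t sum = 0xffffffffu;	/* initial sum, as "mov sum, #-1" */
	uint32_t data[] = { 0x00000000u, 0x12345678u, 0xffffffffu };

	for (unsigned int i = 0; i < sizeof(data) / sizeof(data[0]); i++)
		sum = csum_add(sum, data[i]);

	/* With sum > 0, (uint64_t)sum + word is at most 0x1fffffffe, so
	 * the fold can yield 0xffffffff ("-0") but never 0. */
	printf("sum = %#x\n", sum);
	return 0;
}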
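Similarly, for the findbit.S hunks: the old entry check ("teq r1, #0; beq 3b") only rejected a zero maxbit, while the new "cmp r2, r1; bhs 3b" bails out whenever offset >= maxbit, branching to the existing label 3 that returns maxbit. A hedged standalone C model of the new semantics follows; find_next_bit_model() is invented for illustration and is not the kernel's find_next_bit().

/* findbit_model.c - illustrative model, not the kernel implementation */
#include <stdio.h>

static unsigned int find_next_bit_model(const unsigned char *addr,
					unsigned int maxbit,
					unsigned int offset)
{
	if (offset >= maxbit)	/* the new guard: cmp r2, r1; bhs 3b */
		return maxbit;

	for (; offset < maxbit; offset++)
		if (addr[offset / 8] & (1u << (offset % 8)))
			return offset;

	return maxbit;		/* no bit found: label 3 returns maxbit */
}

int main(void)
{
	unsigned char bits[] = { 0x10 };	/* only bit 4 set */

	printf("%u\n", find_next_bit_model(bits, 8, 0));	/* prints 4 */
	printf("%u\n", find_next_bit_model(bits, 8, 9));	/* prints 8 */
	return 0;
}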
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index b50e5770fb44..6410554039fd 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -24,18 +24,20 @@
  * occurring in the opposite direction.
  */
 
-ENTRY(memmove)
+ENTRY(__memmove)
+WEAK(memmove)
 	UNWIND(	.fnstart			)
 
 		subs	ip, r0, r1
 		cmphi	r2, ip
-		bls	memcpy
-
-		stmfd	sp!, {r0, r4, lr}
+		bls	__memcpy
 	UNWIND(	.fnend				)
 
 	UNWIND(	.fnstart			)
-	UNWIND(	.save	{r0, r4, lr}		) @ in first stmfd block
+	UNWIND(	.save	{r0, r4, fpreg, lr}	)
+		stmfd	sp!, {r0, r4, UNWIND(fpreg,) lr}
+	UNWIND(	.setfp	fpreg, sp		)
+	UNWIND(	mov	fpreg, sp		)
 		add	r1, r1, r2
 		add	r0, r0, r2
 		subs	r2, r2, #4
@@ -47,12 +49,7 @@ ENTRY(memmove)
 		bne	10f
 
 1:		subs	r2, r2, #(28)
-		stmfd	sp!, {r5 - r8}
-	UNWIND(	.fnend				)
-
-	UNWIND(	.fnstart			)
-	UNWIND(	.save	{r0, r4, lr}		)
-	UNWIND(	.save	{r5 - r8}		) @ in second stmfd block
+		stmfd	sp!, {r5, r6, r8, r9}
 		blt	5f
 
 	CALGN(	ands	ip, r0, #31		)
@@ -71,9 +68,9 @@ ENTRY(memmove)
 	PLD(	pld	[r1, #-96]		)
 
 3:	PLD(	pld	[r1, #-128]		)
-4:		ldmdb	r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
+4:		ldmdb	r1!, {r3, r4, r5, r6, r8, r9, ip, lr}
 		subs	r2, r2, #32
-		stmdb	r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+		stmdb	r0!, {r3, r4, r5, r6, r8, r9, ip, lr}
 		bge	3b
 	PLD(	cmn	r2, #96			)
 	PLD(	bge	4b			)
@@ -87,8 +84,8 @@ ENTRY(memmove)
 		W(ldr)	r4, [r1, #-4]!
 		W(ldr)	r5, [r1, #-4]!
 		W(ldr)	r6, [r1, #-4]!
-		W(ldr)	r7, [r1, #-4]!
 		W(ldr)	r8, [r1, #-4]!
+		W(ldr)	r9, [r1, #-4]!
 		W(ldr)	lr, [r1, #-4]!
 
 		add	pc, pc, ip
@@ -98,17 +95,13 @@ ENTRY(memmove)
 		W(str)	r4, [r0, #-4]!
 		W(str)	r5, [r0, #-4]!
 		W(str)	r6, [r0, #-4]!
-		W(str)	r7, [r0, #-4]!
 		W(str)	r8, [r0, #-4]!
+		W(str)	r9, [r0, #-4]!
 		W(str)	lr, [r0, #-4]!
 
 	CALGN(	bcs	2b			)
 
-7:		ldmfd	sp!, {r5 - r8}
-	UNWIND(	.fnend				) @ end of second stmfd block
-
-	UNWIND(	.fnstart			)
-	UNWIND(	.save	{r0, r4, lr}		) @ still in first stmfd block
+7:		ldmfd	sp!, {r5, r6, r8, r9}
 
 8:		movs	r2, r2, lsl #31
 		ldrbne	r3, [r1, #-1]!
@@ -117,7 +110,7 @@ ENTRY(memmove)
 		strbne	r3, [r0, #-1]!
 		strbcs	r4, [r0, #-1]!
 		strbcs	ip, [r0, #-1]
-		ldmfd	sp!, {r0, r4, pc}
+		ldmfd	sp!, {r0, r4, UNWIND(fpreg,) pc}
 
 9:		cmp	ip, #2
 		ldrbgt	r3, [r1, #-1]!
@@ -136,13 +129,10 @@ ENTRY(memmove)
 		ldr	r3, [r1, #0]
 		beq	17f
 		blt	18f
-	UNWIND(	.fnend				)
 
 
 	.macro	backward_copy_shift push pull
 
-	UNWIND(	.fnstart			)
-	UNWIND(	.save	{r0, r4, lr}		) @ still in first stmfd block
 		subs	r2, r2, #28
 		blt	14f
 
@@ -151,12 +141,7 @@ ENTRY(memmove)
 	CALGN(	subcc	r2, r2, ip		)
 	CALGN(	bcc	15f			)
 
-11:		stmfd	sp!, {r5 - r9}
-	UNWIND(	.fnend				)
-
-	UNWIND(	.fnstart			)
-	UNWIND(	.save	{r0, r4, lr}		)
-	UNWIND(	.save	{r5 - r9}		) @ in new second stmfd block
+11:		stmfd	sp!, {r5, r6, r8 - r10}
 
 	PLD(	pld	[r1, #-4]		)
 	PLD(	subs	r2, r2, #96		)
@@ -166,35 +151,31 @@ ENTRY(memmove)
 	PLD(	pld	[r1, #-96]		)
 
 12:	PLD(	pld	[r1, #-128]		)
-13:		ldmdb	r1!, {r7, r8, r9, ip}
+13:		ldmdb	r1!, {r8, r9, r10, ip}
 		mov	lr, r3, lspush #\push
 		subs	r2, r2, #32
 		ldmdb	r1!, {r3, r4, r5, r6}
 		orr	lr, lr, ip, lspull #\pull
 		mov	ip, ip, lspush #\push
-		orr	ip, ip, r9, lspull #\pull
+		orr	ip, ip, r10, lspull #\pull
+		mov	r10, r10, lspush #\push
+		orr	r10, r10, r9, lspull #\pull
 		mov	r9, r9, lspush #\push
 		orr	r9, r9, r8, lspull #\pull
 		mov	r8, r8, lspush #\push
-		orr	r8, r8, r7, lspull #\pull
-		mov	r7, r7, lspush #\push
-		orr	r7, r7, r6, lspull #\pull
+		orr	r8, r8, r6, lspull #\pull
 		mov	r6, r6, lspush #\push
 		orr	r6, r6, r5, lspull #\pull
 		mov	r5, r5, lspush #\push
 		orr	r5, r5, r4, lspull #\pull
 		mov	r4, r4, lspush #\push
 		orr	r4, r4, r3, lspull #\pull
-		stmdb	r0!, {r4 - r9, ip, lr}
+		stmdb	r0!, {r4 - r6, r8 - r10, ip, lr}
 		bge	12b
 	PLD(	cmn	r2, #96			)
 	PLD(	bge	13b			)
 
-		ldmfd	sp!, {r5 - r9}
-	UNWIND(	.fnend				) @ end of the second stmfd block
-
-	UNWIND(	.fnstart			)
-	UNWIND(	.save	{r0, r4, lr}		) @ still in first stmfd block
+		ldmfd	sp!, {r5, r6, r8 - r10}
 
 14:		ands	ip, r2, #28
 		beq	16f
@@ -210,7 +191,6 @@ ENTRY(memmove)
 
 16:		add	r1, r1, #(\pull / 8)
 		b	8b
-	UNWIND(	.fnend				)
 
 	.endm
 
@@ -221,4 +201,6 @@ ENTRY(memmove)
 
 18:		backward_copy_shift	push=24	pull=8
 
+	UNWIND(	.fnend				)
 ENDPROC(memmove)
+ENDPROC(__memmove)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 6ca4535c47fb..d71ab61430b2 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -13,8 +13,9 @@
 	.text
 	.align	5
 
+ENTRY(__memset)
 ENTRY(mmioset)
-ENTRY(memset)
+WEAK(memset)
 UNWIND( .fnstart         )
 	ands	r3, r0, #3	@ 1 unaligned?
 	mov	ip, r0		@ preserve r0 as return value
@@ -27,16 +28,16 @@ UNWIND( .fnstart         )
 	mov	r3, r1
 7:	cmp	r2, #16
 	blt	4f
+UNWIND( .fnend              )
 
 #if ! CALGN(1)+0
 
 /*
  * We need 2 extra registers for this loop - use r8 and the LR
  */
-	stmfd	sp!, {r8, lr}
-UNWIND( .fnend              )
 UNWIND( .fnstart            )
 UNWIND( .save {r8, lr}      )
+	stmfd	sp!, {r8, lr}
 	mov	r8, r1
 	mov	lr, r3
 
@@ -65,10 +66,9 @@ UNWIND( .fnend              )
  * whole cache lines at once.
  */
 
-	stmfd	sp!, {r4-r8, lr}
-UNWIND( .fnend                 )
 UNWIND( .fnstart               )
 UNWIND( .save {r4-r8, lr}      )
+	stmfd	sp!, {r4-r8, lr}
 	mov	r4, r1
 	mov	r5, r3
 	mov	r6, r1
@@ -132,6 +132,7 @@ UNWIND( .fnstart )
 UNWIND( .fnend   )
 ENDPROC(memset)
 ENDPROC(mmioset)
+ENDPROC(__memset)
 
 ENTRY(__memset32)
 UNWIND( .fnstart         )
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index c9450982a155..14eecaaf295f 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -24,6 +24,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 {
 	unsigned long addr = (unsigned long)_addr;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pmd_t *pmd;
 	pte_t *pte;
 	pud_t *pud;
@@ -33,7 +34,11 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
 		return 0;
 
-	pud = pud_offset(pgd, addr);
+	p4d = p4d_offset(pgd, addr);
+	if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
+		return 0;
+
+	pud = pud_offset(p4d, addr);
 	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
 		return 0;
 
@@ -87,16 +92,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	unsigned long ua_flags;
 	int atomic;
 
-	if (uaccess_kernel()) {
-		memcpy((void *)to, from, n);
-		return 0;
-	}
-
 	/* the mmap semaphore is taken only if not in an atomic context */
 	atomic = faulthandler_disabled();
 
 	if (!atomic)
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 	while (n) {
 		pte_t *pte;
 		spinlock_t *ptl;
@@ -104,11 +104,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 
 		while (!pin_page_for_write(to, &pte, &ptl)) {
 			if (!atomic)
-				up_read(&current->mm->mmap_sem);
+				mmap_read_unlock(current->mm);
 			if (__put_user(0, (char __user *)to))
 				goto out;
 			if (!atomic)
-				down_read(&current->mm->mmap_sem);
+				mmap_read_lock(current->mm);
 		}
 
 		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
@@ -128,7 +128,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 			spin_unlock(ptl);
 	}
 	if (!atomic)
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 
 out:
 	return n;
@@ -160,22 +160,17 @@ __clear_user_memset(void __user *addr, unsigned long n)
 {
 	unsigned long ua_flags;
 
-	if (uaccess_kernel()) {
-		memset((void *)addr, 0, n);
-		return 0;
-	}
-
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	while (n) {
 		pte_t *pte;
 		spinlock_t *ptl;
 		int tocopy;
 
 		while (!pin_page_for_write(addr, &pte, &ptl)) {
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			if (__put_user(0, (char __user *)addr))
 				goto out;
-			down_read(&current->mm->mmap_sem);
+			mmap_read_lock(current->mm);
 		}
 
 		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
@@ -193,7 +188,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
 		else
 			spin_unlock(ptl);
 	}
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 
 out:
 	return n;
@@ -242,7 +237,7 @@ static int __init test_size_treshold(void)
 	if (!dst_page)
 		goto no_dst;
 	kernel_ptr = page_address(src_page);
-	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
+	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
 	if (!user_ptr)
 		goto no_vmap;
 
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
index b99dd8e1c93f..522510baed49 100644
--- a/arch/arm/lib/xor-neon.c
+++ b/arch/arm/lib/xor-neon.c
@@ -17,17 +17,11 @@ MODULE_LICENSE("GPL");
 /*
  * Pull in the reference implementations while instructing GCC (through
  * -ftree-vectorize) to attempt to exploit implicit parallelism and emit
- * NEON instructions.
+ * NEON instructions. Clang does this by default at O2 so no pragma is
+ * needed.
  */
-#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#ifdef CONFIG_CC_IS_GCC
 #pragma GCC optimize "tree-vectorize"
-#else
-/*
- * While older versions of GCC do not generate incorrect code, they fail to
- * recognize the parallel nature of these functions, and emit plain ARM code,
- * which is known to be slower than the optimized ARM code in asm-arm/xor.h.
- */
-#warning This code requires at least version 4.6 of GCC
 #endif
 
 #pragma GCC diagnostic ignored "-Wunused-variable"
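As the comment block in call_with_stack.S states, the helper switches to the stack at sp and then invokes fn(arg) there; the rewrite above only changes how the old frame is recorded so that both the frame-pointer and EABI unwinders can walk through it. A hypothetical caller might look like the sketch below: the prototype is taken from that comment, while run_on_private_stack() and do_private_work() are invented for illustration and are not part of this diff.

/* Hedged sketch of a call_with_stack() caller; illustrative only. */
#include <linux/gfp.h>
#include <linux/thread_info.h>

/* Prototype as documented in call_with_stack.S. */
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);

/* Invented worker: runs with sp inside the freshly allocated stack. */
static void do_private_work(void *arg)
{
}

static void run_on_private_stack(void *arg)
{
	unsigned long stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);

	if (!stack)
		return;

	/* ARM stacks are full-descending, so hand over the address just
	 * past the highest word of the allocation. */
	call_with_stack(do_private_work, arg, (void *)(stack + THREAD_SIZE));

	free_pages(stack, THREAD_SIZE_ORDER);
}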