author	Paul Mundt <lethal@linux-sh.org>	2006-11-27 12:06:26 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2006-12-06 10:45:39 +0900
commit	510c72ad2dd4e05e6908755f51ac89482c6eb987 (patch)
tree	fa2e9e9a674e38dd523d937329627560f0bd6b64 /arch/sh/mm/copy_page.S
parent	sh: Fixup 4K irq stacks. (diff)
sh: Fixup various PAGE_SIZE == 4096 assumptions.
There were a number of places that made evil PAGE_SIZE == 4k assumptions that ended up breaking when trying to play with 8k and 64k page sizes; this fixes those up.

The most significant change is the way we load THREAD_SIZE. Previously this was done via:

	mov	#(THREAD_SIZE >> 8), reg
	shll8	reg

to avoid a memory access and allow the immediate load. With a 64k PAGE_SIZE, we're out of range for the immediate load size without resorting to special instructions available in later ISAs (movi20s and so on). The "workaround" for this is to bump up the shift to 10 and insert a shll2, which gives a bit more flexibility while still being much cheaper than a memory access.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
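For illustration, the replacement sequence described above would look roughly like the following. This is a sketch inferred from the commit description, not code taken from this patch; "reg" stands in for whichever register the call site actually uses:

	mov	#(THREAD_SIZE >> 10), reg	! immediate now fits even with a 64k PAGE_SIZE
	shll8	reg				! reg <<= 8
	shll2	reg				! reg <<= 2, total shift of 10 restores THREAD_SIZE

This stays exact as long as THREAD_SIZE is a multiple of 1024, and it still avoids the memory access that loading the constant from a literal pool would cost.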
Diffstat (limited to 'arch/sh/mm/copy_page.S')
-rw-r--r--	arch/sh/mm/copy_page.S	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index 1addffe117c3..397c94c97315 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -1,12 +1,12 @@
-/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $
- *
+/*
* copy_page, __copy_user_page, __copy_user implementation of SuperH
*
* Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2002 Toshinobu Sugioka
- *
+ * Copyright (C) 2006 Paul Mundt
*/
#include <linux/linkage.h>
+#include <asm/page.h>
/*
* copy_page_slow
@@ -18,7 +18,7 @@
/*
* r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
* r9 --- not used
* r10 --- to
* r11 --- from
@@ -30,7 +30,7 @@ ENTRY(copy_page_slow)
mov r4,r10
mov r5,r11
mov r5,r8
- mov.w .L4096,r0
+ mov.l .Lpsz,r0
add r0,r8
!
1: mov.l @r11+,r0
@@ -80,7 +80,7 @@ ENTRY(copy_page_slow)
/*
* r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
* r9 --- orig_to
* r10 --- to
* r11 --- from
@@ -94,7 +94,7 @@ ENTRY(__copy_user_page)
mov r5,r11
mov r6,r9
mov r5,r8
- mov.w .L4096,r0
+ mov.l .Lpsz,r0
add r0,r8
!
1: ocbi @r9
@@ -129,7 +129,7 @@ ENTRY(__copy_user_page)
rts
nop
#endif
-.L4096: .word 4096
+.Lpsz: .long PAGE_SIZE
/*
* __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
* Return the number of bytes NOT copied