author     Geoff Levand <geoff@infradead.org>   2016-04-27 17:47:10 +0100
committer  Will Deacon <will.deacon@arm.com>    2016-04-28 12:05:46 +0100
commit     5003dbde45961dd7ab3d8a09ab9ad8bcb604db40 (patch)
tree       84847e1eb60d698038a9fb900e8fbf0dfccbfd3c /arch/arm64/include/asm/assembler.h
parent     arm64: Promote KERNEL_START/KERNEL_END definitions to a header file (diff)
arm64: Add new asm macro copy_page
Kexec and hibernate need to copy pages of memory, but may not have all of
the kernel mapped, and are unable to call copy_page(). Add a simplistic
copy_page() macro that can be inlined in these situations. lib/copy_page.S
provides a bigger, better version, but uses more registers.

Signed-off-by: Geoff Levand <geoff@infradead.org>
[Changed asm label to 9998, added commit message]
Signed-off-by: James Morse <james.morse@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat
 arch/arm64/include/asm/assembler.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 3b3812a3e8b9..10b017c4bdd8 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -24,6 +24,7 @@
#define __ASM_ASSEMBLER_H
#include <asm/asm-offsets.h>
+#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
@@ -280,6 +281,24 @@ lr .req x30 // link register
.endm
/*
+ * copy_page - copy src to dest using temp registers t1-t8
+ */
+ .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
+9998: ldp \t1, \t2, [\src]
+ ldp \t3, \t4, [\src, #16]
+ ldp \t5, \t6, [\src, #32]
+ ldp \t7, \t8, [\src, #48]
+ add \src, \src, #64
+ stnp \t1, \t2, [\dest]
+ stnp \t3, \t4, [\dest, #16]
+ stnp \t5, \t6, [\dest, #32]
+ stnp \t7, \t8, [\dest, #48]
+ add \dest, \dest, #64
+ tst \src, #(PAGE_SIZE - 1)
+ b.ne 9998b
+ .endm
+
+/*
* Annotate a function as position independent, i.e., safe to be called before
* the kernel virtual mapping is activated.
*/
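
As a usage illustration (not part of the patch), a hypothetical call site might look like the following sketch. The register assignments are assumptions chosen for the example; the only real requirements are that dest and src start page-aligned (the loop terminates when src reaches the next page boundary) and that eight scratch registers are free to clobber.

	/* Hypothetical call site: copy one page from [x1] to [x0].
	 * x2-x9 are scratch registers clobbered by the macro.
	 * Both x0 and x1 are advanced by PAGE_SIZE as a side effect.
	 */
	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9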