author    Tao Guo <glorioustao@gmail.com>    2012-09-26 04:28:22 -0400
committer Ingo Molnar <mingo@kernel.org>    2012-09-26 13:35:32 +0200
commit    1b2b23d8573076a587ed2081e0d2b69691079e0e (patch)
tree      664d3b567fe4b8f75529c4ab64755961d3d68343 /arch/x86
parent    x86: Use REP BSF unconditionally (diff)
x86_64: Work around old GAS bug
GAS in binutils (2.16.91) could not parse parentheses within macro
parameters unless fully parenthesized, and this is a workaround to
make old gas work without generating the errors below:

  arch/x86/kernel/entry_64.S: Assembler messages:
  arch/x86/kernel/entry_64.S:387: Error: too many positional arguments
  arch/x86/kernel/entry_64.S:389: Error: too many positional arguments
  [...]

Signed-off-by: Tao Guo <glorioustao@gmail.com>
Reluctantly-Acked-by: Jan Beulich <jbeulich@novell.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/1348648102-12653-1-git-send-email-glorioustao@gmail.com
[ Jan argues that these old GAS versions are fragile - which is so,
  but let's give them a chance. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
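A minimal sketch of the failure mode described above; the store_off
macro is hypothetical, standing in for the kernel's movq_cfi, and
112/32 are the RDI/RBP offsets defined in calling.h:

	.macro store_off reg, off
	movq	\reg, \off(%rsp)
	.endm

	/* Before: parenthesized defines made RDI-RBP expand to
	 * (112)-(32); GAS 2.16.91 mis-splits the parenthesized
	 * pieces into extra macro arguments and reports
	 * "too many positional arguments". */
	store_off %rdi, (112)-(32)

	/* After: bare defines plus call-site parentheses expand to
	 * (112-32), a single fully parenthesized argument that old
	 * GAS parses as one parameter (newer GAS accepts both). */
	store_off %rdi, (112-32)

Running `as --version` shows which binutils a toolchain uses; per the
message above, 2.16.91 is the version known to need this workaround.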
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/calling.h | 48
-rw-r--r--  arch/x86/kernel/entry_64.S     | 20
2 files changed, 33 insertions(+), 35 deletions(-)
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index a9e3a740f697..7f8422a28a46 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -49,38 +49,36 @@ For 32-bit we have the following conventions - kernel is built with
#include "dwarf2.h"
/*
- * 64-bit system call stack frame layout defines and helpers, for
- * assembly code (note that the seemingly unnecessary parentheses
- * are to prevent cpp from inserting spaces in expressions that get
- * passed to macros):
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
*/
-#define R15 (0)
-#define R14 (8)
-#define R13 (16)
-#define R12 (24)
-#define RBP (32)
-#define RBX (40)
+#define R15 0
+#define R14 8
+#define R13 16
+#define R12 24
+#define RBP 32
+#define RBX 40
/* arguments: interrupts/non tracing syscalls only save up to here: */
-#define R11 (48)
-#define R10 (56)
-#define R9 (64)
-#define R8 (72)
-#define RAX (80)
-#define RCX (88)
-#define RDX (96)
-#define RSI (104)
-#define RDI (112)
-#define ORIG_RAX (120) /* + error_code */
+#define R11 48
+#define R10 56
+#define R9 64
+#define R8 72
+#define RAX 80
+#define RCX 88
+#define RDX 96
+#define RSI 104
+#define RDI 112
+#define ORIG_RAX 120 /* + error_code */
/* end of arguments */
/* cpu exception frame or undefined in case of fast syscall: */
-#define RIP (128)
-#define CS (136)
-#define EFLAGS (144)
-#define RSP (152)
-#define SS (160)
+#define RIP 128
+#define CS 136
+#define EFLAGS 144
+#define RSP 152
+#define SS 160
#define ARGOFFSET R11
#define SWFRAME ORIG_RAX
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b1dac12dc5e6..2c6706167c8d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -342,15 +342,15 @@ ENDPROC(native_usergs_sysret64)
.macro SAVE_ARGS_IRQ
cld
/* start from rbp in pt_regs and jump over */
- movq_cfi rdi, RDI-RBP
- movq_cfi rsi, RSI-RBP
- movq_cfi rdx, RDX-RBP
- movq_cfi rcx, RCX-RBP
- movq_cfi rax, RAX-RBP
- movq_cfi r8, R8-RBP
- movq_cfi r9, R9-RBP
- movq_cfi r10, R10-RBP
- movq_cfi r11, R11-RBP
+ movq_cfi rdi, (RDI-RBP)
+ movq_cfi rsi, (RSI-RBP)
+ movq_cfi rdx, (RDX-RBP)
+ movq_cfi rcx, (RCX-RBP)
+ movq_cfi rax, (RAX-RBP)
+ movq_cfi r8, (R8-RBP)
+ movq_cfi r9, (R9-RBP)
+ movq_cfi r10, (R10-RBP)
+ movq_cfi r11, (R11-RBP)
/* Save rbp so that we can unwind from get_irq_regs() */
movq_cfi rbp, 0
@@ -384,7 +384,7 @@ ENDPROC(native_usergs_sysret64)
.endm
ENTRY(save_rest)
- PARTIAL_FRAME 1 REST_SKIP+8
+ PARTIAL_FRAME 1 (REST_SKIP+8)
movq 5*8+16(%rsp), %r11 /* save return address */
movq_cfi rbx, RBX+16
movq_cfi rbp, RBP+16