author:    Chris Metcalf <cmetcalf@tilera.com>  2013-08-06 16:04:13 -0400
committer: Chris Metcalf <cmetcalf@tilera.com>  2013-08-13 16:04:10 -0400
commit:    2f9ac29eec71a696cb0dcc5fb82c0f8d4dac28c9 (patch)
tree:      ee33ba7e452e8614130a811211eb2383a3133194 /arch/tile/include
parent:    tile: remove unnecessary backslashes in asm-offsets.c (diff)
tile: fast-path unaligned memory access for tilegx
This change enables unaligned userspace memory access via a kernel fast path on tilegx. The kernel tracks user PC/instruction pairs per thread using a direct-mapped cache in userspace. The cache maps those PC/instruction pairs to JIT'ed instruction sequences that load or store using byte-wide load/store instructions and then synthesize the 2-, 4-, or 8-byte load or store result. Once an instruction has been seen to generate an unaligned access, subsequent hits on that instruction typically incur an overhead of only around 50 cycles if the cache and TLB are hot.

We support the prctl() PR_GET_UNALIGN / PR_SET_UNALIGN syscalls to enable or disable unaligned fixups on a per-process basis.

To do this we pull some of the tilepro unaligned support out of the single_step.c file; tilepro uses instruction disassembly for both single-step and unaligned access support. Since tilegx actually has hardware single-step support, though, it's cleaner to keep the tilegx unaligned access code in a separate file. While we're at it, properly rename the tilepro-specific types, etc., to have tilepro suffixes instead of generic tile suffixes.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
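As a userspace-side illustration of the control described above, here is a minimal sketch of toggling the per-process unaligned-access fixups with prctl(). PR_GET_UNALIGN / PR_SET_UNALIGN and the PR_UNALIGN_* flags are the generic Linux prctl interface; treating the value 0 as "restore the default fixup behavior" is an assumption, based on the .align_ctl = 0 initializer added in thread_info.h below.

/*
 * Sketch only, not part of the patch: query and set the per-process
 * unaligned-access control word via prctl().
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	unsigned int ctl;

	/* Read the current unaligned-access control word for this process. */
	if (prctl(PR_GET_UNALIGN, (unsigned long)&ctl, 0, 0, 0) != 0) {
		perror("PR_GET_UNALIGN");
		return 1;
	}
	printf("current unalign ctl: %#x\n", ctl);

	/* Ask for SIGBUS on unaligned access instead of a kernel fixup. */
	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0) != 0) {
		perror("PR_SET_UNALIGN");
		return 1;
	}

	/* Re-enable the JIT fast-path fixups (assumed default value of 0). */
	prctl(PR_SET_UNALIGN, 0, 0, 0, 0);
	return 0;
}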
Diffstat (limited to 'arch/tile/include')
-rw-r--r--  arch/tile/include/asm/processor.h     7
-rw-r--r--  arch/tile/include/asm/ptrace.h        3
-rw-r--r--  arch/tile/include/asm/sections.h      4
-rw-r--r--  arch/tile/include/asm/thread_info.h   6
-rw-r--r--  arch/tile/include/asm/traps.h        11
5 files changed, 28 insertions, 3 deletions
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index b3f104953da2..cda27243fb09 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -247,6 +247,13 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(task) task_pc(task)
#define KSTK_ESP(task) task_sp(task)
+/* Fine-grained unaligned JIT support */
+#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
+#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
+
+extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
+extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+
/* Standard format for printing registers and other word-size data. */
#ifdef __tilegx__
# define REGFMT "0x%016lx"
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index fd412260aff7..73b681b566f7 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -79,8 +79,7 @@ extern void single_step_execve(void);
struct task_struct;
-extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
- int error_code);
+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs);
#ifdef __tilegx__
/* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h
index 7d8a935a9238..cc95276ef9c9 100644
--- a/arch/tile/include/asm/sections.h
+++ b/arch/tile/include/asm/sections.h
@@ -28,7 +28,9 @@ extern char __w1data_begin[], __w1data_end[];
/* Not exactly sections, but PC comparison points in the code. */
extern char __rt_sigreturn[], __rt_sigreturn_end[];
-#ifndef __tilegx__
+#ifdef __tilegx__
+extern char __start_unalign_asm_code[], __end_unalign_asm_code[];
+#else
extern char sys_cmpxchg[], __sys_cmpxchg_end[];
extern char __sys_cmpxchg_grab_lock[];
extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index d1733dee98a2..b8aa6df3e102 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -39,6 +39,11 @@ struct thread_info {
struct restart_block restart_block;
struct single_step_state *step_state; /* single step state
(if non-zero) */
+ int align_ctl; /* controls unaligned access */
+#ifdef __tilegx__
+ unsigned long unalign_jit_tmp[4]; /* temp r0..r3 storage */
+ void __user *unalign_jit_base; /* unalign fixup JIT base */
+#endif
};
/*
@@ -56,6 +61,7 @@ struct thread_info {
.fn = do_no_restart_syscall, \
}, \
.step_state = NULL, \
+ .align_ctl = 0, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index e28c3df4176a..5f172b2403a6 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -15,6 +15,7 @@
#ifndef _ASM_TILE_TRAPS_H
#define _ASM_TILE_TRAPS_H
+#ifndef __ASSEMBLY__
#include <arch/chip.h>
/* mm/fault.c */
@@ -69,6 +70,16 @@ void gx_singlestep_handle(struct pt_regs *, int fault_num);
/* kernel/intvec_64.S */
void fill_ra_stack(void);
+
+/* Handle unalign data fixup. */
+extern void do_unaligned(struct pt_regs *regs, int vecnum);
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __tilegx__
+/* 128 byte JIT per unalign fixup. */
+#define UNALIGN_JIT_SHIFT 7
#endif
#endif /* _ASM_TILE_TRAPS_H */
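The UNALIGN_JIT_SHIFT define above fixes each JIT fixup stub at 128 bytes (1 << 7). Purely to illustrate the direct-mapped cache idea from the commit message, the sketch below shows how a per-thread JIT base plus a PC-derived index could select a slot. The cache size, the index hash, and the helper name are assumptions made for illustration, not taken from the kernel's actual implementation; only the 128-byte slot size comes from the patch.

/*
 * Illustration only: pick a 128-byte JIT slot for a faulting user PC
 * in a direct-mapped cache.  JIT_CACHE_SLOTS and the index hash are
 * hypothetical; UNALIGN_JIT_SHIFT matches the define in traps.h above.
 */
#include <stdint.h>

#define UNALIGN_JIT_SHIFT	7	/* 128 bytes per fixup stub */
#define JIT_CACHE_SLOTS		64	/* assumed number of direct-mapped slots */

static inline void *unalign_jit_slot(void *jit_base, uintptr_t pc)
{
	/*
	 * Tilegx instruction bundles are 8 bytes, so drop the low three
	 * bits before taking the index.  Direct-mapped: two PCs hashing to
	 * the same index simply overwrite each other's translation.
	 */
	uintptr_t index = (pc >> 3) & (JIT_CACHE_SLOTS - 1);

	return (char *)jit_base + (index << UNALIGN_JIT_SHIFT);
}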