Diffstat (limited to 'arch/sparc/include/asm/uaccess_64.h')
-rw-r--r--	arch/sparc/include/asm/uaccess_64.h	106
1 file changed, 77 insertions(+), 29 deletions(-)
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 30eb4c6414d1..94266a5c5b04 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -12,33 +12,15 @@
#include <asm/spitfire.h>
#include <asm/processor.h>
+#include <asm-generic/access_ok.h>
/*
* Sparc64 is segmented, though more like the M68K than the I386.
* We use the secondary ASI to address user memory, which references a
* completely different VM map, thus there is zero chance of the user
* doing something queer and tricking us into poking kernel memory.
- *
- * What is left here is basically what is needed for the other parts of
- * the kernel that expect to be able to manipulate, erum, "segments".
- * Or perhaps more properly, permissions.
- *
- * "For historical reasons, these macros are grossly misnamed." -Linus
*/
-#define KERNEL_DS ((mm_segment_t) { ASI_P })
-#define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */
-
-#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
-
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
-#define set_fs(val) \
-do { \
- current_thread_info()->current_ds = (val).seg; \
- __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
-} while(0)
-
/*
* Test whether a block of memory is a valid user space address.
* Returns 0 if the range is valid, nonzero otherwise.
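With set_fs()/get_fs() gone, sparc64 relies on the shared implementation pulled in by <asm-generic/access_ok.h>. A sketch of that generic helper, paraphrased from the asm-generic header of the same era (see the header itself for the authoritative version):

static inline int __access_ok(const void __user *ptr, unsigned long size)
{
	unsigned long limit = TASK_SIZE_MAX;
	unsigned long addr = (unsigned long)ptr;

	/*
	 * Architectures whose user accesses go through a separate
	 * address space can skip the range check entirely.
	 */
	if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) ||
	    !IS_ENABLED(CONFIG_MMU))
		return true;

	return (size <= limit) && (addr <= (limit - size));
}

Because sparc64 reaches user memory through the secondary ASI (a separate VM map, per the comment above), it selects CONFIG_ALTERNATE_USER_ADDRESS_SPACE, and the generic check degenerates to "always valid" -- exactly what the hard-coded stubs removed in the next hunk did.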
@@ -61,16 +43,6 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
-static inline int __access_ok(const void __user * addr, unsigned long size)
-{
- return 1;
-}
-
-static inline int access_ok(const void __user * addr, unsigned long size)
-{
- return 1;
-}
-
void __retl_efault(void);
/* Uh, these should become the main single-value transfer routines..
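The stubs removed above always returned 1, so dropping them in favour of the generic header is behavior-neutral for callers; the usual access_ok()/__get_user() pattern compiles unchanged. A minimal, purely hypothetical caller for illustration (not part of this patch):

/* Hypothetical example, not from the kernel tree. */
static long read_user_flags(const unsigned int __user *uptr)
{
	unsigned int flags;

	if (!access_ok(uptr, sizeof(*uptr)))	/* now the generic check */
		return -EFAULT;
	if (__get_user(flags, uptr))		/* fault caught via __ex_table */
		return -EFAULT;
	return flags;
}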
@@ -100,6 +72,42 @@ void __retl_efault(void);
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))
+#define __put_kernel_nofault(dst, src, type, label) \
+do { \
+ type *addr = (type __force *)(dst); \
+ type data = *(type *)src; \
+ register int __pu_ret; \
+ switch (sizeof(type)) { \
+ case 1: __put_kernel_asm(data, b, addr, __pu_ret); break; \
+ case 2: __put_kernel_asm(data, h, addr, __pu_ret); break; \
+ case 4: __put_kernel_asm(data, w, addr, __pu_ret); break; \
+ case 8: __put_kernel_asm(data, x, addr, __pu_ret); break; \
+ default: __pu_ret = __put_user_bad(); break; \
+ } \
+ if (__pu_ret) \
+ goto label; \
+} while (0)
+
+#define __put_kernel_asm(x, size, addr, ret) \
+__asm__ __volatile__( \
+ "/* Put kernel asm, inline. */\n" \
+ "1:\t" "st"#size " %1, [%2]\n\t" \
+ "clr %0\n" \
+ "2:\n\n\t" \
+ ".section .fixup,#alloc,#execinstr\n\t" \
+ ".align 4\n" \
+ "3:\n\t" \
+ "sethi %%hi(2b), %0\n\t" \
+ "jmpl %0 + %%lo(2b), %%g0\n\t" \
+ " mov %3, %0\n\n\t" \
+ ".previous\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".word 1b, 3b\n\t" \
+ ".previous\n\n\t" \
+ : "=r" (ret) : "r" (x), "r" (__m(addr)), \
+ "i" (-EFAULT))
+
#define __put_user_nocheck(data, addr, size) ({ \
register int __pu_ret; \
switch (size) { \
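__put_kernel_nofault() added above is the arch hook behind copy_to_kernel_nofault(). Roughly how mm/maccess.c instantiates it, paraphrased from the generic code (not part of this patch):

#define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__put_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

The label argument is what ties the asm fixup together with plain C control flow: on a fault, the exception table sends control to the fixup stub, which loads -EFAULT into %0, and the macro then executes "goto err_label".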
@@ -134,6 +142,46 @@ __asm__ __volatile__( \
int __put_user_bad(void);
+#define __get_kernel_nofault(dst, src, type, label) \
+do { \
+ type *addr = (type __force *)(src); \
+ register int __gu_ret; \
+ register unsigned long __gu_val; \
+ switch (sizeof(type)) { \
+ case 1: __get_kernel_asm(__gu_val, ub, addr, __gu_ret); break; \
+ case 2: __get_kernel_asm(__gu_val, uh, addr, __gu_ret); break; \
+ case 4: __get_kernel_asm(__gu_val, uw, addr, __gu_ret); break; \
+ case 8: __get_kernel_asm(__gu_val, x, addr, __gu_ret); break; \
+ default: \
+ __gu_val = 0; \
+ __gu_ret = __get_user_bad(); \
+ break; \
+ } \
+ if (__gu_ret) \
+ goto label; \
+ *(type *)dst = (__force type) __gu_val; \
+} while (0)
+#define __get_kernel_asm(x, size, addr, ret) \
+__asm__ __volatile__( \
+ "/* Get kernel asm, inline. */\n" \
+ "1:\t" "ld"#size " [%2], %1\n\t" \
+ "clr %0\n" \
+ "2:\n\n\t" \
+ ".section .fixup,#alloc,#execinstr\n\t" \
+ ".align 4\n" \
+ "3:\n\t" \
+ "sethi %%hi(2b), %0\n\t" \
+ "clr %1\n\t" \
+ "jmpl %0 + %%lo(2b), %%g0\n\t" \
+ " mov %3, %0\n\n\t" \
+ ".previous\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".word 1b, 3b\n\n\t" \
+ ".previous\n\t" \
+ : "=r" (ret), "=r" (x) : "r" (__m(addr)), \
+ "i" (-EFAULT))
+
#define __get_user_nocheck(data, addr, size, type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
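On the read side, note the "clr %1" in the fixup path of __get_kernel_asm: a faulting load leaves zero, not stale register contents, in the destination. The hook is reached through the generic wrapper in include/linux/uaccess.h, which looks roughly like this (paraphrased):

#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})

with copy_from_kernel_nofault() looping over __get_kernel_nofault() one size class at a time, mirroring the put side shown earlier.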