Diffstat (limited to 'include/asm-sparc64')

 -rw-r--r--  include/asm-sparc64/atomic.h      |  2 ++
 -rw-r--r--  include/asm-sparc64/bitops.h      |  1 +
 -rw-r--r--  include/asm-sparc64/cache.h       |  1 -
 -rw-r--r--  include/asm-sparc64/elf.h         |  2 +-
 -rw-r--r--  include/asm-sparc64/futex.h       | 49 +-----------------
 -rw-r--r--  include/asm-sparc64/kprobes.h     | 10 +-----
 -rw-r--r--  include/asm-sparc64/mman.h        |  1 +
 -rw-r--r--  include/asm-sparc64/mmu_context.h |  2 +-
 -rw-r--r--  include/asm-sparc64/mutex.h       |  9 +++++
 -rw-r--r--  include/asm-sparc64/oplib.h       |  2 ++
 -rw-r--r--  include/asm-sparc64/processor.h   |  5 +++--
 -rw-r--r--  include/asm-sparc64/system.h      | 18 ++++++------
 12 files changed, 34 insertions(+), 68 deletions(-)
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 3789fe315992..25256bdc8aae 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -72,6 +72,7 @@ extern int atomic64_sub_ret(int, atomic64_t *);
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
@@ -96,4 +97,5 @@ extern int atomic64_sub_ret(int, atomic64_t *);
#define smp_mb__after_atomic_inc() barrier()
#endif
+#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
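A note on the atomic.h additions: atomic_xchg() atomically stores the new value and returns the previous one, layered on the existing xchg() primitive, while the asm-generic/atomic.h include supplies the common atomic_long_t wrappers. A minimal usage sketch (the "pending" flag and handler are hypothetical examples, not from this patch):

	static atomic_t pending = ATOMIC_INIT(1);

	/* Exactly one racing caller sees the old value 1 and wins: */
	if (atomic_xchg(&pending, 0) == 1)
		handle_event();		/* hypothetical handler */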
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 6388b8376c50..6efc0162fb09 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -119,6 +119,7 @@ static inline unsigned long __ffs(unsigned long word)
*/
#define fls(x) generic_fls(x)
+#define fls64(x) generic_fls64(x)
#ifdef __KERNEL__
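generic_fls64() is "find last set" for 64-bit values: it returns the 1-based index of the most significant set bit, or 0 when no bit is set. For example:

	fls64(0)                     == 0
	fls64(1)                     == 1	/* bit 0 set  */
	fls64(0x8000000000000000ULL) == 64	/* bit 63 set */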
diff --git a/include/asm-sparc64/cache.h b/include/asm-sparc64/cache.h
index ade5ec3bfd5a..f7d35a2ae9b8 100644
--- a/include/asm-sparc64/cache.h
+++ b/include/asm-sparc64/cache.h
@@ -9,7 +9,6 @@
#define L1_CACHE_BYTES 32 /* Two 16-byte sub-blocks per line. */
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-#define L1_CACHE_SHIFT_MAX 5 /* largest L1 which this arch supports */
#define SMP_CACHE_BYTES_SHIFT 6
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
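L1_CACHE_SHIFT_MAX goes away because nothing uses it any more; the alignment macro that remains simply rounds a size up to the next 32-byte line boundary:

	/* With L1_CACHE_BYTES == 32: */
	L1_CACHE_ALIGN(1)	/* == 32 */
	L1_CACHE_ALIGN(32)	/* == 32 */
	L1_CACHE_ALIGN(33)	/* == 64 */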
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 91458118277e..69539a8ab833 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -119,7 +119,7 @@ typedef struct {
#endif
#define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs) \
- ({ ELF_CORE_COPY_REGS((*(__elf_regs)), (__tsk)->thread_info->kregs); 1; })
+ ({ ELF_CORE_COPY_REGS((*(__elf_regs)), task_pt_regs(__tsk)); 1; })
/*
* This is used to ensure we don't load something for the wrong architecture.
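This hunk, like the mmu_context.h and system.h changes below, converts direct (__tsk)->thread_info dereferences to the common accessors. On sparc64 the equivalence is direct (see the processor.h hunk below):

	#define task_pt_regs(tsk)	(task_thread_info(tsk)->kregs)

	/* so ELF_CORE_COPY_TASK_REGS still copies the same saved user
	 * registers, only through the shared accessor. */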
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 9feff4ce1424..6a332a9f099c 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
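The deleted body was boilerplate duplicated across architectures: decode encoded_op, return -ENOSYS for every operation, then apply the comparison. asm-generic/futex.h now carries the identical stub. For reference, the encoding it decodes is built by FUTEX_OP() in linux/futex.h, roughly:

	/* op and cmp live in the top byte; oparg and cmparg are
	 * sign-extended 12-bit fields below it. */
	#define FUTEX_OP(op, oparg, cmp, cmparg) \
		(((op & 0xf) << 28) | ((cmp & 0xf) << 24) | \
		 ((oparg & 0xfff) << 12) | (cmparg & 0xfff))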
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index 7ba845320f5c..e4efe652b54b 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -12,6 +12,7 @@ typedef u32 kprobe_opcode_t;
#define MAX_INSN_SIZE 2
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
+#define arch_remove_kprobe(p) do {} while (0)
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
@@ -38,15 +39,6 @@ struct kprobe_ctlblk {
struct prev_kprobe prev_kprobe;
};
-#ifdef CONFIG_KPROBES
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
-#else /* !CONFIG_KPROBES */
-static inline int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return 0;
-}
-#endif
-
#endif /* _SPARC64_KPROBES_H */
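sparc64 keeps no per-probe state that needs freeing, so arch_remove_kprobe() becomes an empty statement; the do { } while (0) wrapper preserves normal statement semantics at call sites. The !CONFIG_KPROBES stub for kprobe_exceptions_notify() can go because its only callers are themselves compiled out without kprobes. Call sites stay unchanged, e.g.:

	if (ret)
		arch_remove_kprobe(p);	/* compiles to a no-op on sparc64 */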
diff --git a/include/asm-sparc64/mman.h b/include/asm-sparc64/mman.h
index 01cecf54357b..cb4b6156194d 100644
--- a/include/asm-sparc64/mman.h
+++ b/include/asm-sparc64/mman.h
@@ -54,6 +54,7 @@
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
#define MADV_FREE 0x5 /* (Solaris) contents can be freed */
+#define MADV_REMOVE 0x6 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
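MADV_REMOVE asks the kernel to free both the pages and their backing store in the given range; on tmpfs/shmem this punches a hole in the file. A user-space sketch, with addr and len assumed page-aligned:

	#include <sys/mman.h>

	if (madvise(addr, len, MADV_REMOVE) != 0)
		perror("madvise(MADV_REMOVE)");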
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 08ba72d7722c..57ee7b306189 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -60,7 +60,7 @@ do { \
register unsigned long pgd_cache asm("o4"); \
paddr = __pa((__mm)->pgd); \
pgd_cache = 0UL; \
- if ((__tsk)->thread_info->flags & _TIF_32BIT) \
+ if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
pgd_cache = get_pgd_cache((__mm)->pgd); \
__asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
"mov %3, %%g4\n\t" \
diff --git a/include/asm-sparc64/mutex.h b/include/asm-sparc64/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-sparc64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
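asm-generic/mutex-dec.h implements the fastpath with an atomic decrement: count is 1 when unlocked, so decrementing to 0 takes the lock and going negative signals contention. A sketch of the generic lock fastpath this pulls in (modulo qualifiers):

	static inline void
	__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
	{
		if (unlikely(atomic_dec_return(count) < 0))
			fail_fn(count);	/* contended: enter the slowpath */
	}

The alternative named in the TODO, asm-generic/mutex-xchg.h, builds the same fastpath on the atomic_xchg() that atomic.h gains above.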
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index d02f1e8ae1a6..3c59b2693fb9 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -163,6 +163,7 @@ enum prom_input_device {
PROMDEV_IKBD, /* input from keyboard */
PROMDEV_ITTYA, /* input from ttya */
PROMDEV_ITTYB, /* input from ttyb */
+ PROMDEV_IRSC, /* input from rsc */
PROMDEV_I_UNK,
};
@@ -174,6 +175,7 @@ enum prom_output_device {
PROMDEV_OSCREEN, /* to screen */
PROMDEV_OTTYA, /* to ttya */
PROMDEV_OTTYB, /* to ttyb */
+ PROMDEV_ORSC, /* to rsc */
PROMDEV_O_UNK,
};
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index 3169f3e2237e..cd8d9b4c8658 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -186,8 +186,9 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long get_wchan(struct task_struct *task);
-#define KSTK_EIP(tsk) ((tsk)->thread_info->kregs->tpc)
-#define KSTK_ESP(tsk) ((tsk)->thread_info->kregs->u_regs[UREG_FP])
+#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
#define cpu_relax() barrier()
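task_pt_regs() names the saved user-register frame of a task; KSTK_EIP and KSTK_ESP then read the user PC and stack pointer from it, e.g. for /proc/<pid> reporting:

	unsigned long pc = KSTK_EIP(tsk);	/* %tpc of the saved trap frame  */
	unsigned long sp = KSTK_ESP(tsk);	/* u_regs[UREG_FP], the user FP  */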
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index b5417529f6f1..af254e581834 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -193,11 +193,7 @@ do { \
 * not preserve its value. Hairy, but it lets us remove 2 loads
* and 2 stores in this critical code path. -DaveM
*/
-#if __GNUC__ >= 3
#define EXTRA_CLOBBER ,"%l1"
-#else
-#define EXTRA_CLOBBER
-#endif
#define switch_to(prev, next, last) \
do { if (test_thread_flag(TIF_PERFCTR)) { \
unsigned long __tmp; \
@@ -212,7 +208,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
__asm__ __volatile__("wr %%g0, %0, %%asi" \
- : : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
+ : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
__asm__ __volatile__( \
"mov %%g4, %%g7\n\t" \
"wrpr %%g0, 0x95, %%pstate\n\t" \
@@ -242,7 +238,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
"b,a ret_from_syscall\n\t" \
"1:\n\t" \
: "=&r" (last) \
- : "0" (next->thread_info), \
+ : "0" (task_thread_info(next)), \
"i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
"i" (TI_CWP), "i" (TI_TASK) \
: "cc", \
@@ -257,6 +253,16 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
} \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
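The empty sched_cacheflush() is deliberate and the TODO stands: the scheduler's migration-cost autodetection calls it so each measurement starts from cold caches. A purely hypothetical caller sketch, not the scheduler's actual code:

	sched_cacheflush();		/* start cold                */
	t0 = sched_clock();
	touch_working_set(buf, size);	/* hypothetical probe helper */
	cost = sched_clock() - t0;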
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
unsigned long tmp1, tmp2;