Diffstat (limited to 'include/asm-ppc64/system.h')
-rw-r--r--  include/asm-ppc64/system.h | 46
1 file changed, 24 insertions, 22 deletions
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 98d120ca8a91..99b8ca52f101 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -13,7 +13,7 @@
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
-#include <asm/memory.h>
+#include <asm/synch.h>
/*
* Memory barrier.
@@ -48,7 +48,7 @@
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
-#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_wmb() eieio()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() __asm__ __volatile__("": : :"memory")
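Note: smp_wmb() now goes through the eieio() helper provided by the newly
included <asm/synch.h>. Judging from the open-coded barrier removed above,
that helper is expected to expand to the same instruction; a minimal sketch
of such a wrapper:

/* Sketch: an eieio() wrapper equivalent to the barrier removed above.
 * The real definition lives in <asm/synch.h>. The "memory" clobber
 * keeps GCC from reordering accesses across the instruction. */
#define eieio() __asm__ __volatile__ ("eieio" : : : "memory")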
@@ -88,7 +88,7 @@ DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
#ifdef CONFIG_XMON
-extern void xmon_init(void);
+extern void xmon_init(int enable);
#endif
#else
@@ -101,6 +101,9 @@ static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
+extern int set_dabr(unsigned long dabr);
+extern void _exception(int signr, struct pt_regs *regs, int code,
+ unsigned long addr);
extern int fix_alignment(struct pt_regs *regs);
extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
int sig);
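Note: _exception() is the arch hook for delivering a signal from an
exception handler. A hypothetical call site (the wrapper name and the
choice of SIGBUS/BUS_ADRALN are illustrative, not taken from this patch):

/* Hypothetical caller: after fix_alignment() fails, deliver SIGBUS for
 * the unaligned access recorded in the DAR register. */
static void handle_bad_alignment(struct pt_regs *regs)
{
        /* regs->dar holds the faulting data address on ppc64 */
        _exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
}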
@@ -117,8 +120,8 @@ extern void giveup_altivec(struct task_struct *);
extern void disable_kernel_altivec(void);
extern void enable_kernel_altivec(void);
extern int emulate_altivec(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);
#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
@@ -128,7 +131,12 @@ static inline void flush_altivec_to_thread(struct task_struct *t)
}
#endif
+static inline void flush_spe_to_thread(struct task_struct *t)
+{
+}
+
extern int mem_init_done; /* set on boot once kmalloc can be called */
+extern unsigned long memory_limit;
/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);
@@ -141,12 +149,7 @@ struct thread_struct;
extern struct task_struct * _switch(struct thread_struct *prev,
struct thread_struct *next);
-static inline int __is_processor(unsigned long pv)
-{
- unsigned long pvr;
- asm("mfspr %0, 0x11F" : "=r" (pvr));
- return(PVR_VER(pvr) == pv);
-}
+extern int powersave_nap; /* set if nap mode can be used in idle loop */
/*
* Atomic exchange
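Note: the removed __is_processor() read SPR 0x11F, the Processor Version
Register (PVR), and compared its version field against the caller's value.
A standalone sketch of the same check, assuming the conventional PVR_VER()
definition (upper 16 bits of the PVR):

#define PVR_VER(pvr)    (((pvr) >> 16) & 0xFFFF)   /* version field */

static inline int is_processor_sketch(unsigned long pv)
{
        unsigned long pvr;

        asm("mfspr %0, 0x11F" : "=r" (pvr));    /* 0x11F = SPRN_PVR */
        return PVR_VER(pvr) == pv;
}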
@@ -158,7 +161,7 @@ static inline int __is_processor(unsigned long pv)
* is more like most of the other architectures.
*/
static __inline__ unsigned long
-__xchg_u32(volatile int *m, unsigned long val)
+__xchg_u32(volatile unsigned int *m, unsigned long val)
{
unsigned long dummy;
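Note: the hunk above shows only the new signature of __xchg_u32(); its body
is elided. On ppc64 an atomic exchange is conventionally built from the
lwarx/stwcx. reservation pair. A minimal sketch, not the kernel's exact
body (which also inserts SMP barrier macros from <asm/synch.h>):

static inline unsigned long
xchg_u32_sketch(volatile unsigned int *m, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2\n"      /* load old value, reserve the word   */
"       stwcx.  %3,0,%2\n"      /* store new value iff still reserved */
"       bne-    1b"             /* reservation lost: retry            */
        : "=&r" (prev), "+m" (*m)
        : "r" (m), "r" (val)
        : "cc", "memory");

        return prev;
}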
@@ -200,7 +203,7 @@ __xchg_u64(volatile long *m, unsigned long val)
extern void __xchg_called_with_bad_pointer(void);
static __inline__ unsigned long
-__xchg(volatile void *ptr, unsigned long x, int size)
+__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
switch (size) {
case 4:
@@ -223,7 +226,7 @@ __xchg(volatile void *ptr, unsigned long x, int size)
#define __HAVE_ARCH_CMPXCHG 1
static __inline__ unsigned long
-__cmpxchg_u32(volatile int *p, int old, int new)
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
unsigned int prev;
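Note: likewise for __cmpxchg_u32(), whose body is elided above. A minimal
compare-and-exchange sketch under the same assumptions:

static inline unsigned long
cmpxchg_u32_sketch(volatile unsigned int *p, unsigned long old,
                   unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2\n"      /* load current value, reserve word */
"       cmpw    0,%0,%3\n"      /* equal to the expected old value? */
"       bne-    2f\n"           /* no: return what we saw           */
"       stwcx.  %4,0,%2\n"      /* yes: try to store the new value  */
"       bne-    1b\n"           /* reservation lost: retry          */
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}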
@@ -271,7 +274,8 @@ __cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
extern void __cmpxchg_called_with_bad_pointer(void);
static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
{
switch (size) {
case 4:
@@ -283,13 +287,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
return old;
}
-#define cmpxchg(ptr,o,n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
/*
* We handle most unaligned accesses in hardware. On the other hand
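Note: the rewritten cmpxchg() macro drops the two temporaries but keeps the
same contract: both operands are cast through unsigned long and the result
is cast back to the pointee type. A hypothetical caller, a lock-free
counter increment, shows the usage pattern:

/* Hypothetical user of cmpxchg(): loops until no other CPU has changed
 * the counter between the plain read and the compare-and-swap. */
static inline void atomic_inc_sketch(volatile unsigned int *counter)
{
        unsigned int old;

        do {
                old = *counter;
        } while (cmpxchg(counter, old, old + 1) != old);
}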
@@ -302,5 +302,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
#define arch_align_stack(x) (x)
+extern unsigned long reloc_offset(void);
+
#endif /* __KERNEL__ */
#endif