author     Tejun Heo <tj@kernel.org>	2010-01-05 09:17:33 +0900
committer  Tejun Heo <tj@kernel.org>	2010-01-05 09:17:33 +0900
commit     32032df6c2f6c9c6b2ada2ce42322231824f70c2 (patch)
tree       b1ce838a37044bb38dfc128e2116ca35630e629a /arch/parisc
parent     percpu: refactor the code in pcpu_[de]populate_chunk() (diff)
parent     Merge branch 'limits_cleanup' of git://decibel.fi.muni.cz/~xslaby/linux (diff)
Merge branch 'master' into percpu
Conflicts:
	arch/powerpc/platforms/pseries/hvCall.S
	include/linux/percpu.h
Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/hpux/sys_hpux.c | 7
-rw-r--r--  arch/parisc/include/asm/asm-offsets.h | 1
-rw-r--r--  arch/parisc/include/asm/atomic.h | 10
-rw-r--r--  arch/parisc/include/asm/bug.h | 4
-rw-r--r--  arch/parisc/include/asm/cacheflush.h | 1
-rw-r--r--  arch/parisc/include/asm/elf.h | 1
-rw-r--r--  arch/parisc/include/asm/fcntl.h | 7
-rw-r--r--  arch/parisc/include/asm/fixmap.h | 4
-rw-r--r--  arch/parisc/include/asm/ftrace.h | 14
-rw-r--r--  arch/parisc/include/asm/hardirq.h | 20
-rw-r--r--  arch/parisc/include/asm/ptrace.h | 5
-rw-r--r--  arch/parisc/include/asm/socket.h | 2
-rw-r--r--  arch/parisc/include/asm/spinlock.h | 64
-rw-r--r--  arch/parisc/include/asm/spinlock_types.h | 12
-rw-r--r--  arch/parisc/include/asm/syscall.h | 40
-rw-r--r--  arch/parisc/include/asm/thread_info.h | 14
-rw-r--r--  arch/parisc/kernel/asm-offsets.c | 7
-rw-r--r--  arch/parisc/kernel/entry.S | 21
-rw-r--r--  arch/parisc/kernel/irq.c | 13
-rw-r--r--  arch/parisc/kernel/module.c | 2
-rw-r--r--  arch/parisc/kernel/perf.c | 2
-rw-r--r--  arch/parisc/kernel/ptrace.c | 42
-rw-r--r--  arch/parisc/kernel/signal.c | 6
-rw-r--r--  arch/parisc/kernel/smp.c | 9
-rw-r--r--  arch/parisc/kernel/sys_parisc.c | 30
-rw-r--r--  arch/parisc/kernel/sys_parisc32.c | 77
-rw-r--r--  arch/parisc/kernel/syscall.S | 22
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 2
-rw-r--r--  arch/parisc/kernel/unwind.c | 52
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S | 16
-rw-r--r--  arch/parisc/lib/bitops.c | 4
-rw-r--r--  arch/parisc/mm/init.c | 11
33 files changed, 250 insertions, 273 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index f388dc68f605..524d9352f17e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,6 +18,7 @@ config PARISC
select BUG
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
+ select HAVE_ARCH_TRACEHOOK
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index 18072e03a019..92343bd35fa3 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -445,12 +445,7 @@ done:
int hpux_pipe(int *kstack_fildes)
{
- int error;
-
- lock_kernel();
- error = do_pipe_flags(kstack_fildes, 0);
- unlock_kernel();
- return error;
+ return do_pipe_flags(kstack_fildes, 0);
}
/* lies - says it works, but it really didn't lock anything */
diff --git a/arch/parisc/include/asm/asm-offsets.h b/arch/parisc/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/parisc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 8bc9e96699b2..716634d1f546 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -27,19 +27,19 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
- __raw_spin_lock(s); \
+ arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
- __raw_spin_unlock(s); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
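For context, these hashed spinlocks are how parisc serializes atomic operations on SMP, since the hardware's only atomic primitive is ldcw; a minimal sketch (not part of this patch) of how an operation in the same header uses the helpers:

	/* sketch: serialize the read-modify-write through the hash-bucket lock */
	static inline void example_atomic_add(int i, atomic_t *v)
	{
		unsigned long flags;

		_atomic_spin_lock_irqsave(v, flags);
		v->counter += i;
		_atomic_spin_unlock_irqrestore(v, flags);
	}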
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 8cfc553fc837..75e46c557a16 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -32,14 +32,14 @@
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (0), "i" (sizeof(struct bug_entry)) ); \
- for(;;) ; \
+ unreachable(); \
} while(0)
#else
#define BUG() \
do { \
asm volatile(PARISC_BUG_BREAK_ASM : : ); \
- for(;;) ; \
+ unreachable(); \
} while(0)
#endif
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 724395143f26..7a73b615c23d 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -42,6 +42,7 @@ void flush_cache_mm(struct mm_struct *mm);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) \
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index 9c802eb4be84..19f6cb1a4a1c 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -328,7 +328,6 @@ struct pt_regs; /* forward declaration... */
such function. */
#define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/parisc/include/asm/fcntl.h b/arch/parisc/include/asm/fcntl.h
index 5f39d5597ced..f357fc693c89 100644
--- a/arch/parisc/include/asm/fcntl.h
+++ b/arch/parisc/include/asm/fcntl.h
@@ -1,14 +1,13 @@
#ifndef _PARISC_FCNTL_H
#define _PARISC_FCNTL_H
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
- located on an ext2 file system */
#define O_APPEND 000000010
#define O_BLKSEEK 000000100 /* HPUX only */
#define O_CREAT 000000400 /* not fcntl */
#define O_EXCL 000002000 /* not fcntl */
#define O_LARGEFILE 000004000
-#define O_SYNC 000100000
+#define __O_SYNC 000100000
+#define O_SYNC (__O_SYNC|O_DSYNC)
#define O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */
#define O_NOCTTY 000400000 /* not fcntl */
#define O_DSYNC 001000000 /* HPUX only */
@@ -28,8 +27,6 @@
#define F_SETOWN 12 /* for sockets. */
#define F_SETSIG 13 /* for sockets. */
#define F_GETSIG 14 /* for sockets. */
-#define F_GETOWN_EX 15
-#define F_SETOWN_EX 16
/* for posix fcntl() and lockf() */
#define F_RDLCK 01
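With O_SYNC now defined as __O_SYNC|O_DSYNC, a file opened with plain O_DSYNC carries only the data-sync bit, while O_SYNC sets both; a hypothetical check illustrating the distinction (generic sketch, not code from this patch):

	/* hypothetical helper: true only for full O_SYNC, not plain O_DSYNC */
	static inline int wants_full_sync(int flags)
	{
		return (flags & O_SYNC) == O_SYNC;
	}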
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h
index de3fe3a18229..6fec4d4a1a18 100644
--- a/arch/parisc/include/asm/fixmap.h
+++ b/arch/parisc/include/asm/fixmap.h
@@ -21,9 +21,9 @@
#define KERNEL_MAP_END (TMPALIAS_MAP_START)
#ifndef __ASSEMBLY__
-extern void *vmalloc_start;
+extern void *parisc_vmalloc_start;
#define PCXL_DMA_MAP_SIZE (8*1024*1024)
-#define VMALLOC_START ((unsigned long)vmalloc_start)
+#define VMALLOC_START ((unsigned long)parisc_vmalloc_start)
#define VMALLOC_END (KERNEL_MAP_END)
#endif /*__ASSEMBLY__*/
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 2fa05dd6aeee..72c0fafaa039 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -20,6 +20,20 @@ struct ftrace_ret_stack {
* Defined in entry.S
*/
extern void return_to_handler(void);
+
+
+extern unsigned long return_address(unsigned int);
+
+#define HAVE_ARCH_CALLER_ADDR
+
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#define CALLER_ADDR1 return_address(1)
+#define CALLER_ADDR2 return_address(2)
+#define CALLER_ADDR3 return_address(3)
+#define CALLER_ADDR4 return_address(4)
+#define CALLER_ADDR5 return_address(5)
+#define CALLER_ADDR6 return_address(6)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PARISC_FTRACE_H */
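For context, CALLER_ADDR0 stays on the compiler builtin while the deeper levels go through the new unwinder-backed return_address() (implemented in kernel/unwind.c below); a hypothetical debug helper showing the macros in use:

	/* hypothetical snippet: print our immediate and second-level callers */
	static void show_callers(void)
	{
		printk(KERN_DEBUG "called from %pS <- %pS\n",
		       (void *)CALLER_ADDR0, (void *)CALLER_ADDR1);
	}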
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index ce93133d5112..0d68184a76cb 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -1,29 +1,11 @@
/* hardirq.h: PA-RISC hard IRQ support.
*
* Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
- *
- * The locking is really quite interesting. There's a cpu-local
- * count of how many interrupts are being handled, and a global
- * lock. An interrupt can only be serviced if the global lock
- * is free. You can't be sure no more interrupts are being
- * serviced until you've acquired the lock and then checked
- * all the per-cpu interrupt counts are all zero. It's a specialised
- * br_lock, and that's exactly how Sparc does it. We don't because
- * it's more locking for us. This way is lock-free in the interrupt path.
*/
#ifndef _PARISC_HARDIRQ_H
#define _PARISC_HARDIRQ_H
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
- unsigned long __softirq_pending; /* set_bit is used on this */
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-void ack_bad_irq(unsigned int irq);
+#include <asm-generic/hardirq.h>
#endif /* _PARISC_HARDIRQ_H */
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
index 302f68dc889c..aead40b16dd8 100644
--- a/arch/parisc/include/asm/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -59,8 +59,11 @@ void user_enable_block_step(struct task_struct *task);
#define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0)
#define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0)
#define instruction_pointer(regs) ((regs)->iaoq[0] & ~3)
+#define user_stack_pointer(regs) ((regs)->gr[30])
unsigned long profile_pc(struct pt_regs *);
extern void show_regs(struct pt_regs *);
-#endif
+
+
+#endif /* __KERNEL__ */
#endif
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
index 960b1e5d8e16..225b7d6a1a0a 100644
--- a/arch/parisc/include/asm/socket.h
+++ b/arch/parisc/include/asm/socket.h
@@ -59,6 +59,8 @@
#define SO_TIMESTAMPING 0x4020
#define SCM_TIMESTAMPING SO_TIMESTAMPING
+#define SO_RXQ_OVFL 0x4021
+
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.
*/
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fae03e136fa8..74036f436a3b 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
return *a == 0;
}
-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
-#define __raw_spin_unlock_wait(x) \
- do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+#define arch_spin_unlock_wait(x) \
+ do { cpu_relax(); } while (arch_spin_is_locked(x))
-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
mb();
}
-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
mb();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int arch_spin_trylock(arch_spinlock_t *x)
{
volatile unsigned int *a;
int ret;
@@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
rw->counter++;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
rw->counter--;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
- if (__raw_spin_trylock(&rw->lock)) {
+ if (arch_spin_trylock(&rw->lock)) {
rw->counter++;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
return 1;
}
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
return 0;
/* Wait until we have a realistic chance at the lock */
- while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+ while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
cpu_relax();
goto retry;
@@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
if (rw->counter != 0) {
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
while (rw->counter != 0)
@@ -141,27 +141,27 @@ retry:
local_irq_restore(flags);
}
-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
rw->counter = 0;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
int result = 0;
local_irq_save(flags);
- if (__raw_spin_trylock(&rw->lock)) {
+ if (arch_spin_trylock(&rw->lock)) {
if (rw->counter == 0) {
rw->counter = -1;
result = 1;
} else {
/* Read-locked. Oh well. */
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
}
}
local_irq_restore(flags);
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
return rw->counter >= 0;
}
@@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
return !rw->counter;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 3f72f47cf4b2..8c373aa28a86 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -4,18 +4,18 @@
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
-# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
volatile unsigned int lock[4];
-# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
-} raw_spinlock_t;
+} arch_spinlock_t;
typedef struct {
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
volatile int counter;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
#endif
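The four-word lock body on non-PA2.0 configurations exists because ldcw only operates on a 16-byte aligned word; a simplified sketch of the alignment trick the locking code relies on (an assumption modeled on the __ldcw_align helper found elsewhere in the parisc headers):

	/* sketch (assumption): pick the 16-byte aligned word inside lock[4] */
	static inline volatile unsigned int *lock_word(arch_spinlock_t *x)
	{
		unsigned long a = (unsigned long)&x->lock[0];

		return (volatile unsigned int *)((a + 15UL) & ~15UL);
	}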
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
new file mode 100644
index 000000000000..8bdfd2c8c39f
--- /dev/null
+++ b/arch/parisc/include/asm/syscall.h
@@ -0,0 +1,40 @@
+/* syscall.h */
+
+#ifndef _ASM_PARISC_SYSCALL_H_
+#define _ASM_PARISC_SYSCALL_H_
+
+#include <linux/err.h>
+#include <asm/ptrace.h>
+
+static inline long syscall_get_nr(struct task_struct *tsk,
+ struct pt_regs *regs)
+{
+ return regs->gr[20];
+}
+
+static inline void syscall_get_arguments(struct task_struct *tsk,
+ struct pt_regs *regs, unsigned int i,
+ unsigned int n, unsigned long *args)
+{
+ BUG_ON(i);
+
+ switch (n) {
+ case 6:
+ args[5] = regs->gr[21];
+ case 5:
+ args[4] = regs->gr[22];
+ case 4:
+ args[3] = regs->gr[23];
+ case 3:
+ args[2] = regs->gr[24];
+ case 2:
+ args[1] = regs->gr[25];
+ case 1:
+ args[0] = regs->gr[26];
+ break;
+ default:
+ BUG();
+ }
+}
+
+#endif /*_ASM_PARISC_SYSCALL_H_*/
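For context, this new header provides the generic syscall-inspection interface that tracehook-style callers consume from a pt_regs snapshot; a hypothetical consumer might look like:

	/* hypothetical tracer snippet: log the syscall number and first argument */
	static void log_syscall(struct task_struct *tsk, struct pt_regs *regs)
	{
		unsigned long args[6];

		syscall_get_arguments(tsk, regs, 0, 6, args);
		printk(KERN_DEBUG "syscall %ld arg0=%lx\n",
		       syscall_get_nr(tsk, regs), args[0]);
	}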
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index ac775a76bff7..7ecc1039cfed 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -32,6 +32,11 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
+/* how to get the thread information struct from C */
+#define current_thread_info() ((struct thread_info *)mfctl(30))
+
+#endif /* !__ASSEMBLY */
+
/* thread information allocation */
#define THREAD_SIZE_ORDER 2
@@ -40,11 +45,6 @@ struct thread_info {
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
-/* how to get the thread information struct from C */
-#define current_thread_info() ((struct thread_info *)mfctl(30))
-
-#endif /* !__ASSEMBLY */
-
#define PREEMPT_ACTIVE_BIT 28
#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
@@ -60,6 +60,8 @@ struct thread_info {
#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
#define TIF_FREEZE 7 /* is freezing for suspend */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
+#define TIF_SINGLESTEP 9 /* single stepping? */
+#define TIF_BLOCKSTEP 10 /* branch stepping? */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -69,6 +71,8 @@ struct thread_info {
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
_TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 699cf8ef2118..ec787b411e9a 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -244,9 +244,6 @@ int main(void)
DEFINE(THREAD_SZ, sizeof(struct thread_info));
DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
BLANK();
- DEFINE(IRQSTAT_SIRQ_PEND, offsetof(irq_cpustat_t, __softirq_pending));
- DEFINE(IRQSTAT_SZ, sizeof(irq_cpustat_t));
- BLANK();
DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
@@ -270,8 +267,8 @@ int main(void)
DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
BLANK();
- DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT);
- DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT);
+ DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
+ DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
BLANK();
DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 8c4712b74dc1..3a44f7f704fa 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -2047,12 +2047,13 @@ syscall_do_signal:
b,n syscall_check_sig
syscall_restore:
- /* Are we being ptraced? */
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldw TASK_PTRACE(%r1), %r19
- bb,< %r19,31,syscall_restore_rfi
- nop
+ /* Are we being ptraced? */
+ ldw TASK_FLAGS(%r1),%r19
+ ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
+ and,COND(=) %r19,%r2,%r0
+ b,n syscall_restore_rfi
ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
rest_fp %r19
@@ -2113,16 +2114,16 @@ syscall_restore_rfi:
ldi 0x0b,%r20 /* Create new PSW */
depi -1,13,1,%r20 /* C, Q, D, and I bits */
- /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
- * set in include/linux/ptrace.h and converted to PA bitmap
+ /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
+ * set in thread_info.h and converted to PA bitmap
* numbers in asm-offsets.c */
- /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
- extru,= %r19,PA_SINGLESTEP_BIT,1,%r0
+ /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
+ extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
depi -1,27,1,%r20 /* R bit */
- /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
- extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
+ /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
+ extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
depi -1,7,1,%r20 /* T bit */
STREG %r20,TASK_PT_PSW(%r1)
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 330f536a9324..efbcee5d2220 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -145,7 +145,7 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
#endif
static struct irq_chip cpu_interrupt_type = {
- .typename = "CPU",
+ .name = "CPU",
.startup = cpu_startup_irq,
.shutdown = cpu_disable_irq,
.enable = cpu_enable_irq,
@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i < NR_IRQS) {
struct irqaction *action;
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -192,7 +192,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->name);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
@@ -423,8 +423,3 @@ void __init init_IRQ(void)
set_eiem(cpu_eiem); /* EIEM : enable all external intr */
}
-
-void ack_bad_irq(unsigned int irq)
-{
- printk(KERN_WARNING "unexpected IRQ %d\n", irq);
-}
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 61ee0eec4e69..212074653df7 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -893,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr,
* ourselves */
for (i = 1; i < hdr->e_shnum; i++) {
if(sechdrs[i].sh_type == SHT_SYMTAB
- && (sechdrs[i].sh_type & SHF_ALLOC)) {
+ && (sechdrs[i].sh_flags & SHF_ALLOC)) {
int strindex = sechdrs[i].sh_link;
/* FIXME: AWFUL HACK
* The cast is to drop the const from
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 75099efb3bf3..f9f6783e4bdd 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -24,7 +24,7 @@
*
* This driver programs the PCX-U/PCX-W performance counters
* on the PA-RISC 2.0 chips. The driver keeps all images now
- * internally to the kernel to hopefully eliminate the possiblity
+ * internally to the kernel to hopefully eliminate the possibility
* of a bad image halting the CPU. Also, there are different
* images for the PCX-W and later chips vs the PCX-U chips.
*
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 927db3668b6f..c4f49e45129d 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -13,6 +13,7 @@
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
+#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/personality.h>
#include <linux/security.h>
@@ -35,7 +36,8 @@
*/
void ptrace_disable(struct task_struct *task)
{
- task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP);
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+ clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
/* make sure the trap bits are not set */
pa_psw(task)->r = 0;
@@ -55,8 +57,8 @@ void user_disable_single_step(struct task_struct *task)
void user_enable_single_step(struct task_struct *task)
{
- task->ptrace &= ~PT_BLOCKSTEP;
- task->ptrace |= PT_SINGLESTEP;
+ clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
if (pa_psw(task)->n) {
struct siginfo si;
@@ -98,8 +100,8 @@ void user_enable_single_step(struct task_struct *task)
void user_enable_block_step(struct task_struct *task)
{
- task->ptrace &= ~PT_SINGLESTEP;
- task->ptrace |= PT_BLOCKSTEP;
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+ set_tsk_thread_flag(task, TIF_BLOCKSTEP);
/* Enable taken branch trap. */
pa_psw(task)->r = 0;
@@ -263,22 +265,20 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
#endif
+long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ return -1L;
+
+ return regs->gr[20];
+}
-void syscall_trace(void)
+void do_syscall_trace_exit(struct pt_regs *regs)
{
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return;
- if (!(current->ptrace & PT_PTRACED))
- return;
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ int stepping = test_thread_flag(TIF_SINGLESTEP) ||
+ test_thread_flag(TIF_BLOCKSTEP);
+
+ if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, stepping);
}
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 8eb3c63c407a..fb37ac52e46c 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -21,11 +21,11 @@
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
+#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/compat.h>
#include <linux/elf.h>
-#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <asm/rt_sigframe.h>
#include <asm/uaccess.h>
@@ -34,7 +34,6 @@
#include <asm/asm-offsets.h>
#ifdef CONFIG_COMPAT
-#include <linux/compat.h>
#include "signal32.h"
#endif
@@ -468,6 +467,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ tracehook_signal_handler(sig, info, ka, regs, 0);
+
return 1;
}
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 1fd0f0cec037..3f2fce8ce6b6 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -60,8 +60,6 @@ static int smp_debug_lvl = 0;
#define smp_debug(lvl, ...) do { } while(0)
#endif /* DEBUG_SMP */
-DEFINE_SPINLOCK(smp_lock);
-
volatile struct task_struct *smp_init_current_idle_task;
/* track which CPU is booting */
@@ -69,7 +67,7 @@ static volatile int cpu_now_booting __cpuinitdata;
static int parisc_max_cpus __cpuinitdata = 1;
-DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
+static DEFINE_PER_CPU(spinlock_t, ipi_lock);
enum ipi_message_type {
IPI_NOP=0,
@@ -438,6 +436,11 @@ void __init smp_prepare_boot_cpu(void)
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(ipi_lock, cpu));
+
init_cpu_present(cpumask_of(0));
parisc_max_cpus = max_cpus;
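The deprecated SPIN_LOCK_UNLOCKED static initializer is dropped in favour of explicit boot-time initialization of each per-CPU copy; the same pattern in isolation, as a sketch with a hypothetical lock name:

	static DEFINE_PER_CPU(spinlock_t, example_lock);	/* hypothetical */

	static void __init example_locks_init(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			spin_lock_init(&per_cpu(example_lock, cpu));
	}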
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 71b31957c8f1..9147391afb03 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -110,37 +110,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
}
-static unsigned long do_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags, unsigned long fd,
- unsigned long pgoff)
-{
- struct file * file = NULL;
- unsigned long error = -EBADF;
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file != NULL)
- fput(file);
-out:
- return error;
-}
-
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long pgoff)
{
/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
we have. */
- return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
}
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
@@ -148,7 +125,8 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
unsigned long offset)
{
if (!(offset & ~PAGE_MASK)) {
- return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ offset >> PAGE_SHIFT);
} else {
return -EINVAL;
}
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 561388b17c91..9779ece2b070 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -26,13 +26,7 @@
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
-#include <linux/nfs_fs.h>
#include <linux/ncp_fs.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
@@ -90,77 +84,6 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
return -ENOSYS;
}
-#ifdef CONFIG_SYSCTL
-
-struct __sysctl_args32 {
- u32 name;
- int nlen;
- u32 oldval;
- u32 oldlenp;
- u32 newval;
- u32 newlen;
- u32 __unused[4];
-};
-
-asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
-{
-#ifndef CONFIG_SYSCTL_SYSCALL
- return -ENOSYS;
-#else
- struct __sysctl_args32 tmp;
- int error;
- unsigned int oldlen32;
- size_t oldlen, __user *oldlenp = NULL;
- unsigned long addr = (((long __force)&args->__unused[0]) + 7) & ~7;
-
- DBG(("sysctl32(%p)\n", args));
-
- if (copy_from_user(&tmp, args, sizeof(tmp)))
- return -EFAULT;
-
- if (tmp.oldval && tmp.oldlenp) {
- /* Duh, this is ugly and might not work if sysctl_args
- is in read-only memory, but do_sysctl does indirectly
- a lot of uaccess in both directions and we'd have to
- basically copy the whole sysctl.c here, and
- glibc's __sysctl uses rw memory for the structure
- anyway. */
- /* a possibly better hack than this, which will avoid the
- * problem if the struct is read only, is to push the
- * 'oldlen' value out to the user's stack instead. -PB
- */
- if (get_user(oldlen32, (u32 *)(u64)tmp.oldlenp))
- return -EFAULT;
- oldlen = oldlen32;
- if (put_user(oldlen, (size_t *)addr))
- return -EFAULT;
- oldlenp = (size_t *)addr;
- }
-
- lock_kernel();
- error = do_sysctl((int __user *)(u64)tmp.name, tmp.nlen,
- (void __user *)(u64)tmp.oldval, oldlenp,
- (void __user *)(u64)tmp.newval, tmp.newlen);
- unlock_kernel();
- if (oldlenp) {
- if (!error) {
- if (get_user(oldlen, (size_t *)addr)) {
- error = -EFAULT;
- } else {
- oldlen32 = oldlen;
- if (put_user(oldlen32, (u32 *)(u64)tmp.oldlenp))
- error = -EFAULT;
- }
- }
- if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
- error = -EFAULT;
- }
- return error;
-#endif
-}
-
-#endif /* CONFIG_SYSCTL */
-
asmlinkage long sys32_sched_rr_get_interval(pid_t pid,
struct compat_timespec __user *interval)
{
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 59fc1a43ec3e..f5f96021caa0 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -288,18 +288,23 @@ tracesys:
STREG %r18,PT_GR18(%r2)
/* Finished saving things for the debugger */
- ldil L%syscall_trace,%r1
+ copy %r2,%r26
+ ldil L%do_syscall_trace_enter,%r1
ldil L%tracesys_next,%r2
- be R%syscall_trace(%sr7,%r1)
+ be R%do_syscall_trace_enter(%sr7,%r1)
ldo R%tracesys_next(%r2),%r2
-tracesys_next:
+tracesys_next:
+ /* do_syscall_trace_enter either returned the syscallno, or -1L,
+ * so we skip restoring the PT_GR20 below, since we pulled it from
+ * task->thread.regs.gr[20] above.
+ */
+ copy %ret0,%r20
ldil L%sys_call_table,%r1
ldo R%sys_call_table(%r1), %r19
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
- LDREG TASK_PT_GR20(%r1), %r20
LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
LDREG TASK_PT_GR25(%r1), %r25
LDREG TASK_PT_GR24(%r1), %r24
@@ -336,7 +341,8 @@ tracesys_exit:
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl syscall_trace, %r2
+ ldo TASK_REGS(%r1),%r26
+ bl do_syscall_trace_exit,%r2
STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
@@ -353,12 +359,12 @@ tracesys_exit:
tracesys_sigexit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
- LDREG 0(%r1), %r1
+ LDREG TI_TASK(%r1), %r1
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl syscall_trace, %r2
- nop
+ bl do_syscall_trace_exit,%r2
+ ldo TASK_REGS(%r1),%r26
ldil L%syscall_exit_rfi,%r1
be,n R%syscall_exit_rfi(%sr7,%r1)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 843f423dec67..01c4fcf8f481 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -234,7 +234,7 @@
ENTRY_SAME(getsid)
ENTRY_SAME(fdatasync)
/* struct __sysctl_args is a mess */
- ENTRY_DIFF(sysctl)
+ ENTRY_COMP(sysctl)
ENTRY_SAME(mlock) /* 150 */
ENTRY_SAME(munlock)
ENTRY_SAME(mlockall)
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 69dad5a850a8..d58eac1a8288 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
+#include <linux/sort.h>
#include <asm/uaccess.h>
#include <asm/assembly.h>
@@ -28,7 +29,7 @@
#define dbg(x...)
#endif
-#define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000)
+#define KERNEL_START (KERNEL_BINARY_TEXT_START)
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];
@@ -115,24 +116,18 @@ unwind_table_init(struct unwind_table *table, const char *name,
}
}
+static int cmp_unwind_table_entry(const void *a, const void *b)
+{
+ return ((const struct unwind_table_entry *)a)->region_start
+ - ((const struct unwind_table_entry *)b)->region_start;
+}
+
static void
unwind_table_sort(struct unwind_table_entry *start,
struct unwind_table_entry *finish)
{
- struct unwind_table_entry el, *p, *q;
-
- for (p = start + 1; p < finish; ++p) {
- if (p[0].region_start < p[-1].region_start) {
- el = *p;
- q = p;
- do {
- q[0] = q[-1];
- --q;
- } while (q > start &&
- el.region_start < q[-1].region_start);
- *q = el;
- }
- }
+ sort(start, finish - start, sizeof(struct unwind_table_entry),
+ cmp_unwind_table_entry, NULL);
}
struct unwind_table *
@@ -417,3 +412,30 @@ int unwind_to_user(struct unwind_frame_info *info)
return ret;
}
+
+unsigned long return_address(unsigned int level)
+{
+ struct unwind_frame_info info;
+ struct pt_regs r;
+ unsigned long sp;
+
+ /* initialize unwind info */
+ asm volatile ("copy %%r30, %0" : "=r"(sp));
+ memset(&r, 0, sizeof(struct pt_regs));
+ r.iaoq[0] = (unsigned long) current_text_addr();
+ r.gr[2] = (unsigned long) __builtin_return_address(0);
+ r.gr[30] = sp;
+ unwind_frame_init(&info, current, &r);
+
+ /* unwind stack */
+ ++level;
+ do {
+ if (unwind_once(&info) < 0 || info.ip == 0)
+ return 0;
+ if (!__kernel_text_address(info.ip)) {
+ return 0;
+ }
+ } while (info.ip && level--);
+
+ return info.ip;
+}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 775be2791bc2..9dab4a4e09f7 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -28,6 +28,7 @@
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
@@ -77,9 +78,6 @@ SECTIONS
*/
. = ALIGN(PAGE_SIZE);
data_start = .;
- EXCEPTION_TABLE(16)
-
- NOTES
/* unwind info */
.PARISC.unwind : {
@@ -88,6 +86,9 @@ SECTIONS
__stop___unwind = .;
}
+ EXCEPTION_TABLE(16)
+ NOTES
+
/* Data */
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
@@ -134,6 +135,15 @@ SECTIONS
__init_begin = .;
INIT_TEXT_SECTION(16384)
INIT_DATA_SECTION(16)
+ /* we have to discard exit text and such at runtime, not link time */
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
+ .exit.data :
+ {
+ EXIT_DATA
+ }
PERCPU(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index e3eb739fab19..353963d42059 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -12,8 +12,8 @@
#include <asm/atomic.h>
#ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
- [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+ [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index d5aca31fddbb..13b6e3e59b99 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -434,8 +434,8 @@ void mark_rodata_ro(void)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
& ~(VM_MAP_OFFSET-1)))
-void *vmalloc_start __read_mostly;
-EXPORT_SYMBOL(vmalloc_start);
+void *parisc_vmalloc_start __read_mostly;
+EXPORT_SYMBOL(parisc_vmalloc_start);
#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
@@ -496,13 +496,14 @@ void __init mem_init(void)
#ifdef CONFIG_PA11
if (hppa_dma_ops == &pcxl_dma_ops) {
pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
- vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
+ parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
+ + PCXL_DMA_MAP_SIZE);
} else {
pcxl_dma_start = 0;
- vmalloc_start = SET_MAP_OFFSET(MAP_START);
+ parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
}
#else
- vmalloc_start = SET_MAP_OFFSET(MAP_START);
+ parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",