author    Linus Torvalds <torvalds@g5.osdl.org>  2005-11-04 16:27:50 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-11-04 16:27:50 -0800
commit    602d4a7e2f4b843d1a67375d4d7104073495b758 (patch)
tree      0b9f184e54fa693c27bd5986c114bdcf6949f788 /include
parent    NFS,SUNRPC,NLM: fix unused variable warnings when CONFIG_SYSCTL is disabled (diff)
parent    powerpc: Fix vmlinux.lds.S for 32-bit (diff)
download  linux-dev-602d4a7e2f4b843d1a67375d4d7104073495b758.tar.xz
          linux-dev-602d4a7e2f4b843d1a67375d4d7104073495b758.zip
Merge master.kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc-merge
Diffstat (limited to 'include')
-rw-r--r--  include/asm-powerpc/bitops.h | 437
-rw-r--r--  include/asm-powerpc/bug.h | 34
-rw-r--r--  include/asm-powerpc/elf.h | 22
-rw-r--r--  include/asm-powerpc/futex.h (renamed from include/asm-ppc64/futex.h) | 45
-rw-r--r--  include/asm-powerpc/ioctls.h | 3
-rw-r--r--  include/asm-powerpc/ipcbuf.h | 34
-rw-r--r--  include/asm-powerpc/irq.h | 2
-rw-r--r--  include/asm-powerpc/iseries/hv_call.h (renamed from include/asm-ppc64/iSeries/HvCall.h) | 10
-rw-r--r--  include/asm-powerpc/iseries/hv_call_event.h (renamed from include/asm-ppc64/iSeries/HvCallEvent.h) | 10
-rw-r--r--  include/asm-powerpc/iseries/hv_call_sc.h (renamed from include/asm-ppc64/iSeries/HvCallSc.h) | 6
-rw-r--r--  include/asm-powerpc/iseries/hv_call_xm.h (renamed from include/asm-ppc64/iSeries/HvCallXm.h) | 10
-rw-r--r--  include/asm-powerpc/iseries/hv_lp_config.h (renamed from include/asm-ppc64/iSeries/HvLpConfig.h) | 12
-rw-r--r--  include/asm-powerpc/iseries/hv_lp_event.h (renamed from include/asm-ppc64/iSeries/HvLpEvent.h) | 10
-rw-r--r--  include/asm-powerpc/iseries/hv_types.h (renamed from include/asm-ppc64/iSeries/HvTypes.h) | 6
-rw-r--r--  include/asm-powerpc/iseries/iseries_io.h (renamed from include/asm-ppc64/iSeries/iSeries_io.h) | 6
-rw-r--r--  include/asm-powerpc/iseries/it_exp_vpd_panel.h (renamed from include/asm-ppc64/iSeries/ItExtVpdPanel.h) | 6
-rw-r--r--  include/asm-powerpc/iseries/it_lp_naca.h (renamed from include/asm-ppc64/iSeries/ItLpNaca.h) | 6
-rw-r--r--  include/asm-powerpc/iseries/it_lp_queue.h (renamed from include/asm-ppc64/iSeries/ItLpQueue.h) | 6
-rw-r--r--  include/asm-powerpc/iseries/it_lp_reg_save.h (renamed from include/asm-ppc64/iSeries/ItLpRegSave.h) | 4
-rw-r--r--  include/asm-powerpc/iseries/lpar_map.h (renamed from include/asm-ppc64/iSeries/LparMap.h) | 6
-rw-r--r--  include/asm-powerpc/iseries/mf.h (renamed from include/asm-ppc64/iSeries/mf.h) | 10
-rw-r--r--  include/asm-powerpc/iseries/vio.h (renamed from include/asm-ppc64/iSeries/vio.h) | 10
-rw-r--r--  include/asm-powerpc/kexec.h | 49
-rw-r--r--  include/asm-powerpc/machdep.h | 1
-rw-r--r--  include/asm-powerpc/numnodes.h | 7
-rw-r--r--  include/asm-powerpc/ppc_asm.h | 7
-rw-r--r--  include/asm-powerpc/processor.h | 2
-rw-r--r--  include/asm-powerpc/ptrace.h (renamed from include/asm-ppc64/ptrace.h) | 143
-rw-r--r--  include/asm-powerpc/rtas.h | 25
-rw-r--r--  include/asm-powerpc/sigcontext.h (renamed from include/asm-ppc64/sigcontext.h) | 41
-rw-r--r--  include/asm-powerpc/smp.h (renamed from include/asm-ppc64/smp.h) | 45
-rw-r--r--  include/asm-powerpc/sparsemem.h (renamed from include/asm-ppc64/sparsemem.h) | 6
-rw-r--r--  include/asm-powerpc/stat.h | 81
-rw-r--r--  include/asm-powerpc/system.h | 48
-rw-r--r--  include/asm-powerpc/termios.h | 135
-rw-r--r--  include/asm-powerpc/time.h | 2
-rw-r--r--  include/asm-powerpc/tlb.h (renamed from include/asm-ppc/tlb.h) | 59
-rw-r--r--  include/asm-powerpc/tlbflush.h | 146
-rw-r--r--  include/asm-powerpc/uaccess.h | 468
-rw-r--r--  include/asm-powerpc/ucontext.h | 40
-rw-r--r--  include/asm-ppc/bitops.h | 460
-rw-r--r--  include/asm-ppc/commproc.h | 2
-rw-r--r--  include/asm-ppc/futex.h | 53
-rw-r--r--  include/asm-ppc/ipcbuf.h | 29
-rw-r--r--  include/asm-ppc/kexec.h | 40
-rw-r--r--  include/asm-ppc/ptrace.h | 152
-rw-r--r--  include/asm-ppc/sigcontext.h | 15
-rw-r--r--  include/asm-ppc/stat.h | 69
-rw-r--r--  include/asm-ppc/tlbflush.h | 115
-rw-r--r--  include/asm-ppc/uaccess.h | 393
-rw-r--r--  include/asm-ppc/ucontext.h | 27
-rw-r--r--  include/asm-ppc64/bitops.h | 360
-rw-r--r--  include/asm-ppc64/dart.h | 59
-rw-r--r--  include/asm-ppc64/io.h | 2
-rw-r--r--  include/asm-ppc64/ipcbuf.h | 28
-rw-r--r--  include/asm-ppc64/kexec.h | 41
-rw-r--r--  include/asm-ppc64/mmu_context.h | 15
-rw-r--r--  include/asm-ppc64/naca.h | 24
-rw-r--r--  include/asm-ppc64/numnodes.h | 7
-rw-r--r--  include/asm-ppc64/nvram.h | 2
-rw-r--r--  include/asm-ppc64/paca.h | 2
-rw-r--r--  include/asm-ppc64/plpar_wrappers.h | 120
-rw-r--r--  include/asm-ppc64/ppc32.h | 122
-rw-r--r--  include/asm-ppc64/spinlock.h | 2
-rw-r--r--  include/asm-ppc64/stat.h | 60
-rw-r--r--  include/asm-ppc64/tlb.h | 39
-rw-r--r--  include/asm-ppc64/tlbflush.h | 52
-rw-r--r--  include/asm-ppc64/uaccess.h | 341
-rw-r--r--  include/asm-ppc64/ucontext.h | 22
69 files changed, 1629 insertions, 3024 deletions
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
new file mode 100644
index 000000000000..dc25c53704d5
--- /dev/null
+++ b/include/asm-powerpc/bitops.h
@@ -0,0 +1,437 @@
+/*
+ * PowerPC atomic bit operations.
+ *
+ * Merged version by David Gibson <david@gibson.dropbear.id.au>.
+ * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
+ * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They
+ * originally took it from the ppc32 code.
+ *
+ * Within a word, bits are numbered LSB first. Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for a
+ * ppc64 system the bits end up numbered:
+ * |63..............0|127............64|191...........128|255...........192|
+ * and on ppc32:
+ * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps; these work on similar bit array layouts, but
+ * byte-oriented:
+ * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
+ * number field need to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_BITOPS_H
+#define _ASM_POWERPC_BITOPS_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <asm/atomic.h>
+#include <asm/synch.h>
+
+/*
+ * clear_bit doesn't imply a memory barrier
+ */
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+
+#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
+
+#ifdef CONFIG_PPC64
+#define LARXL "ldarx"
+#define STCXL "stdcx."
+#define CNTLZL "cntlzd"
+#else
+#define LARXL "lwarx"
+#define STCXL "stwcx."
+#define CNTLZL "cntlzw"
+#endif
+
+static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+"1:" LARXL " %0,0,%3 # set_bit\n"
+ "or %0,%0,%2\n"
+ PPC405_ERR77(0,%3)
+ STCXL " %0,0,%3\n"
+ "bne- 1b"
+ : "=&r"(old), "=m"(*p)
+ : "r"(mask), "r"(p), "m"(*p)
+ : "cc" );
+}
+
+static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+"1:" LARXL " %0,0,%3 # set_bit\n"
+ "andc %0,%0,%2\n"
+ PPC405_ERR77(0,%3)
+ STCXL " %0,0,%3\n"
+ "bne- 1b"
+ : "=&r"(old), "=m"(*p)
+ : "r"(mask), "r"(p), "m"(*p)
+ : "cc" );
+}
+
+static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+"1:" LARXL " %0,0,%3 # set_bit\n"
+ "xor %0,%0,%2\n"
+ PPC405_ERR77(0,%3)
+ STCXL " %0,0,%3\n"
+ "bne- 1b"
+ : "=&r"(old), "=m"(*p)
+ : "r"(mask), "r"(p), "m"(*p)
+ : "cc" );
+}
+
+static __inline__ int test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1:" LARXL " %0,0,%3 # test_and_set_bit\n"
+ "or %1,%0,%2 \n"
+ PPC405_ERR77(0,%3)
+ STCXL " %1,0,%3 \n"
+ "bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ int test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1:" LARXL " %0,0,%3 # test_and_clear_bit\n"
+ "andc %1,%0,%2 \n"
+ PPC405_ERR77(0,%3)
+ STCXL " %1,0,%3 \n"
+ "bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ int test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1:" LARXL " %0,0,%3 # test_and_change_bit\n"
+ "xor %1,%0,%2 \n"
+ PPC405_ERR77(0,%3)
+ STCXL " %1,0,%3 \n"
+ "bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
+{
+ unsigned long old;
+
+ __asm__ __volatile__(
+"1:" LARXL " %0,0,%3 # set_bit\n"
+ "or %0,%0,%2\n"
+ STCXL " %0,0,%3\n"
+ "bne- 1b"
+ : "=&r" (old), "=m" (*addr)
+ : "r" (mask), "r" (addr), "m" (*addr)
+ : "cc");
+}
+
+/* Non-atomic versions */
+static __inline__ int test_bit(unsigned long nr,
+ __const__ volatile unsigned long *addr)
+{
+ return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+static __inline__ void __set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ *p |= mask;
+}
+
+static __inline__ void __clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ *p &= ~mask;
+}
+
+static __inline__ void __change_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ *p ^= mask;
+}
+
+static __inline__ int __test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/*
+ * Return the zero-based bit position (LE, not IBM bit numbering) of
+ * the most significant 1-bit in an unsigned long.
+ */
+static __inline__ int __ilog2(unsigned long x)
+{
+ int lz;
+
+ asm (CNTLZL " %0,%1" : "=r" (lz) : "r" (x));
+ return BITS_PER_LONG - 1 - lz;
+}
+
+/*
+ * Determines the bit position of the least significant 0 bit in the
+ * specified unsigned long. The returned bit position will be
+ * zero-based, starting from the right side (63/31 - 0).
+ */
+static __inline__ unsigned long ffz(unsigned long x)
+{
+ /* no zero exists anywhere in the word */
+ if ((x = ~x) == 0)
+ return BITS_PER_LONG;
+
+ /*
+ * Calculate the bit position of the least significant '1' bit in x
+ * (since x has been changed, this will actually be the least
+ * significant '0' bit in the original x). Note: (x & -x) gives us
+ * a mask that is the least significant (RIGHT-most) 1-bit of the
+ * value in x.
+ */
+ return __ilog2(x & -x);
+}
+
+static __inline__ int __ffs(unsigned long x)
+{
+ return __ilog2(x & -x);
+}
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static __inline__ int ffs(int x)
+{
+ unsigned long i = (unsigned long)x;
+ return __ilog2(i & -i) + 1;
+}
+
+/*
+ * fls: find last (most-significant) bit set.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static __inline__ int fls(unsigned int x)
+{
+ int lz;
+
+ asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
+ return 32 - lz;
+}
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+#define hweight64(x) generic_hweight64(x)
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+unsigned long find_next_zero_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+unsigned long find_next_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+/* Little-endian versions */
+
+static __inline__ int test_le_bit(unsigned long nr,
+ __const__ unsigned long *addr)
+{
+ __const__ unsigned char *tmp = (__const__ unsigned char *) addr;
+ return (tmp[nr >> 3] >> (nr & 7)) & 1;
+}
+
+#define __set_le_bit(nr, addr) \
+ __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define __clear_le_bit(nr, addr) \
+ __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define test_and_set_le_bit(nr, addr) \
+ test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define test_and_clear_le_bit(nr, addr) \
+ test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define __test_and_set_le_bit(nr, addr) \
+ __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define __test_and_clear_le_bit(nr, addr) \
+ __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define find_first_zero_le_bit(addr, size) find_next_zero_le_bit((addr), (size), 0)
+unsigned long find_next_zero_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+/* Bitmap functions for the ext2 filesystem */
+
+#define ext2_set_bit(nr,addr) \
+ __test_and_set_le_bit((nr), (unsigned long*)addr)
+#define ext2_clear_bit(nr, addr) \
+ __test_and_clear_le_bit((nr), (unsigned long*)addr)
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_le_bit((nr), (unsigned long*)addr)
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_le_bit((nr), (unsigned long*)addr)
+
+#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
+
+#define ext2_find_first_zero_bit(addr, size) \
+ find_first_zero_le_bit((unsigned long*)addr, size)
+#define ext2_find_next_zero_bit(addr, size, off) \
+ find_next_zero_le_bit((unsigned long*)addr, size, off)
+
+/* Bitmap functions for the minix filesystem. */
+
+#define minix_test_and_set_bit(nr,addr) \
+ __test_and_set_le_bit(nr, (unsigned long *)addr)
+#define minix_set_bit(nr,addr) \
+ __set_le_bit(nr, (unsigned long *)addr)
+#define minix_test_and_clear_bit(nr,addr) \
+ __test_and_clear_le_bit(nr, (unsigned long *)addr)
+#define minix_test_bit(nr,addr) \
+ test_le_bit(nr, (unsigned long *)addr)
+
+#define minix_find_first_zero_bit(addr,size) \
+ find_first_zero_le_bit((unsigned long *)addr, size)
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is cleared.
+ */
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+#ifdef CONFIG_PPC64
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 64;
+ return __ffs(b[2]) + 128;
+#else
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 32;
+ if (unlikely(b[2]))
+ return __ffs(b[2]) + 64;
+ if (b[3])
+ return __ffs(b[3]) + 96;
+ return __ffs(b[4]) + 128;
+#endif
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_BITOPS_H */
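
The swizzle and mask macros above reduce every bitop to word-index/mask arithmetic, and ffz() leans on the (x & -x) lowest-set-bit trick. A minimal userspace sketch of that arithmetic (the macro bodies are copied from the header; the driver and sample values are illustrative):

#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG - 1) & ~0x7)

int main(void)
{
	unsigned long nr = 100;

	/* bit 100 lands in word 1 on 64-bit, word 3 on 32-bit */
	printf("word %lu, mask %#lx\n",
	       (unsigned long)BITOP_WORD(nr), BITOP_MASK(nr));

	/* byte-oriented LE numbering: flip only the intra-word
	 * byte-order bits, i.e. XOR with 0x38 (64b) or 0x18 (32b) */
	printf("le bit %lu\n", nr ^ BITOP_LE_SWIZZLE);

	/* ffz via (x & -x): invert, then isolate the lowest set bit --
	 * the lowest zero of 0xff is bit 8, so the mask is 0x100 */
	unsigned long x = 0xffUL, y = ~x;
	printf("ffz mask %#lx\n", y & -y);
	return 0;
}
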
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
index e4d028e87020..d625ee55f957 100644
--- a/include/asm-powerpc/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -12,20 +12,16 @@
#ifndef __ASSEMBLY__
#ifdef __powerpc64__
-#define BUG_TABLE_ENTRY(label, line, file, func) \
- ".llong " #label "\n .long " #line "\n .llong " #file ", " #func "\n"
-#define TRAP_OP(ra, rb) "1: tdnei " #ra ", " #rb "\n"
-#define DATA_TYPE long long
+#define BUG_TABLE_ENTRY ".llong"
+#define BUG_TRAP_OP "tdnei"
#else
-#define BUG_TABLE_ENTRY(label, line, file, func) \
- ".long " #label ", " #line ", " #file ", " #func "\n"
-#define TRAP_OP(ra, rb) "1: twnei " #ra ", " #rb "\n"
-#define DATA_TYPE int
+#define BUG_TABLE_ENTRY ".long"
+#define BUG_TRAP_OP "twnei"
#endif /* __powerpc64__ */
struct bug_entry {
unsigned long bug_addr;
- int line;
+ long line;
const char *file;
const char *function;
};
@@ -43,29 +39,29 @@ struct bug_entry *find_bug(unsigned long bugaddr);
#define BUG() do { \
__asm__ __volatile__( \
"1: twi 31,0,0\n" \
- ".section __bug_table,\"a\"\n\t" \
- BUG_TABLE_ENTRY(1b,%0,%1,%2) \
+ ".section __bug_table,\"a\"\n" \
+ "\t"BUG_TABLE_ENTRY" 1b,%0,%1,%2\n" \
".previous" \
: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
} while (0)
#define BUG_ON(x) do { \
__asm__ __volatile__( \
- TRAP_OP(%0,0) \
- ".section __bug_table,\"a\"\n\t" \
- BUG_TABLE_ENTRY(1b,%1,%2,%3) \
+ "1: "BUG_TRAP_OP" %0,0\n" \
+ ".section __bug_table,\"a\"\n" \
+ "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \
".previous" \
- : : "r" ((DATA_TYPE)(x)), "i" (__LINE__), \
+ : : "r" ((long)(x)), "i" (__LINE__), \
"i" (__FILE__), "i" (__FUNCTION__)); \
} while (0)
#define WARN_ON(x) do { \
__asm__ __volatile__( \
- TRAP_OP(%0,0) \
- ".section __bug_table,\"a\"\n\t" \
- BUG_TABLE_ENTRY(1b,%1,%2,%3) \
+ "1: "BUG_TRAP_OP" %0,0\n" \
+ ".section __bug_table,\"a\"\n" \
+ "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \
".previous" \
- : : "r" ((DATA_TYPE)(x)), \
+ : : "r" ((long)(x)), \
"i" (__LINE__ + BUG_WARNING_TRAP), \
"i" (__FILE__), "i" (__FUNCTION__)); \
} while (0)
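
The rewritten BUG()/BUG_ON()/WARN_ON() macros all work the same way: plant a trap instruction at a local label, and record that label's address plus line/file/function in the __bug_table section, which the trap handler searches by faulting address. A hypothetical sketch of that lookup (the section-bound symbols are the usual linker-generated names, assumed here rather than taken from this patch; find_bug_sketch is our own name):

/* Sketch of the lookup a find_bug()-style handler performs. */
struct bug_entry {
	unsigned long bug_addr;
	long line;
	const char *file;
	const char *function;
};

extern struct bug_entry __start___bug_table[], __stop___bug_table[];

static struct bug_entry *find_bug_sketch(unsigned long bugaddr)
{
	struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
		if (bugaddr == bug->bug_addr)	/* address of the tw/td trap */
			return bug;
	return 0;
}
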
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index d22b10021b5d..d140577d0a05 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -178,18 +178,22 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
struct pt_regs *regs)
{
- int i;
- int gprs = sizeof(struct pt_regs)/sizeof(ELF_GREG_TYPE);
+ int i, nregs;
- if (gprs > ELF_NGREG)
- gprs = ELF_NGREG;
+ memset((void *)elf_regs, 0, sizeof(elf_gregset_t));
- for (i=0; i < gprs; i++)
- elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
-
- memset((char *)(elf_regs) + sizeof(struct pt_regs), 0, \
- sizeof(elf_gregset_t) - sizeof(struct pt_regs));
+ /* Our registers are always unsigned longs, whether we're a 32 bit
+ * process or 64 bit, on either a 64 bit or 32 bit kernel.
+ * Don't use ELF_GREG_TYPE here. */
+ nregs = sizeof(struct pt_regs) / sizeof(unsigned long);
+ if (nregs > ELF_NGREG)
+ nregs = ELF_NGREG;
+ for (i = 0; i < nregs; i++) {
+ /* This will correctly truncate 64 bit registers to 32 bits
+ * for a 32 bit process on a 64 bit kernel. */
+ elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
+ }
}
#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
diff --git a/include/asm-ppc64/futex.h b/include/asm-powerpc/futex.h
index 266b460de44e..37c94e52ab6d 100644
--- a/include/asm-ppc64/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _ASM_POWERPC_FUTEX_H
+#define _ASM_POWERPC_FUTEX_H
#ifdef __KERNEL__
@@ -7,28 +7,29 @@
#include <asm/errno.h>
#include <asm/synch.h>
#include <asm/uaccess.h>
+#include <asm/ppc_asm.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
- __asm__ __volatile (SYNC_ON_SMP \
-"1: lwarx %0,0,%2\n" \
- insn \
-"2: stwcx. %1,0,%2\n\
- bne- 1b\n\
- li %1,0\n\
-3: .section .fixup,\"ax\"\n\
-4: li %1,%3\n\
- b 3b\n\
- .previous\n\
- .section __ex_table,\"a\"\n\
- .align 3\n\
- .llong 1b,4b,2b,4b\n\
- .previous" \
- : "=&r" (oldval), "=&r" (ret) \
- : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
+ __asm__ __volatile ( \
+ SYNC_ON_SMP \
+"1: lwarx %0,0,%2\n" \
+ insn \
+"2: stwcx. %1,0,%2\n" \
+ "bne- 1b\n" \
+ "li %1,0\n" \
+"3: .section .fixup,\"ax\"\n" \
+"4: li %1,%3\n" \
+ "b 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".align 3\n" \
+ DATAL " 1b,4b,2b,4b\n" \
+ ".previous" \
+ : "=&r" (oldval), "=&r" (ret) \
+ : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
: "cr0", "memory")
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -79,5 +80,5 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
return ret;
}
-#endif
-#endif
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_FUTEX_H */
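
futex_atomic_op_inuser() unpacks its encoded_op argument before entering the inline-asm loop: bits 31-28 select the operation, bits 27-24 the comparison, followed by two sign-extended 12-bit immediates, as the shift expressions at the top of the function show. A standalone decoder sketch (the shifts match the function body; the driver and sample value are ours):

#include <stdio.h>

static void decode_futex_op(int encoded_op)
{
	int op     = (encoded_op >> 28) & 7;	/* FUTEX_OP_SET, ADD, ... */
	int cmp    = (encoded_op >> 24) & 15;	/* FUTEX_OP_CMP_EQ, ...   */
	int oparg  = (encoded_op << 8) >> 20;	/* sign-extend bits 23..12 */
	int cmparg = (encoded_op << 20) >> 20;	/* sign-extend bits 11..0  */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
}

int main(void)
{
	/* op 1 (ADD), cmp 0 (EQ), oparg 1, cmparg 0 */
	decode_futex_op((1 << 28) | (0 << 24) | (1 << 12));
	return 0;
}
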
diff --git a/include/asm-powerpc/ioctls.h b/include/asm-powerpc/ioctls.h
index 5b94ff489b8b..279a6229584b 100644
--- a/include/asm-powerpc/ioctls.h
+++ b/include/asm-powerpc/ioctls.h
@@ -62,6 +62,9 @@
# define TIOCM_DSR 0x100
# define TIOCM_CD TIOCM_CAR
# define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
#define TIOCGSOFTCAR 0x5419
#define TIOCSSOFTCAR 0x541A
diff --git a/include/asm-powerpc/ipcbuf.h b/include/asm-powerpc/ipcbuf.h
new file mode 100644
index 000000000000..2c3e1d94db1d
--- /dev/null
+++ b/include/asm-powerpc/ipcbuf.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_POWERPC_IPCBUF_H
+#define _ASM_POWERPC_IPCBUF_H
+
+/*
+ * The ipc64_perm structure for the powerpc is identical to
+ * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the
+ * kernel. Note extra padding because this structure is passed back
+ * and forth between kernel and user space. Pad space is left for:
+ * - 1 32-bit value to fill up for 8-byte alignment
+ * - 2 miscellaneous 64-bit values
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid_t uid;
+ __kernel_gid_t gid;
+ __kernel_uid_t cuid;
+ __kernel_gid_t cgid;
+ __kernel_mode_t mode;
+ unsigned int seq;
+ unsigned int __pad1;
+ unsigned long long __unused1;
+ unsigned long long __unused2;
+};
+
+#endif /* _ASM_POWERPC_IPCBUF_H */
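
Because ipc64_perm crosses the user/kernel boundary, the padding the comment describes is part of the ABI. A compile-time size check, as a sketch: the concrete field types and the expected total of 48 bytes (eight 32-bit fields plus two 64-bit spares) are our own arithmetic under the "always 32-bit UIDs/GIDs" assumption stated above, not code from the patch:

struct ipc64_perm_sketch {
	int          key;			/* __kernel_key_t */
	unsigned int uid, gid, cuid, cgid, mode;
	unsigned int seq;
	unsigned int __pad1;			/* fills to an 8-byte boundary */
	unsigned long long __unused1, __unused2;
};

/* pre-C11 compile-time assert: negative array size on mismatch */
typedef char ipc64_perm_size_check[sizeof(struct ipc64_perm_sketch) == 48 ? 1 : -1];
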
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index c7c3f912a3c2..b3935ea28fff 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -73,7 +73,7 @@ extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
#define IC_INVALID 0
#define IC_OPEN_PIC 1
#define IC_PPC_XIC 2
-#define IC_BPA_IIC 3
+#define IC_CELL_PIC 3
#define IC_ISERIES 4
extern u64 ppc64_interrupt_controller;
diff --git a/include/asm-ppc64/iSeries/HvCall.h b/include/asm-powerpc/iseries/hv_call.h
index c3f19475c0d9..e9f831c9a5e5 100644
--- a/include/asm-ppc64/iSeries/HvCall.h
+++ b/include/asm-powerpc/iseries/hv_call.h
@@ -20,11 +20,11 @@
* This file contains the "hypervisor call" interface which is used to
* drive the hypervisor from the OS.
*/
-#ifndef _HVCALL_H
-#define _HVCALL_H
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_H
-#include <asm/iSeries/HvCallSc.h>
-#include <asm/iSeries/HvTypes.h>
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
#include <asm/paca.h>
/* Type of yield for HvCallBaseYieldProcessor */
@@ -110,4 +110,4 @@ static inline void HvCall_sendIPI(struct paca_struct *targetPaca)
HvCall1(HvCallBaseSendIPI, targetPaca->paca_index);
}
-#endif /* _HVCALL_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_H */
diff --git a/include/asm-ppc64/iSeries/HvCallEvent.h b/include/asm-powerpc/iseries/hv_call_event.h
index 5d9a327d0122..46763a30590a 100644
--- a/include/asm-ppc64/iSeries/HvCallEvent.h
+++ b/include/asm-powerpc/iseries/hv_call_event.h
@@ -20,11 +20,11 @@
* This file contains the "hypervisor call" interface which is used to
* drive the hypervisor from the OS.
*/
-#ifndef _HVCALLEVENT_H
-#define _HVCALLEVENT_H
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
-#include <asm/iSeries/HvCallSc.h>
-#include <asm/iSeries/HvTypes.h>
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
#include <asm/abs_addr.h>
struct HvLpEvent;
@@ -250,4 +250,4 @@ static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote,
return HvCall4(HvCallEventDmaToSp, abs_addr, remote, length, dir);
}
-#endif /* _HVCALLEVENT_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */
diff --git a/include/asm-ppc64/iSeries/HvCallSc.h b/include/asm-powerpc/iseries/hv_call_sc.h
index a62cef3822f9..dec7e9d9ab78 100644
--- a/include/asm-ppc64/iSeries/HvCallSc.h
+++ b/include/asm-powerpc/iseries/hv_call_sc.h
@@ -16,8 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _HVCALLSC_H
-#define _HVCALLSC_H
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_SC_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_SC_H
#include <linux/types.h>
@@ -48,4 +48,4 @@ extern u64 HvCall5Ret16(u64, void *, u64, u64, u64, u64, u64);
extern u64 HvCall6Ret16(u64, void *, u64, u64, u64, u64, u64, u64);
extern u64 HvCall7Ret16(u64, void *, u64, u64, u64, u64, u64, u64, u64);
-#endif /* _HVCALLSC_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_SC_H */
diff --git a/include/asm-ppc64/iSeries/HvCallXm.h b/include/asm-powerpc/iseries/hv_call_xm.h
index 8b9ba608daaf..ca9202cb01ed 100644
--- a/include/asm-ppc64/iSeries/HvCallXm.h
+++ b/include/asm-powerpc/iseries/hv_call_xm.h
@@ -2,11 +2,11 @@
* This file contains the "hypervisor call" interface which is used to
* drive the hypervisor from SLIC.
*/
-#ifndef _HVCALLXM_H
-#define _HVCALLXM_H
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_XM_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_XM_H
-#include <asm/iSeries/HvCallSc.h>
-#include <asm/iSeries/HvTypes.h>
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
#define HvCallXmGetTceTableParms HvCallXm + 0
#define HvCallXmTestBus HvCallXm + 1
@@ -75,4 +75,4 @@ static inline u64 HvCallXm_loadTod(void)
return HvCall0(HvCallXmLoadTod);
}
-#endif /* _HVCALLXM_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_XM_H */
diff --git a/include/asm-ppc64/iSeries/HvLpConfig.h b/include/asm-powerpc/iseries/hv_lp_config.h
index f1cf1e70ca3c..bc00f036bca0 100644
--- a/include/asm-ppc64/iSeries/HvLpConfig.h
+++ b/include/asm-powerpc/iseries/hv_lp_config.h
@@ -16,17 +16,17 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _HVLPCONFIG_H
-#define _HVLPCONFIG_H
+#ifndef _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
+#define _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
/*
* This file contains the interface to the LPAR configuration data
* to determine which resources should be allocated to each partition.
*/
-#include <asm/iSeries/HvCallSc.h>
-#include <asm/iSeries/HvTypes.h>
-#include <asm/iSeries/ItLpNaca.h>
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/it_lp_naca.h>
enum {
HvCallCfg_Cur = 0,
@@ -135,4 +135,4 @@ static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp)
return HvCall1(HvCallCfgGetHostingLpIndex, lp);
}
-#endif /* _HVLPCONFIG_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H */
diff --git a/include/asm-ppc64/iSeries/HvLpEvent.h b/include/asm-powerpc/iseries/hv_lp_event.h
index 865000de79b6..499ab1ad0185 100644
--- a/include/asm-ppc64/iSeries/HvLpEvent.h
+++ b/include/asm-powerpc/iseries/hv_lp_event.h
@@ -19,13 +19,13 @@
/* This file contains the class for HV events in the system. */
-#ifndef _HVLPEVENT_H
-#define _HVLPEVENT_H
+#ifndef _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
+#define _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
#include <asm/types.h>
#include <asm/ptrace.h>
-#include <asm/iSeries/HvTypes.h>
-#include <asm/iSeries/HvCallEvent.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_call_event.h>
/*
* HvLpEvent is the structure for Lp Event messages passed between
@@ -139,4 +139,4 @@ extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
#define HvLpDma_Rc_InvalidAddress 4
#define HvLpDma_Rc_InvalidLength 5
-#endif /* _HVLPEVENT_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */
diff --git a/include/asm-ppc64/iSeries/HvTypes.h b/include/asm-powerpc/iseries/hv_types.h
index b1ef2b4cb3e3..c38f7e3d01dc 100644
--- a/include/asm-ppc64/iSeries/HvTypes.h
+++ b/include/asm-powerpc/iseries/hv_types.h
@@ -16,8 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _HVTYPES_H
-#define _HVTYPES_H
+#ifndef _ASM_POWERPC_ISERIES_HV_TYPES_H
+#define _ASM_POWERPC_ISERIES_HV_TYPES_H
/*
* General typedefs for the hypervisor.
@@ -110,4 +110,4 @@ struct HvLpBufferList {
u64 len;
};
-#endif /* _HVTYPES_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_TYPES_H */
diff --git a/include/asm-ppc64/iSeries/iSeries_io.h b/include/asm-powerpc/iseries/iseries_io.h
index 9f79413342b3..56b2113ff0f5 100644
--- a/include/asm-ppc64/iSeries/iSeries_io.h
+++ b/include/asm-powerpc/iseries/iseries_io.h
@@ -1,5 +1,5 @@
-#ifndef _ISERIES_IO_H
-#define _ISERIES_IO_H
+#ifndef _ASM_POWERPC_ISERIES_ISERIES_IO_H
+#define _ASM_POWERPC_ISERIES_ISERIES_IO_H
#include <linux/config.h>
@@ -46,4 +46,4 @@ extern void iSeries_memcpy_fromio(void *dest,
const volatile void __iomem *source, size_t n);
#endif /* CONFIG_PPC_ISERIES */
-#endif /* _ISERIES_IO_H */
+#endif /* _ASM_POWERPC_ISERIES_ISERIES_IO_H */
diff --git a/include/asm-ppc64/iSeries/ItExtVpdPanel.h b/include/asm-powerpc/iseries/it_exp_vpd_panel.h
index 4c546a8802b4..66a17a230c52 100644
--- a/include/asm-ppc64/iSeries/ItExtVpdPanel.h
+++ b/include/asm-powerpc/iseries/it_exp_vpd_panel.h
@@ -16,8 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _ITEXTVPDPANEL_H
-#define _ITEXTVPDPANEL_H
+#ifndef _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H
+#define _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H
/*
* This struct maps the panel information
@@ -49,4 +49,4 @@ struct ItExtVpdPanel {
extern struct ItExtVpdPanel xItExtVpdPanel;
-#endif /* _ITEXTVPDPANEL_H */
+#endif /* _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H */
diff --git a/include/asm-ppc64/iSeries/ItLpNaca.h b/include/asm-powerpc/iseries/it_lp_naca.h
index 225d0176779d..c3ef1de45d82 100644
--- a/include/asm-ppc64/iSeries/ItLpNaca.h
+++ b/include/asm-powerpc/iseries/it_lp_naca.h
@@ -16,8 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _ITLPNACA_H
-#define _ITLPNACA_H
+#ifndef _ASM_POWERPC_ISERIES_IT_LP_NACA_H
+#define _ASM_POWERPC_ISERIES_IT_LP_NACA_H
#include <linux/types.h>
@@ -77,4 +77,4 @@ struct ItLpNaca {
extern struct ItLpNaca itLpNaca;
-#endif /* _ITLPNACA_H */
+#endif /* _ASM_POWERPC_ISERIES_IT_LP_NACA_H */
diff --git a/include/asm-ppc64/iSeries/ItLpQueue.h b/include/asm-powerpc/iseries/it_lp_queue.h
index 69b26ad74135..a60d03afbf95 100644
--- a/include/asm-ppc64/iSeries/ItLpQueue.h
+++ b/include/asm-powerpc/iseries/it_lp_queue.h
@@ -16,8 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _ITLPQUEUE_H
-#define _ITLPQUEUE_H
+#ifndef _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
+#define _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
/*
* This control block defines the simple LP queue structure that is
@@ -78,4 +78,4 @@ extern int hvlpevent_is_pending(void);
extern void process_hvlpevents(struct pt_regs *);
extern void setup_hvlpevent_queue(void);
-#endif /* _ITLPQUEUE_H */
+#endif /* _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H */
diff --git a/include/asm-ppc64/iSeries/ItLpRegSave.h b/include/asm-powerpc/iseries/it_lp_reg_save.h
index 1b3087e76205..288044b702de 100644
--- a/include/asm-ppc64/iSeries/ItLpRegSave.h
+++ b/include/asm-powerpc/iseries/it_lp_reg_save.h
@@ -16,8 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _ITLPREGSAVE_H
-#define _ITLPREGSAVE_H
+#ifndef _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H
+#define _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H
/*
* This control block contains the data that is shared between PLIC
diff --git a/include/asm-ppc64/iSeries/LparMap.h b/include/asm-powerpc/iseries/lpar_map.h
index a6840b186d03..84fc321615bf 100644
--- a/include/asm-ppc64/iSeries/LparMap.h
+++ b/include/asm-powerpc/iseries/lpar_map.h
@@ -16,8 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _LPARMAP_H
-#define _LPARMAP_H
+#ifndef _ASM_POWERPC_ISERIES_LPAR_MAP_H
+#define _ASM_POWERPC_ISERIES_LPAR_MAP_H
#ifndef __ASSEMBLY__
@@ -80,4 +80,4 @@ extern const struct LparMap xLparMap;
/* the fixed address where the LparMap exists */
#define LPARMAP_PHYS 0x7000
-#endif /* _LPARMAP_H */
+#endif /* _ASM_POWERPC_ISERIES_LPAR_MAP_H */
diff --git a/include/asm-ppc64/iSeries/mf.h b/include/asm-powerpc/iseries/mf.h
index 7e6a0d936999..e7bd57a03fb1 100644
--- a/include/asm-ppc64/iSeries/mf.h
+++ b/include/asm-powerpc/iseries/mf.h
@@ -23,13 +23,13 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _ASM_PPC64_ISERIES_MF_H
-#define _ASM_PPC64_ISERIES_MF_H
+#ifndef _ASM_POWERPC_ISERIES_MF_H
+#define _ASM_POWERPC_ISERIES_MF_H
#include <linux/types.h>
-#include <asm/iSeries/HvTypes.h>
-#include <asm/iSeries/HvCallEvent.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_call_event.h>
struct rtc_time;
@@ -54,4 +54,4 @@ extern int mf_get_rtc(struct rtc_time *tm);
extern int mf_get_boot_rtc(struct rtc_time *tm);
extern int mf_set_rtc(struct rtc_time *tm);
-#endif /* _ASM_PPC64_ISERIES_MF_H */
+#endif /* _ASM_POWERPC_ISERIES_MF_H */
diff --git a/include/asm-ppc64/iSeries/vio.h b/include/asm-powerpc/iseries/vio.h
index 6c05e6257f53..7e3a469420dd 100644
--- a/include/asm-ppc64/iSeries/vio.h
+++ b/include/asm-powerpc/iseries/vio.h
@@ -38,11 +38,11 @@
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
-#ifndef _ISERIES_VIO_H
-#define _ISERIES_VIO_H
+#ifndef _ASM_POWERPC_ISERIES_VIO_H
+#define _ASM_POWERPC_ISERIES_VIO_H
-#include <asm/iSeries/HvTypes.h>
-#include <asm/iSeries/HvLpEvent.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_event.h>
/*
* iSeries virtual I/O events use the subtype field in
@@ -127,4 +127,4 @@ struct device;
extern struct device *iSeries_vio_dev;
-#endif /* _ISERIES_VIO_H */
+#endif /* _ASM_POWERPC_ISERIES_VIO_H */
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
new file mode 100644
index 000000000000..062ab9ba68eb
--- /dev/null
+++ b/include/asm-powerpc/kexec.h
@@ -0,0 +1,49 @@
+#ifndef _ASM_POWERPC_KEXEC_H
+#define _ASM_POWERPC_KEXEC_H
+
+/*
+ * Maximum page that is mapped directly into kernel memory.
+ * XXX: Since we copy virt we can use any page we allocate
+ */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/*
+ * Maximum address we can reach in physical address mode.
+ * XXX: I want to allow initrd in highmem. Otherwise set to rmo on LPAR.
+ */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+#ifdef __powerpc64__
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+#else
+/* TASK_SIZE, probably left over from use_mm ?? */
+#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#endif
+
+#define KEXEC_CONTROL_CODE_SIZE 4096
+
+/* The native architecture */
+#ifdef __powerpc64__
+#define KEXEC_ARCH KEXEC_ARCH_PPC64
+#else
+#define KEXEC_ARCH KEXEC_ARCH_PPC
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define MAX_NOTE_BYTES 1024
+typedef u32 note_buf_t[MAX_NOTE_BYTES / sizeof(u32)];
+
+extern note_buf_t crash_notes[];
+
+#ifdef __powerpc64__
+extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
+ master to copy new code to 0 */
+#else
+struct kimage;
+extern void machine_kexec_simple(struct kimage *image);
+#endif
+
+#endif /* ! __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_KEXEC_H */
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 451b345cfc78..629ca964b974 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -80,6 +80,7 @@ struct machdep_calls {
void (*iommu_dev_setup)(struct pci_dev *dev);
void (*iommu_bus_setup)(struct pci_bus *bus);
void (*irq_bus_setup)(struct pci_bus *bus);
+ int (*set_dabr)(unsigned long dabr);
#endif
int (*probe)(int platform);
diff --git a/include/asm-powerpc/numnodes.h b/include/asm-powerpc/numnodes.h
new file mode 100644
index 000000000000..795533aca095
--- /dev/null
+++ b/include/asm-powerpc/numnodes.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_POWERPC_MAX_NUMNODES_H
+#define _ASM_POWERPC_MAX_NUMNODES_H
+
+/* Max 16 Nodes */
+#define NODES_SHIFT 4
+
+#endif /* _ASM_POWERPC_MAX_NUMNODES_H */
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index f99f2af82ca5..c534ca41224b 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -506,6 +506,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#else
#define __ASM_CONST(x) x##UL
#define ASM_CONST(x) __ASM_CONST(x)
+
+#ifdef CONFIG_PPC64
+#define DATAL ".llong"
+#else
+#define DATAL ".long"
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index eee954a001fd..1dc4bf7b52b3 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -70,7 +70,7 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
#define PLATFORM_LPAR 0x0001
#define PLATFORM_POWERMAC 0x0400
#define PLATFORM_MAPLE 0x0500
-#define PLATFORM_BPA 0x1000
+#define PLATFORM_CELL 0x1000
/* Compatibility with drivers coming from PPC32 world */
#define _machine (systemcfg->platform)
diff --git a/include/asm-ppc64/ptrace.h b/include/asm-powerpc/ptrace.h
index 3a55377f1fd3..1f7ecdb0b6ce 100644
--- a/include/asm-ppc64/ptrace.h
+++ b/include/asm-powerpc/ptrace.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_PTRACE_H
-#define _PPC64_PTRACE_H
+#ifndef _ASM_POWERPC_PTRACE_H
+#define _ASM_POWERPC_PTRACE_H
/*
* Copyright (C) 2001 PPC64 Team, IBM Corp
@@ -16,7 +16,7 @@
* that the overall structure is a multiple of 16 bytes in length.
*
* Note that the offsets of the fields in this struct correspond with
- * the PT_* values below. This simplifies arch/ppc64/kernel/ptrace.c.
+ * the PT_* values below. This simplifies arch/powerpc/kernel/ptrace.c.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -30,70 +30,96 @@ struct pt_regs {
unsigned long gpr[32];
unsigned long nip;
unsigned long msr;
- unsigned long orig_gpr3; /* Used for restarting system calls */
+ unsigned long orig_gpr3; /* Used for restarting system calls */
unsigned long ctr;
unsigned long link;
unsigned long xer;
unsigned long ccr;
- unsigned long softe; /* Soft enabled/disabled */
- unsigned long trap; /* Reason for being here */
- unsigned long dar; /* Fault registers */
- unsigned long dsisr;
- unsigned long result; /* Result of a system call */
+#ifdef __powerpc64__
+ unsigned long softe; /* Soft enabled/disabled */
+#else
+ unsigned long mq; /* 601 only (not used at present) */
+ /* Used on APUS to hold IPL value. */
+#endif
+ unsigned long trap; /* Reason for being here */
+ /* N.B. for critical exceptions on 4xx, the dar and dsisr
+ fields are overloaded to hold srr0 and srr1. */
+ unsigned long dar; /* Fault registers */
+ unsigned long dsisr; /* on 4xx/Book-E used for ESR */
+ unsigned long result; /* Result of a system call */
};
-struct pt_regs32 {
- unsigned int gpr[32];
- unsigned int nip;
- unsigned int msr;
- unsigned int orig_gpr3; /* Used for restarting system calls */
- unsigned int ctr;
- unsigned int link;
- unsigned int xer;
- unsigned int ccr;
- unsigned int mq; /* 601 only (not used at present) */
- unsigned int trap; /* Reason for being here */
- unsigned int dar; /* Fault registers */
- unsigned int dsisr;
- unsigned int result; /* Result of a system call */
-};
+#endif /* __ASSEMBLY__ */
#ifdef __KERNEL__
-#define instruction_pointer(regs) ((regs)->nip)
+#ifdef __powerpc64__
+
+#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
+
+/* Size of dummy stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE 128
+#define __SIGNAL_FRAMESIZE32 64
+
+#else /* __powerpc64__ */
+
+#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
+
+/* Size of stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE 64
+
+#endif /* __powerpc64__ */
+#ifndef __ASSEMBLY__
+
+#define instruction_pointer(regs) ((regs)->nip)
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif
+#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
+#else
+#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
+#endif
#define force_successful_syscall_return() \
- (current_thread_info()->syscall_noerror = 1)
+ do { \
+ current_thread_info()->syscall_noerror = 1; \
+ } while(0)
/*
* We use the least-significant bit of the trap field to indicate
* whether we have saved the full set of registers, or only a
* partial set. A 1 there means the partial set.
+ * On 4xx we use the next bit to indicate whether the exception
+ * is a critical exception (1 means it is).
*/
#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
+#ifndef __powerpc64__
+#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
+#endif /* ! __powerpc64__ */
#define TRAP(regs) ((regs)->trap & ~0xF)
+#ifdef __powerpc64__
#define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1)
-
-#endif /* __KERNEL__ */
+#else
+#define CHECK_FULL_REGS(regs) \
+do { \
+ if ((regs)->trap & 1) \
+ printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
+} while (0)
+#endif /* __powerpc64__ */
#endif /* __ASSEMBLY__ */
-#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
-
-/* Size of dummy stack frame allocated when calling signal handler. */
-#define __SIGNAL_FRAMESIZE 128
-#define __SIGNAL_FRAMESIZE32 64
+#endif /* __KERNEL__ */
/*
* Offsets used by 'ptrace' system call interface.
+ * These can't be changed without breaking binary compatibility
+ * with MkLinux, etc.
*/
#define PT_R0 0
#define PT_R1 1
@@ -137,18 +163,25 @@ extern unsigned long profile_pc(struct pt_regs *regs);
#define PT_LNK 36
#define PT_XER 37
#define PT_CCR 38
+#ifndef __powerpc64__
+#define PT_MQ 39
+#else
#define PT_SOFTE 39
#define PT_TRAP 40
#define PT_DAR 41
#define PT_DSISR 42
#define PT_RESULT 43
+#endif
-#define PT_FPR0 48
+#define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
+
+#ifndef __powerpc64__
+
+#define PT_FPR31 (PT_FPR0 + 2*31)
+#define PT_FPSCR (PT_FPR0 + 2*32 + 1)
+
+#else /* __powerpc64__ */
-/*
- * Kernel and userspace will both use this PT_FPSCR value. 32-bit apps will
- * have visibility to the asm-ppc/ptrace.h header instead of this one.
- */
#define PT_FPSCR (PT_FPR0 + 32) /* each FP reg occupies 1 slot in 64-bit space */
#ifdef __KERNEL__
@@ -165,29 +198,29 @@ extern unsigned long profile_pc(struct pt_regs *regs);
#define PT_VRSAVE_32 (PT_VR0 + 33*4)
#endif
+#endif /* __powerpc64__ */
+
/*
- * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
- * The transfer totals 34 quadword. Quadwords 0-31 contain the
- * corresponding vector registers. Quadword 32 contains the vscr as the
- * last word (offset 12) within that quadword. Quadword 33 contains the
+ * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * The transfer totals 34 quadword. Quadwords 0-31 contain the
+ * corresponding vector registers. Quadword 32 contains the vscr as the
+ * last word (offset 12) within that quadword. Quadword 33 contains the
* vrsave as the first word (offset 0) within the quadword.
*
- * This definition of the VMX state is compatible with the current PPC32
- * ptrace interface. This allows signal handling and ptrace to use the same
- * structures. This also simplifies the implementation of a bi-arch
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface. This allows signal handling and ptrace to use the same
+ * structures. This also simplifies the implementation of a bi-arch
+ * (combined 32- and 64-bit) gdb.
*/
#define PTRACE_GETVRREGS 18
#define PTRACE_SETVRREGS 19
-/*
- * While we dont have 64bit book E processors, we need to reserve the
- * relevant ptrace calls for 32bit compatibility.
- */
-#if 0
-#define PTRACE_GETEVRREGS 20
-#define PTRACE_SETEVRREGS 21
-#endif
+#ifndef __powerpc64__
+/* Get/set all the upper 32-bits of the SPE registers, accumulator, and
+ * spefscr, in one go */
+#define PTRACE_GETEVRREGS 20
+#define PTRACE_SETEVRREGS 21
+#endif /* __powerpc64__ */
/*
* Get or set a debug register. The first 16 are DABR registers and the
@@ -196,6 +229,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
#define PTRACE_GET_DEBUGREG 25
#define PTRACE_SET_DEBUGREG 26
+#ifdef __powerpc64__
/* Additional PTRACE requests implemented on PowerPC. */
#define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */
#define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */
@@ -209,5 +243,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
#define PPC_PTRACE_POKEDATA_3264 0x92
#define PPC_PTRACE_PEEKUSR_3264 0x91
#define PPC_PTRACE_POKEUSR_3264 0x90
+#endif /* __powerpc64__ */
-#endif /* _PPC64_PTRACE_H */
+#endif /* _ASM_POWERPC_PTRACE_H */
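
As the header comment notes, the PT_* indices mirror the pt_regs layout word for word (index = byte offset / sizeof(unsigned long)), and on 32-bit each 64-bit FP register spans two slots, giving PT_FPR31 = PT_FPR0 + 2*31 and PT_FPSCR = PT_FPR0 + 2*32 + 1. A sketch verifying the correspondence with offsetof (the struct body is transcribed from the 64-bit layout in the patch; the driver is ours):

#include <stddef.h>
#include <stdio.h>

struct pt_regs_sketch {			/* 64-bit layout from the patch */
	unsigned long gpr[32];
	unsigned long nip;		/* PT_NIP = 32 */
	unsigned long msr;		/* PT_MSR = 33 */
	unsigned long orig_gpr3;
	unsigned long ctr;
	unsigned long link;		/* PT_LNK = 36 */
	unsigned long xer;
	unsigned long ccr;
	unsigned long softe;		/* PT_SOFTE = 39 */
	unsigned long trap;
	unsigned long dar;
	unsigned long dsisr;
	unsigned long result;		/* PT_RESULT = 43 */
};

int main(void)
{
	printf("PT_NIP=%zu PT_LNK=%zu PT_RESULT=%zu\n",
	       offsetof(struct pt_regs_sketch, nip)    / sizeof(unsigned long),
	       offsetof(struct pt_regs_sketch, link)   / sizeof(unsigned long),
	       offsetof(struct pt_regs_sketch, result) / sizeof(unsigned long));
	return 0;
}
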
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
index 2c050332471d..d1bb611ea626 100644
--- a/include/asm-powerpc/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -149,28 +149,11 @@ struct rtas_error_log {
unsigned char buffer[1];
};
-struct flash_block {
- char *data;
- unsigned long length;
-};
-
-/* This struct is very similar but not identical to
- * that needed by the rtas flash update.
- * All we need to do for rtas is rewrite num_blocks
- * into a version/length and translate the pointers
- * to absolute.
+/*
+ * This can be set by the rtas_flash module so that it can get called
+ * as the absolutely last thing before the kernel terminates.
*/
-#define FLASH_BLOCKS_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct flash_block))
-struct flash_block_list {
- unsigned long num_blocks;
- struct flash_block_list *next;
- struct flash_block blocks[FLASH_BLOCKS_PER_NODE];
-};
-struct flash_block_list_header { /* just the header of flash_block_list */
- unsigned long num_blocks;
- struct flash_block_list *next;
-};
-extern struct flash_block_list_header rtas_firmware_flash_list;
+extern void (*rtas_flash_term_hook)(int);
extern struct rtas_t rtas;
diff --git a/include/asm-ppc64/sigcontext.h b/include/asm-powerpc/sigcontext.h
index 6f8aee768c5e..165d630e1cf3 100644
--- a/include/asm-ppc64/sigcontext.h
+++ b/include/asm-powerpc/sigcontext.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_PPC64_SIGCONTEXT_H
-#define _ASM_PPC64_SIGCONTEXT_H
+#ifndef _ASM_POWERPC_SIGCONTEXT_H
+#define _ASM_POWERPC_SIGCONTEXT_H
/*
* This program is free software; you can redistribute it and/or
@@ -9,39 +9,44 @@
*/
#include <linux/compiler.h>
#include <asm/ptrace.h>
+#ifdef __powerpc64__
#include <asm/elf.h>
-
+#endif
struct sigcontext {
unsigned long _unused[4];
int signal;
+#ifdef __powerpc64__
int _pad0;
+#endif
unsigned long handler;
unsigned long oldmask;
struct pt_regs __user *regs;
+#ifdef __powerpc64__
elf_gregset_t gp_regs;
elf_fpregset_t fp_regs;
/*
- * To maintain compatibility with current implementations the sigcontext is
- * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t)
- * followed by an unstructured (vmx_reserve) field of 69 doublewords. This
- * allows the array of vector registers to be quadword aligned independent of
- * the alignment of the containing sigcontext or ucontext. It is the
- * responsibility of the code setting the sigcontext to set this pointer to
- * either NULL (if this processor does not support the VMX feature) or the
+ * To maintain compatibility with current implementations the sigcontext is
+ * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t)
+ * followed by an unstructured (vmx_reserve) field of 69 doublewords. This
+ * allows the array of vector registers to be quadword aligned independent of
+ * the alignment of the containing sigcontext or ucontext. It is the
+ * responsibility of the code setting the sigcontext to set this pointer to
+ * either NULL (if this processor does not support the VMX feature) or the
* address of the first quadword within the allocated (vmx_reserve) area.
*
- * The pointer (v_regs) of vector type (elf_vrreg_t) is type compatible with
- * an array of 34 quadword entries (elf_vrregset_t). The entries with
- * indexes 0-31 contain the corresponding vector registers. The entry with
- * index 32 contains the vscr as the last word (offset 12) within the
- * quadword. This allows the vscr to be stored as either a quadword (since
- * it must be copied via a vector register to/from storage) or as a word.
- * The entry with index 33 contains the vrsave as the first word (offset 0)
+ * The pointer (v_regs) of vector type (elf_vrreg_t) is type compatible with
+ * an array of 34 quadword entries (elf_vrregset_t). The entries with
+ * indexes 0-31 contain the corresponding vector registers. The entry with
+ * index 32 contains the vscr as the last word (offset 12) within the
+ * quadword. This allows the vscr to be stored as either a quadword (since
+ * it must be copied via a vector register to/from storage) or as a word.
+ * The entry with index 33 contains the vrsave as the first word (offset 0)
* within the quadword.
*/
elf_vrreg_t __user *v_regs;
long vmx_reserve[ELF_NVRREG+ELF_NVRREG+1];
+#endif
};
-#endif /* _ASM_PPC64_SIGCONTEXT_H */
+#endif /* _ASM_POWERPC_SIGCONTEXT_H */
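
The long comment above obliges whoever builds the sigcontext to point v_regs at the first quadword-aligned address inside vmx_reserve. A sketch of that alignment step (the helper name and usage line are ours, not code from the patch):

/* Round a pointer up to the next 16-byte (quadword) boundary. */
static unsigned long *quadword_align(unsigned long *p)
{
	return (unsigned long *)(((unsigned long)p + 15UL) & ~15UL);
}
/* e.g.: sc->v_regs = (elf_vrreg_t __user *)quadword_align(sc->vmx_reserve); */
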
diff --git a/include/asm-ppc64/smp.h b/include/asm-powerpc/smp.h
index c5e9052e7967..8bcdd0faefea 100644
--- a/include/asm-ppc64/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -1,5 +1,5 @@
/*
- * smp.h: PPC64 specific SMP code.
+ * smp.h: PowerPC-specific SMP code.
*
* Original was a copy of sparc smp.h. Now heavily modified
* for PPC.
@@ -13,9 +13,9 @@
* 2 of the License, or (at your option) any later version.
*/
+#ifndef _ASM_POWERPC_SMP_H
+#define _ASM_POWERPC_SMP_H
#ifdef __KERNEL__
-#ifndef _PPC64_SMP_H
-#define _PPC64_SMP_H
#include <linux/config.h>
#include <linux/threads.h>
@@ -24,7 +24,9 @@
#ifndef __ASSEMBLY__
+#ifdef CONFIG_PPC64
#include <asm/paca.h>
+#endif
extern int boot_cpuid;
extern int boot_cpuid_phys;
@@ -45,8 +47,19 @@ void generic_cpu_die(unsigned int cpu);
void generic_mach_cpu_die(void);
#endif
+#ifdef CONFIG_PPC64
#define raw_smp_processor_id() (get_paca()->paca_index)
#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
+#else
+/* 32-bit */
+extern int smp_hw_index[];
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
+#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)])
+#define set_hard_smp_processor_id(cpu, phys)\
+ (smp_hw_index[(cpu)] = (phys))
+#endif
extern cpumask_t cpu_sibling_map[NR_CPUS];
@@ -64,14 +77,30 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
void smp_init_iSeries(void);
void smp_init_pSeries(void);
+void smp_init_cell(void);
+void smp_setup_cpu_maps(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
+
+#else
+/* for UP */
+#define smp_setup_cpu_maps()
+#define smp_release_cpus()
+
#endif /* CONFIG_SMP */
+#ifdef CONFIG_PPC64
#define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id)
#define set_hard_smp_processor_id(CPU, VAL) \
do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0)
+#else
+/* 32-bit */
+#ifndef CONFIG_SMP
+#define get_hard_smp_processor_id(cpu) boot_cpuid_phys
+#define set_hard_smp_processor_id(cpu, phys)
+#endif
+#endif
extern int smt_enabled_at_boot;
@@ -84,15 +113,7 @@ extern void smp_generic_take_timebase(void);
extern struct smp_ops_t *smp_ops;
-#ifdef CONFIG_PPC_PSERIES
-void vpa_init(int cpu);
-#else
-static inline void vpa_init(int cpu)
-{
-}
-#endif /* CONFIG_PPC_PSERIES */
-
#endif /* __ASSEMBLY__ */
-#endif /* !(_PPC64_SMP_H) */
#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SMP_H */
diff --git a/include/asm-ppc64/sparsemem.h b/include/asm-powerpc/sparsemem.h
index c5bd47e57f17..1c95ab99deb3 100644
--- a/include/asm-ppc64/sparsemem.h
+++ b/include/asm-powerpc/sparsemem.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_PPC64_SPARSEMEM_H
-#define _ASM_PPC64_SPARSEMEM_H 1
+#ifndef _ASM_POWERPC_SPARSEMEM_H
+#define _ASM_POWERPC_SPARSEMEM_H 1
#ifdef CONFIG_SPARSEMEM
/*
@@ -13,4 +13,4 @@
#endif /* CONFIG_SPARSEMEM */
-#endif /* _ASM_PPC64_SPARSEMEM_H */
+#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/include/asm-powerpc/stat.h b/include/asm-powerpc/stat.h
new file mode 100644
index 000000000000..e4edc510b530
--- /dev/null
+++ b/include/asm-powerpc/stat.h
@@ -0,0 +1,81 @@
+#ifndef _ASM_POWERPC_STAT_H
+#define _ASM_POWERPC_STAT_H
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+
+#define STAT_HAVE_NSEC 1
+
+#ifndef __powerpc64__
+struct __old_kernel_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+#endif /* !__powerpc64__ */
+
+struct stat {
+ unsigned long st_dev;
+ ino_t st_ino;
+#ifdef __powerpc64__
+ nlink_t st_nlink;
+ mode_t st_mode;
+#else
+ mode_t st_mode;
+ nlink_t st_nlink;
+#endif
+ uid_t st_uid;
+ gid_t st_gid;
+ unsigned long st_rdev;
+ off_t st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+#ifdef __powerpc64__
+ unsigned long __unused6;
+#endif
+};
+
+/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
+struct stat64 {
+ unsigned long long st_dev; /* Device. */
+ unsigned long long st_ino; /* File serial number. */
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+ unsigned long long st_rdev; /* Device number, if device. */
+ unsigned short __pad2;
+ long long st_size; /* Size of file, in bytes. */
+ int st_blksize; /* Optimal block size for I/O. */
+ long long st_blocks; /* Number of 512-byte blocks allocated. */
+ int st_atime; /* Time of last access. */
+ unsigned int st_atime_nsec;
+ int st_mtime; /* Time of last modification. */
+ unsigned int st_mtime_nsec;
+ int st_ctime; /* Time of last status change. */
+ unsigned int st_ctime_nsec;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
+
+#endif /* _ASM_POWERPC_STAT_H */
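
STAT_HAVE_NSEC above advertises that the *_nsec fields carry real nanosecond
data. A minimal user-space sketch of reading them; this assumes a libc that
exposes the kernel's st_mtime_nsec field name, and the path is illustrative:

	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		struct stat st;

		/* Print seconds plus the nanosecond part that
		 * STAT_HAVE_NSEC promises is valid. */
		if (stat("/etc/passwd", &st) == 0)
			printf("mtime: %ld.%09lu\n", (long)st.st_mtime,
			       (unsigned long)st.st_mtime_nsec);
		return 0;
	}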
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 5b2ecbc47907..b5da0b851e02 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -359,5 +359,53 @@ extern void reloc_got2(unsigned long);
#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
+static inline void create_instruction(unsigned long addr, unsigned int instr)
+{
+ unsigned int *p;
+ p = (unsigned int *)addr;
+ *p = instr;
+ asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
+}
+
+/* Flags for create_branch:
+ * "b" == create_branch(addr, target, 0);
+ * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE);
+ * "bl" == create_branch(addr, target, BRANCH_SET_LINK);
+ * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
+ */
+#define BRANCH_SET_LINK 0x1
+#define BRANCH_ABSOLUTE 0x2
+
+static inline void create_branch(unsigned long addr,
+ unsigned long target, int flags)
+{
+ unsigned int instruction;
+
+	if (!(flags & BRANCH_ABSOLUTE))
+ target = target - addr;
+
+ /* Mask out the flags and target, so they don't step on each other. */
+ instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);
+
+ create_instruction(addr, instruction);
+}
+
+static inline void create_function_call(unsigned long addr, void *func)
+{
+ unsigned long func_addr;
+
+#ifdef CONFIG_PPC64
+ /*
+ * On PPC64 the function pointer actually points to the function's
+ * descriptor. The first entry in the descriptor is the address
+ * of the function text.
+ */
+ func_addr = *(unsigned long *)func;
+#else
+ func_addr = (unsigned long)func;
+#endif
+ create_branch(addr, func_addr, BRANCH_SET_LINK);
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */
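
The three helpers added above compose: create_instruction() stores the word
and does the dcbst/icbi maintenance, create_branch() encodes a PowerPC I-form
branch (primary opcode 0x48000000 with AA/LK in the low bits), and
create_function_call() resolves the 64-bit function descriptor first. A
minimal sketch of a caller, with patch_site and my_handler as hypothetical
names that are not part of this patch:

	#include <asm/system.h>

	extern void my_handler(void);		/* hypothetical target */
	extern unsigned int patch_site[];	/* hypothetical nop slot in .text */

	static void patch_in_handler(void)
	{
		/* Rewrite the nop as "bl my_handler"; the link bit is set
		 * and, on 64-bit, the function descriptor is dereferenced
		 * for us.  The relative displacement must fit in 26 bits. */
		create_function_call((unsigned long)patch_site, my_handler);
	}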
diff --git a/include/asm-powerpc/termios.h b/include/asm-powerpc/termios.h
index c5b8e5358f83..7f80a019b6a0 100644
--- a/include/asm-powerpc/termios.h
+++ b/include/asm-powerpc/termios.h
@@ -94,142 +94,9 @@ struct termio {
#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
#endif
-#define FIOCLEX _IO('f', 1)
-#define FIONCLEX _IO('f', 2)
-#define FIOASYNC _IOW('f', 125, int)
-#define FIONBIO _IOW('f', 126, int)
-#define FIONREAD _IOR('f', 127, int)
-#define TIOCINQ FIONREAD
-
-#define TIOCGETP _IOR('t', 8, struct sgttyb)
-#define TIOCSETP _IOW('t', 9, struct sgttyb)
-#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
-
-#define TIOCSETC _IOW('t', 17, struct tchars)
-#define TIOCGETC _IOR('t', 18, struct tchars)
-#define TCGETS _IOR('t', 19, struct termios)
-#define TCSETS _IOW('t', 20, struct termios)
-#define TCSETSW _IOW('t', 21, struct termios)
-#define TCSETSF _IOW('t', 22, struct termios)
-
-#define TCGETA _IOR('t', 23, struct termio)
-#define TCSETA _IOW('t', 24, struct termio)
-#define TCSETAW _IOW('t', 25, struct termio)
-#define TCSETAF _IOW('t', 28, struct termio)
-
-#define TCSBRK _IO('t', 29)
-#define TCXONC _IO('t', 30)
-#define TCFLSH _IO('t', 31)
-
-#define TIOCSWINSZ _IOW('t', 103, struct winsize)
-#define TIOCGWINSZ _IOR('t', 104, struct winsize)
-#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
-#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
-#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
-
-#define TIOCGLTC _IOR('t', 116, struct ltchars)
-#define TIOCSLTC _IOW('t', 117, struct ltchars)
-#define TIOCSPGRP _IOW('t', 118, int)
-#define TIOCGPGRP _IOR('t', 119, int)
-
-#define TIOCEXCL 0x540C
-#define TIOCNXCL 0x540D
-#define TIOCSCTTY 0x540E
-
-#define TIOCSTI 0x5412
-#define TIOCMGET 0x5415
-#define TIOCMBIS 0x5416
-#define TIOCMBIC 0x5417
-#define TIOCMSET 0x5418
-#define TIOCGSOFTCAR 0x5419
-#define TIOCSSOFTCAR 0x541A
-#define TIOCLINUX 0x541C
-#define TIOCCONS 0x541D
-#define TIOCGSERIAL 0x541E
-#define TIOCSSERIAL 0x541F
-#define TIOCPKT 0x5420
-
-#define TIOCNOTTY 0x5422
-#define TIOCSETD 0x5423
-#define TIOCGETD 0x5424
-#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
-
-#define TIOCSERCONFIG 0x5453
-#define TIOCSERGWILD 0x5454
-#define TIOCSERSWILD 0x5455
-#define TIOCGLCKTRMIOS 0x5456
-#define TIOCSLCKTRMIOS 0x5457
-#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
-#define TIOCSERGETLSR 0x5459 /* Get line status register */
-#define TIOCSERGETMULTI 0x545A /* Get multiport config */
-#define TIOCSERSETMULTI 0x545B /* Set multiport config */
-
-#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
-
-/* Used for packet mode */
-#define TIOCPKT_DATA 0
-#define TIOCPKT_FLUSHREAD 1
-#define TIOCPKT_FLUSHWRITE 2
-#define TIOCPKT_STOP 4
-#define TIOCPKT_START 8
-#define TIOCPKT_NOSTOP 16
-#define TIOCPKT_DOSTOP 32
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-
#ifdef __KERNEL__
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
- unsigned short __tmp; \
- get_user(__tmp,&(termio)->x); \
- (termios)->x = (0xffff0000 & (termios)->x) | __tmp; \
-}
-
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
- SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
- copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
- put_user((termios)->c_iflag, &(termio)->c_iflag); \
- put_user((termios)->c_oflag, &(termio)->c_oflag); \
- put_user((termios)->c_cflag, &(termio)->c_cflag); \
- put_user((termios)->c_lflag, &(termio)->c_lflag); \
- put_user((termios)->c_line, &(termio)->c_line); \
- copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#include <asm-generic/termios.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index 410e795f7d43..d9b86a17271b 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -21,7 +21,7 @@
#include <asm/processor.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
-#include <asm/iSeries/HvCall.h>
+#include <asm/iseries/hv_call.h>
#endif
/* time.c */
diff --git a/include/asm-ppc/tlb.h b/include/asm-powerpc/tlb.h
index 2c142c5d8584..56659f121779 100644
--- a/include/asm-ppc/tlb.h
+++ b/include/asm-powerpc/tlb.h
@@ -1,6 +1,7 @@
/*
- * TLB shootdown specifics for PPC
+ * TLB shootdown specifics for powerpc
*
+ * Copyright (C) 2002 Anton Blanchard, IBM Corp.
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
@@ -8,29 +9,53 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-#ifndef _PPC_TLB_H
-#define _PPC_TLB_H
+#ifndef _ASM_POWERPC_TLB_H
+#define _ASM_POWERPC_TLB_H
#include <linux/config.h>
+#ifndef __powerpc64__
#include <asm/pgtable.h>
+#endif
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#ifndef __powerpc64__
#include <asm/page.h>
#include <asm/mmu.h>
-
-#ifdef CONFIG_PPC_STD_MMU
-/* Classic PPC with hash-table based MMU... */
+#endif
struct mmu_gather;
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+
+#if !defined(CONFIG_PPC_STD_MMU)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#elif defined(__powerpc64__)
+
+extern void pte_free_finish(void);
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ flush_tlb_pending();
+ pte_free_finish();
+}
+
+#else
+
extern void tlb_flush(struct mmu_gather *tlb);
+#endif
+
/* Get the generic bits... */
#include <asm-generic/tlb.h>
-/* Nothing needed here in fact... */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
+#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)
+
+#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+#else
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
unsigned long address);
@@ -41,17 +66,5 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
flush_hash_entry(tlb->mm, ptep, address);
}
-#else
-/* Embedded PPC with software-loaded TLB, very simple... */
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-/* Get the generic bits... */
-#include <asm-generic/tlb.h>
-
-#endif /* CONFIG_PPC_STD_MMU */
-
-#endif /* __PPC_TLB_H */
+#endif
+#endif /* _ASM_POWERPC_TLB_H */
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
new file mode 100644
index 000000000000..ca3655672bbc
--- /dev/null
+++ b/include/asm-powerpc/tlbflush.h
@@ -0,0 +1,146 @@
+#ifndef _ASM_POWERPC_TLBFLUSH_H
+#define _ASM_POWERPC_TLBFLUSH_H
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page on SW-loaded TLBs
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+struct mm_struct;
+
+#ifdef CONFIG_PPC64
+
+#include <linux/percpu.h>
+#include <asm/page.h>
+
+#define PPC64_TLB_BATCH_NR 192
+
+struct ppc64_tlb_batch {
+ unsigned long index;
+ struct mm_struct *mm;
+ pte_t pte[PPC64_TLB_BATCH_NR];
+ unsigned long vaddr[PPC64_TLB_BATCH_NR];
+ unsigned int large;
+};
+DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
+
+extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+
+static inline void flush_tlb_pending(void)
+{
+ struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
+
+ if (batch->index)
+ __flush_tlb_pending(batch);
+ put_cpu_var(ppc64_tlb_batch);
+}
+
+extern void flush_hash_page(unsigned long va, pte_t pte, int local);
+void flush_hash_range(unsigned long number, int local);
+
+#else /* CONFIG_PPC64 */
+
+#include <linux/mm.h>
+
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+/*
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
+#define flush_tlb_pending() asm volatile ("tlbia; sync" : : : "memory")
+#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
+#define flush_tlb_pending() _tlbia()
+#endif
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+#endif /* CONFIG_PPC64 */
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
+ defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ flush_tlb_pending();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+#ifdef CONFIG_PPC64
+ flush_tlb_pending();
+#else
+ _tlbie(vmaddr);
+#endif
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+#ifndef CONFIG_PPC64
+ _tlbie(vmaddr);
+#endif
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ flush_tlb_pending();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_pending();
+}
+
+#else /* 6xx, 7xx, 7xxx cpus */
+
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#endif
+
+/*
+ * This is called in munmap when we have freed up some page-table
+ * pages. We don't need to do anything here, there's nothing special
+ * about our page-table pages. -- paulus
+ */
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_TLBFLUSH_H */
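
On 64-bit hash MMUs the header batches invalidations in the per-cpu
ppc64_tlb_batch and drains them with __flush_tlb_pending(). A sketch of the
queue-then-drain pattern the interface implies; the function below is
illustrative, the real producers are the hash PTE update paths in the mm
code:

	#include <asm/tlbflush.h>

	/* Illustrative producer: queue one invalidation, draining the
	 * batch when it fills.  Consumers call flush_tlb_pending(), e.g.
	 * from tlb_flush() in <asm/tlb.h> above. */
	static void queue_invalidate(struct mm_struct *mm, unsigned long va,
				     pte_t pte)
	{
		struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

		batch->mm = mm;
		batch->pte[batch->index] = pte;
		batch->vaddr[batch->index] = va;
		if (++batch->index == PPC64_TLB_BATCH_NR)
			__flush_tlb_pending(batch);
		put_cpu_var(ppc64_tlb_batch);
	}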
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
new file mode 100644
index 000000000000..33af730f0d19
--- /dev/null
+++ b/include/asm-powerpc/uaccess.h
@@ -0,0 +1,468 @@
+#ifndef _ARCH_POWERPC_UACCESS_H
+#define _ARCH_POWERPC_UACCESS_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <asm/processor.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * The fs/ds values are now the highest legal address in the "segment".
+ * This simplifies the checking in the routines below.
+ */
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(~0UL)
+#ifdef __powerpc64__
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define USER_DS MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
+#else
+#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
+#endif
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->thread.fs)
+#define set_fs(val) (current->thread.fs = (val))
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#ifdef __powerpc64__
+/*
+ * This check is sufficient because there is a large enough
+ * gap between user addresses and kernel addresses.
+ */
+#define __access_ok(addr, size, segment) \
+ (((addr) <= (segment).seg) && ((size) <= (segment).seg))
+
+#else
+
+#define __access_ok(addr, size, segment) \
+ (((addr) <= (segment).seg) && \
+ (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))
+
+#endif
+
+#define access_ok(type, addr, size) \
+ (__chk_user_ptr(addr), \
+ __access_ok((__force unsigned long)(addr), (size), get_fs()))
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+ unsigned long insn;
+ unsigned long fixup;
+};
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on the
+ * PowerPC, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
+ *
+ * The "user64" versions of the user access functions are versions that
+ * allow access of 64-bit data. The "get_user" functions do not
+ * properly handle 64-bit data because the value is cast down to a long.
+ * The "put_user" functions already handle 64-bit data properly but we add
+ * "user64" versions for completeness.
+ */
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+ __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#ifndef __powerpc64__
+#define __get_user64(x, ptr) \
+ __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user64(x, ptr) __put_user(x, ptr)
+#endif
+
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+
+extern long __put_user_bad(void);
+
+#ifdef __powerpc64__
+#define __EX_TABLE_ALIGN "3"
+#define __EX_TABLE_TYPE "llong"
+#else
+#define __EX_TABLE_ALIGN "2"
+#define __EX_TABLE_TYPE "long"
+#endif
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: " op " %1,0(%2) # put_user\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: li %0,%3\n" \
+ " b 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align " __EX_TABLE_ALIGN "\n" \
+ " ."__EX_TABLE_TYPE" 1b,3b\n" \
+ ".previous" \
+ : "=r" (err) \
+ : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __put_user_asm2(x, ptr, retval) \
+ __put_user_asm(x, ptr, retval, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: stw %1,0(%2)\n" \
+ "2: stw %1+1,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: li %0,%3\n" \
+ " b 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align " __EX_TABLE_ALIGN "\n" \
+ " ." __EX_TABLE_TYPE " 1b,4b\n" \
+ " ." __EX_TABLE_TYPE " 2b,4b\n" \
+ ".previous" \
+ : "=r" (err) \
+ : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
+ case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
+ case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
+ case 8: __put_user_asm2(x, ptr, retval); break; \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err; \
+ might_sleep(); \
+ __chk_user_ptr(ptr); \
+ __put_user_size((x), (ptr), (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ might_sleep(); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: "op" %1,0(%2) # get_user\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: li %0,%3\n" \
+ " li %1,0\n" \
+ " b 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align "__EX_TABLE_ALIGN "\n" \
+ " ." __EX_TABLE_TYPE " 1b,3b\n" \
+ ".previous" \
+ : "=r" (err), "=r" (x) \
+ : "b" (addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __get_user_asm2(x, addr, err) \
+ __get_user_asm(x, addr, err, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: lwz %1,0(%2)\n" \
+ "2: lwz %1+1,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: li %0,%3\n" \
+ " li %1,0\n" \
+ " li %1+1,0\n" \
+ " b 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align " __EX_TABLE_ALIGN "\n" \
+ " ." __EX_TABLE_TYPE " 1b,4b\n" \
+ " ." __EX_TABLE_TYPE " 2b,4b\n" \
+ ".previous" \
+ : "=r" (err), "=&r" (x) \
+ : "b" (addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ __chk_user_ptr(ptr); \
+ if (size > sizeof(x)) \
+ (x) = __get_user_bad(); \
+ switch (size) { \
+ case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
+ case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
+ case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \
+ case 8: __get_user_asm2(x, ptr, retval); break; \
+ default: (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err; \
+ unsigned long __gu_val; \
+ __chk_user_ptr(ptr); \
+ might_sleep(); \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#ifndef __powerpc64__
+#define __get_user64_nocheck(x, ptr, size) \
+({ \
+ long __gu_err; \
+ long long __gu_val; \
+ __chk_user_ptr(ptr); \
+ might_sleep(); \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+#endif /* __powerpc64__ */
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ long __gu_err = -EFAULT; \
+ unsigned long __gu_val = 0; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ might_sleep(); \
+ if (access_ok(VERIFY_READ, __gu_addr, (size))) \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+/* more complex routines */
+
+extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+#ifndef __powerpc64__
+
+static inline unsigned long copy_from_user(void *to,
+ const void __user *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ if ((unsigned long)from < TASK_SIZE) {
+ over = (unsigned long)from + n - TASK_SIZE;
+ return __copy_tofrom_user((__force void __user *)to, from,
+ n - over) + over;
+ }
+ return n;
+}
+
+static inline unsigned long copy_to_user(void __user *to,
+ const void *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __copy_tofrom_user(to, (__force void __user *)from, n);
+ if ((unsigned long)to < TASK_SIZE) {
+ over = (unsigned long)to + n - TASK_SIZE;
+ return __copy_tofrom_user(to, (__force void __user *)from,
+ n - over) + over;
+ }
+ return n;
+}
+
+#else /* __powerpc64__ */
+
+#define __copy_in_user(to, from, size) \
+ __copy_tofrom_user((to), (from), (size))
+
+extern unsigned long copy_from_user(void *to, const void __user *from,
+ unsigned long n);
+extern unsigned long copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n);
+
+#endif /* __powerpc64__ */
+
+static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+{
+	/* Inline fast path only for the sizes handled below; any other
+	 * constant size would leave "ret" uninitialised. */
+	if (__builtin_constant_p(n) &&
+	    (n == 1 || n == 2 || n == 4 || n == 8)) {
+ unsigned long ret;
+
+ switch (n) {
+ case 1:
+ __get_user_size(*(u8 *)to, from, 1, ret);
+ break;
+ case 2:
+ __get_user_size(*(u16 *)to, from, 2, ret);
+ break;
+ case 4:
+ __get_user_size(*(u32 *)to, from, 4, ret);
+ break;
+ case 8:
+ __get_user_size(*(u64 *)to, from, 8, ret);
+ break;
+ }
+ if (ret == 0)
+ return 0;
+ }
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+}
+
+static inline unsigned long __copy_to_user_inatomic(void __user *to,
+ const void *from, unsigned long n)
+{
+	/* As in __copy_from_user_inatomic(): only sizes that set "ret". */
+	if (__builtin_constant_p(n) &&
+	    (n == 1 || n == 2 || n == 4 || n == 8)) {
+ unsigned long ret;
+
+ switch (n) {
+ case 1:
+ __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
+ break;
+ case 2:
+ __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
+ break;
+ case 4:
+ __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
+ break;
+ case 8:
+ __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
+ break;
+ }
+ if (ret == 0)
+ return 0;
+ }
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+}
+
+static inline unsigned long __copy_from_user(void *to,
+ const void __user *from, unsigned long size)
+{
+ might_sleep();
+ return __copy_from_user_inatomic(to, from, size);
+}
+
+static inline unsigned long __copy_to_user(void __user *to,
+ const void *from, unsigned long size)
+{
+ might_sleep();
+ return __copy_to_user_inatomic(to, from, size);
+}
+
+extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+static inline unsigned long clear_user(void __user *addr, unsigned long size)
+{
+ might_sleep();
+ if (likely(access_ok(VERIFY_WRITE, addr, size)))
+ return __clear_user(addr, size);
+ if ((unsigned long)addr < TASK_SIZE) {
+ unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+ return __clear_user(addr, size - over) + over;
+ }
+ return size;
+}
+
+extern int __strncpy_from_user(char *dst, const char __user *src, long count);
+
+static inline long strncpy_from_user(char *dst, const char __user *src,
+ long count)
+{
+ might_sleep();
+ if (likely(access_ok(VERIFY_READ, src, 1)))
+ return __strncpy_from_user(dst, src, count);
+ return -EFAULT;
+}
+
+/*
+ * Return the size of a string (including the terminating NUL).
+ *
+ * Return 0 on error.
+ */
+extern int __strnlen_user(const char __user *str, long len, unsigned long top);
+
+/*
+ * Returns the length of the string at str (including the null byte),
+ * or 0 if we hit a page we can't access,
+ * or a value > len if we didn't find a null byte.
+ *
+ * The `top' parameter to __strnlen_user is to make sure that
+ * we can never overflow from the user area into kernel space.
+ */
+static inline int strnlen_user(const char __user *str, long len)
+{
+ unsigned long top = current->thread.fs.seg;
+
+ if ((unsigned long)str > top)
+ return 0;
+ return __strnlen_user(str, len, top);
+}
+
+#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ARCH_POWERPC_UACCESS_H */
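
Taken together: the checked get_user()/put_user() pairs perform their own
access_ok(), the __-prefixed variants leave the range check to the caller,
and copy_from_user() returns the number of bytes it could not copy. A short
sketch of both patterns; the wrapper names are illustrative:

	#include <linux/errno.h>
	#include <asm/uaccess.h>

	/* Single values: get_user()/put_user() return 0 or -EFAULT and
	 * do their own access_ok() check. */
	static long bump_user_counter(int __user *uptr)
	{
		int val;

		if (get_user(val, uptr))
			return -EFAULT;
		val++;
		return put_user(val, uptr);
	}

	/* Bulk copies: a non-zero return is the count of bytes left
	 * uncopied, so treat any remainder as a fault. */
	static long fetch_user_buf(void *kbuf, const void __user *ubuf,
				   unsigned long len)
	{
		if (copy_from_user(kbuf, ubuf, len))
			return -EFAULT;
		return 0;
	}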
diff --git a/include/asm-powerpc/ucontext.h b/include/asm-powerpc/ucontext.h
new file mode 100644
index 000000000000..d9a4ddf0cc86
--- /dev/null
+++ b/include/asm-powerpc/ucontext.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_UCONTEXT_H
+#define _ASM_POWERPC_UCONTEXT_H
+
+#ifdef __powerpc64__
+#include <asm/sigcontext.h>
+#else
+#include <asm/elf.h>
+#endif
+#include <asm/signal.h>
+
+#ifndef __powerpc64__
+struct mcontext {
+ elf_gregset_t mc_gregs;
+ elf_fpregset_t mc_fregs;
+ unsigned long mc_pad[2];
+ elf_vrregset_t mc_vregs __attribute__((__aligned__(16)));
+};
+#endif
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext __user *uc_link;
+ stack_t uc_stack;
+#ifndef __powerpc64__
+ int uc_pad[7];
+	struct mcontext	__user *uc_regs;	/* points to uc_mcontext field */
+#endif
+ sigset_t uc_sigmask;
+ /* glibc has 1024-bit signal masks, ours are 64-bit */
+#ifdef __powerpc64__
+ sigset_t __unused[15]; /* Allow for uc_sigmask growth */
+ struct sigcontext uc_mcontext; /* last for extensibility */
+#else
+ int uc_maskext[30];
+ int uc_pad2[3];
+ struct mcontext uc_mcontext;
+#endif
+};
+
+#endif /* _ASM_POWERPC_UCONTEXT_H */
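
The user-visible consequence of this layout: a SA_SIGINFO handler's third
argument points at this structure, with the register state reached through
uc_mcontext on 64-bit but through the uc_regs pointer on 32-bit. A hedged
user-space sketch, assuming glibc mirrors the kernel field names:

	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <ucontext.h>

	/* Illustrative SIGSEGV handler: report the faulting instruction
	 * address.  Index 32 of mc_gregs is NIP (PT_NIP) on 32-bit. */
	static void segv_handler(int sig, siginfo_t *info, void *uc_void)
	{
		struct ucontext *uc = uc_void;
	#ifdef __powerpc64__
		unsigned long nip = uc->uc_mcontext.regs->nip;
	#else
		unsigned long nip = uc->uc_regs->mc_gregs[32];
	#endif
		fprintf(stderr, "fault at 0x%lx\n", nip);
		_exit(1);
	}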
diff --git a/include/asm-ppc/bitops.h b/include/asm-ppc/bitops.h
deleted file mode 100644
index e30f536fd830..000000000000
--- a/include/asm-ppc/bitops.h
+++ /dev/null
@@ -1,460 +0,0 @@
-/*
- * bitops.h: Bit string operations on the ppc
- */
-
-#ifdef __KERNEL__
-#ifndef _PPC_BITOPS_H
-#define _PPC_BITOPS_H
-
-#include <linux/config.h>
-#include <linux/compiler.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-
-/*
- * The test_and_*_bit operations are taken to imply a memory barrier
- * on SMP systems.
- */
-#ifdef CONFIG_SMP
-#define SMP_WMB "eieio\n"
-#define SMP_MB "\nsync"
-#else
-#define SMP_WMB
-#define SMP_MB
-#endif /* CONFIG_SMP */
-
-static __inline__ void set_bit(int nr, volatile unsigned long * addr)
-{
- unsigned long old;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3 \n\
- or %0,%0,%2 \n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc" );
-}
-
-/*
- * non-atomic version
- */
-static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- *p |= mask;
-}
-
-/*
- * clear_bit doesn't imply a memory barrier
- */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-
-static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long old;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3 \n\
- andc %0,%0,%2 \n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-}
-
-/*
- * non-atomic version
- */
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- *p &= ~mask;
-}
-
-static __inline__ void change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long old;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3 \n\
- xor %0,%0,%2 \n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-}
-
-/*
- * non-atomic version
- */
-static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- *p ^= mask;
-}
-
-/*
- * test_and_*_bit do imply a memory barrier (?)
- */
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int old, t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%4 \n\
- or %1,%0,%3 \n"
- PPC405_ERR77(0,%4)
-" stwcx. %1,0,%4 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=&r" (t), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc", "memory");
-
- return (old & mask) != 0;
-}
-
-/*
- * non-atomic version
- */
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- unsigned long old = *p;
-
- *p = old | mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int old, t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%4 \n\
- andc %1,%0,%3 \n"
- PPC405_ERR77(0,%4)
-" stwcx. %1,0,%4 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=&r" (t), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc", "memory");
-
- return (old & mask) != 0;
-}
-
-/*
- * non-atomic version
- */
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- unsigned long old = *p;
-
- *p = old & ~mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int old, t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%4 \n\
- xor %1,%0,%3 \n"
- PPC405_ERR77(0,%4)
-" stwcx. %1,0,%4 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=&r" (t), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc", "memory");
-
- return (old & mask) != 0;
-}
-
-/*
- * non-atomic version
- */
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- unsigned long old = *p;
-
- *p = old ^ mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
-{
- return ((addr[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
-}
-
-/* Return the bit position of the most significant 1 bit in a word */
-static __inline__ int __ilog2(unsigned long x)
-{
- int lz;
-
- asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
- return 31 - lz;
-}
-
-static __inline__ int ffz(unsigned long x)
-{
- if ((x = ~x) == 0)
- return 32;
- return __ilog2(x & -x);
-}
-
-static inline int __ffs(unsigned long x)
-{
- return __ilog2(x & -x);
-}
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static __inline__ int ffs(int x)
-{
- return __ilog2(x & -x) + 1;
-}
-
-/*
- * fls: find last (most-significant) bit set.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
- */
-static __inline__ int fls(unsigned int x)
-{
- int lz;
-
- asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
- return 32 - lz;
-}
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-/*
- * Find the first bit set in a 140-bit bitmap.
- * The first 100 bits are unlikely to be set.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static __inline__ unsigned long find_next_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- tmp &= ~0UL << offset;
- if (size < 32)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != 0)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= ~0UL >> (32 - size);
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-/*
- * This implementation of find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h.
- */
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long find_next_zero_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (tmp != ~0U)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != ~0U)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffz(tmp);
-}
-
-
-#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
-#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
-#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
-#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
-
-static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
-{
- __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
-
- return (ADDR[nr >> 3] >> (nr & 7)) & 1;
-}
-
-/*
- * This implementation of ext2_find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
- */
-
-#define ext2_find_first_zero_bit(addr, size) \
- ext2_find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long ext2_find_next_zero_bit(const void *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = cpu_to_le32p(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (tmp != ~0U)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = cpu_to_le32p(p++)) != ~0U)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = cpu_to_le32p(p);
-found_first:
- tmp |= ~0U << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffz(tmp);
-}
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
-#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
-
-#endif /* _PPC_BITOPS_H */
-#endif /* __KERNEL__ */
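
The conventions spelled out in this removed file carry over to the merged
asm-powerpc/bitops.h: ffs() is 1-based like the libc routine (ffs(0) == 0,
ffs(0x8) == 4), __ffs() and ffz() are 0-based, and fls(0) == 0 while
fls(0x80000000) == 32. The find_first_bit()/find_next_bit() pair composes
into the usual scan loop, sketched here with a hypothetical per-bit callback:

	/* Illustrative bitmap walk over "size" bits of "map". */
	static void walk_set_bits(const unsigned long *map, unsigned long size,
				  void (*cb)(unsigned long bit))
	{
		unsigned long bit;

		for (bit = find_first_bit(map, size);
		     bit < size;
		     bit = find_next_bit(map, size, bit + 1))
			cb(bit);
	}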
diff --git a/include/asm-ppc/commproc.h b/include/asm-ppc/commproc.h
index 5bbb8e2c1c6d..973e60908234 100644
--- a/include/asm-ppc/commproc.h
+++ b/include/asm-ppc/commproc.h
@@ -83,6 +83,8 @@ extern uint m8xx_cpm_hostalloc(uint size);
extern int m8xx_cpm_hostfree(uint start);
extern void m8xx_cpm_hostdump(void);
+extern void cpm_load_patch(volatile immap_t *immr);
+
/* Buffer descriptors used by many of the CPM protocols.
*/
typedef struct cpm_buf_desc {
diff --git a/include/asm-ppc/futex.h b/include/asm-ppc/futex.h
deleted file mode 100644
index 9feff4ce1424..000000000000
--- a/include/asm-ppc/futex.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#ifdef __KERNEL__
-
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
-#endif
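
The removed stub already documents the encoded_op packing that the renamed
asm-powerpc/futex.h decodes the same way: op in bits 31-28, cmp in 27-24,
then two sign-extended 12-bit arguments. A worked decode of the illustrative
value 0x15001000, i.e. FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0):

	/*
	 *   op     = (0x15001000 >> 28) & 7          = 1  (FUTEX_OP_ADD)
	 *   cmp    = (0x15001000 >> 24) & 15         = 5  (FUTEX_OP_CMP_GT)
	 *   oparg  = ((int)(0x15001000 << 8)) >> 20  = 1
	 *   cmparg = ((int)(0x15001000 << 20)) >> 20 = 0
	 *
	 * i.e. atomically add 1 to *uaddr and report whether the old
	 * value was greater than 0.
	 */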
diff --git a/include/asm-ppc/ipcbuf.h b/include/asm-ppc/ipcbuf.h
deleted file mode 100644
index fab6752c7480..000000000000
--- a/include/asm-ppc/ipcbuf.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __PPC_IPCBUF_H__
-#define __PPC_IPCBUF_H__
-
-/*
- * The ipc64_perm structure for PPC architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 1 32-bit value to fill up for 8-byte alignment
- * - 2 miscellaneous 64-bit values (so that this structure matches
- * PPC64 ipc64_perm)
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid_t uid;
- __kernel_gid_t gid;
- __kernel_uid_t cuid;
- __kernel_gid_t cgid;
- __kernel_mode_t mode;
- unsigned long seq;
- unsigned int __pad2;
- unsigned long long __unused1;
- unsigned long long __unused2;
-};
-
-#endif /* __PPC_IPCBUF_H__ */
diff --git a/include/asm-ppc/kexec.h b/include/asm-ppc/kexec.h
deleted file mode 100644
index 6d2aa0aa4642..000000000000
--- a/include/asm-ppc/kexec.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef _PPC_KEXEC_H
-#define _PPC_KEXEC_H
-
-#ifdef CONFIG_KEXEC
-
-/*
- * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
- * I.e. Maximum page that is mapped directly into kernel memory,
- * and kmap is not required.
- *
- * Someone correct me if FIXADDR_START - PAGEOFFSET is not the correct
- * calculation for the amount of memory directly mappable into the
- * kernel memory space.
- */
-
-/* Maximum physical address we can use pages from */
-#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
-/* Maximum address we can reach in physical address mode */
-#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
-/* Maximum address we can use for the control code buffer */
-#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
-
-#define KEXEC_CONTROL_CODE_SIZE 4096
-
-/* The native architecture */
-#define KEXEC_ARCH KEXEC_ARCH_PPC
-
-#ifndef __ASSEMBLY__
-
-extern void *crash_notes;
-
-struct kimage;
-
-extern void machine_kexec_simple(struct kimage *image);
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* CONFIG_KEXEC */
-
-#endif /* _PPC_KEXEC_H */
diff --git a/include/asm-ppc/ptrace.h b/include/asm-ppc/ptrace.h
deleted file mode 100644
index c34fb4e37a97..000000000000
--- a/include/asm-ppc/ptrace.h
+++ /dev/null
@@ -1,152 +0,0 @@
-#ifndef _PPC_PTRACE_H
-#define _PPC_PTRACE_H
-
-/*
- * This struct defines the way the registers are stored on the
- * kernel stack during a system call or other kernel entry.
- *
- * this should only contain volatile regs
- * since we can keep non-volatile in the thread_struct
- * should set this up when only volatiles are saved
- * by intr code.
- *
- * Since this is going on the stack, *CARE MUST BE TAKEN* to insure
- * that the overall structure is a multiple of 16 bytes in length.
- *
- * Note that the offsets of the fields in this struct correspond with
- * the PT_* values below. This simplifies arch/ppc/kernel/ptrace.c.
- */
-
-#ifndef __ASSEMBLY__
-struct pt_regs {
- unsigned long gpr[32];
- unsigned long nip;
- unsigned long msr;
- unsigned long orig_gpr3; /* Used for restarting system calls */
- unsigned long ctr;
- unsigned long link;
- unsigned long xer;
- unsigned long ccr;
- unsigned long mq; /* 601 only (not used at present) */
- /* Used on APUS to hold IPL value. */
- unsigned long trap; /* Reason for being here */
- /* N.B. for critical exceptions on 4xx, the dar and dsisr
- fields are overloaded to hold srr0 and srr1. */
- unsigned long dar; /* Fault registers */
- unsigned long dsisr; /* on 4xx/Book-E used for ESR */
- unsigned long result; /* Result of a system call */
-};
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
-
-/* Size of stack frame allocated when calling signal handler. */
-#define __SIGNAL_FRAMESIZE 64
-
-#ifndef __ASSEMBLY__
-#define instruction_pointer(regs) ((regs)->nip)
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
-#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
-
-#define force_successful_syscall_return() \
- do { \
- current_thread_info()->syscall_noerror = 1; \
- } while(0)
-
-/*
- * We use the least-significant bit of the trap field to indicate
- * whether we have saved the full set of registers, or only a
- * partial set. A 1 there means the partial set.
- * On 4xx we use the next bit to indicate whether the exception
- * is a critical exception (1 means it is).
- */
-#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
-#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) == 0)
-#define TRAP(regs) ((regs)->trap & ~0xF)
-
-#define CHECK_FULL_REGS(regs) \
-do { \
- if ((regs)->trap & 1) \
- printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
-} while (0)
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
-/*
- * Offsets used by 'ptrace' system call interface.
- * These can't be changed without breaking binary compatibility
- * with MkLinux, etc.
- */
-#define PT_R0 0
-#define PT_R1 1
-#define PT_R2 2
-#define PT_R3 3
-#define PT_R4 4
-#define PT_R5 5
-#define PT_R6 6
-#define PT_R7 7
-#define PT_R8 8
-#define PT_R9 9
-#define PT_R10 10
-#define PT_R11 11
-#define PT_R12 12
-#define PT_R13 13
-#define PT_R14 14
-#define PT_R15 15
-#define PT_R16 16
-#define PT_R17 17
-#define PT_R18 18
-#define PT_R19 19
-#define PT_R20 20
-#define PT_R21 21
-#define PT_R22 22
-#define PT_R23 23
-#define PT_R24 24
-#define PT_R25 25
-#define PT_R26 26
-#define PT_R27 27
-#define PT_R28 28
-#define PT_R29 29
-#define PT_R30 30
-#define PT_R31 31
-
-#define PT_NIP 32
-#define PT_MSR 33
-#ifdef __KERNEL__
-#define PT_ORIG_R3 34
-#endif
-#define PT_CTR 35
-#define PT_LNK 36
-#define PT_XER 37
-#define PT_CCR 38
-#define PT_MQ 39
-
-#define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
-#define PT_FPR31 (PT_FPR0 + 2*31)
-#define PT_FPSCR (PT_FPR0 + 2*32 + 1)
-
-/* Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go */
-#define PTRACE_GETVRREGS 18
-#define PTRACE_SETVRREGS 19
-
-/* Get/set all the upper 32-bits of the SPE registers, accumulator, and
- * spefscr, in one go */
-#define PTRACE_GETEVRREGS 20
-#define PTRACE_SETEVRREGS 21
-
-/*
- * Get or set a debug register. The first 16 are DABR registers and the
- * second 16 are IABR registers.
- */
-#define PTRACE_GET_DEBUGREG 25
-#define PTRACE_SET_DEBUGREG 26
-
-#endif
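
The PT_* values double as user-space PTRACE_PEEKUSER offsets once scaled by
the register size, and they survive unchanged in the merged
asm-powerpc/ptrace.h. A hedged user-space sketch reading a stopped child's
program counter (error handling elided):

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <asm/ptrace.h>		/* PT_NIP */

	/* Illustrative: fetch the NIP of a traced, stopped child. */
	static long read_child_nip(pid_t pid)
	{
		return ptrace(PTRACE_PEEKUSER, pid,
			      (void *)(PT_NIP * sizeof(unsigned long)), NULL);
	}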
diff --git a/include/asm-ppc/sigcontext.h b/include/asm-ppc/sigcontext.h
deleted file mode 100644
index b7a417e0a921..000000000000
--- a/include/asm-ppc/sigcontext.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_PPC_SIGCONTEXT_H
-#define _ASM_PPC_SIGCONTEXT_H
-
-#include <asm/ptrace.h>
-#include <linux/compiler.h>
-
-struct sigcontext {
- unsigned long _unused[4];
- int signal;
- unsigned long handler;
- unsigned long oldmask;
- struct pt_regs __user *regs;
-};
-
-#endif
diff --git a/include/asm-ppc/stat.h b/include/asm-ppc/stat.h
deleted file mode 100644
index cadb34298496..000000000000
--- a/include/asm-ppc/stat.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef _PPC_STAT_H
-#define _PPC_STAT_H
-
-#ifdef __KERNEL__
-#include <linux/types.h>
-#endif /* __KERNEL__ */
-
-struct __old_kernel_stat {
- unsigned short st_dev;
- unsigned short st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned long st_size;
- unsigned long st_atime;
- unsigned long st_mtime;
- unsigned long st_ctime;
-};
-
-#define STAT_HAVE_NSEC 1
-
-struct stat {
- unsigned st_dev;
- ino_t st_ino;
- mode_t st_mode;
- nlink_t st_nlink;
- uid_t st_uid;
- gid_t st_gid;
- unsigned st_rdev;
- off_t st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-/* This matches struct stat64 in glibc2.1.
- */
-struct stat64 {
- unsigned long long st_dev; /* Device. */
- unsigned long long st_ino; /* File serial number. */
- unsigned int st_mode; /* File mode. */
- unsigned int st_nlink; /* Link count. */
- unsigned int st_uid; /* User ID of the file's owner. */
- unsigned int st_gid; /* Group ID of the file's group. */
- unsigned long long st_rdev; /* Device number, if device. */
- unsigned short int __pad2;
- long long st_size; /* Size of file, in bytes. */
- long st_blksize; /* Optimal block size for I/O. */
-
- long long st_blocks; /* Number 512-byte blocks allocated. */
- long st_atime; /* Time of last access. */
- unsigned long st_atime_nsec;
- long st_mtime; /* Time of last modification. */
- unsigned long int st_mtime_nsec;
- long st_ctime; /* Time of last status change. */
- unsigned long int st_ctime_nsec;
- unsigned long int __unused4;
- unsigned long int __unused5;
-};
-#endif
diff --git a/include/asm-ppc/tlbflush.h b/include/asm-ppc/tlbflush.h
deleted file mode 100644
index 9afee4ffc835..000000000000
--- a/include/asm-ppc/tlbflush.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * include/asm-ppc/tlbflush.h
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifdef __KERNEL__
-#ifndef _PPC_TLBFLUSH_H
-#define _PPC_TLBFLUSH_H
-
-#include <linux/config.h>
-#include <linux/mm.h>
-
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
-#if defined(CONFIG_4xx)
-
-#ifndef CONFIG_44x
-#define __tlbia() asm volatile ("sync; tlbia; isync" : : : "memory")
-#else
-#define __tlbia _tlbia
-#endif
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
- { __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
- { __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
- { __tlbia(); }
-
-#elif defined(CONFIG_FSL_BOOKE)
-
-/* TODO: determine if flush_tlb_range & flush_tlb_kernel_range
- * are best implemented as tlbia vs specific tlbie's */
-
-#define __tlbia() _tlbia()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
- { __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
- { __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
- { __tlbia(); }
-
-#elif defined(CONFIG_8xx)
-#define __tlbia() asm volatile ("tlbia; sync" : : : "memory")
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
- { __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
- { __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
- { __tlbia(); }
-
-#else /* 6xx, 7xx, 7xxx cpus */
-struct mm_struct;
-struct vm_area_struct;
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-#endif
-
-/*
- * This is called in munmap when we have freed up some page-table
- * pages. We don't need to do anything here, there's nothing special
- * about our page-table pages. -- paulus
- */
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-#endif /* _PPC_TLBFLUSH_H */
-#endif /*__KERNEL__ */
diff --git a/include/asm-ppc/uaccess.h b/include/asm-ppc/uaccess.h
deleted file mode 100644
index 63f56224da8c..000000000000
--- a/include/asm-ppc/uaccess.h
+++ /dev/null
@@ -1,393 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _PPC_UACCESS_H
-#define _PPC_UACCESS_H
-
-#ifndef __ASSEMBLY__
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <asm/processor.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- *
- * The fs/ds values are now the highest legal address in the "segment".
- * This simplifies the checking in the routines below.
- */
-
-#define KERNEL_DS ((mm_segment_t) { ~0UL })
-#define USER_DS ((mm_segment_t) { TASK_SIZE - 1 })
-
-#define get_ds() (KERNEL_DS)
-#define get_fs() (current->thread.fs)
-#define set_fs(val) (current->thread.fs = (val))
-
-#define segment_eq(a,b) ((a).seg == (b).seg)
-
-#define __access_ok(addr,size) \
- ((addr) <= current->thread.fs.seg \
- && ((size) == 0 || (size) - 1 <= current->thread.fs.seg - (addr)))
-
-#define access_ok(type, addr, size) \
- (__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size)))
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- *
- * As we use the same address space for kernel and user data on the
- * PowerPC, we can just do these as direct assignments. (Of course, the
- * exception handling means that it's no longer "just"...)
- *
- * The "user64" versions of the user access functions are versions that
- * allow access of 64-bit data. The "get_user" functions do not
- * properly handle 64-bit data because the value gets down cast to a long.
- * The "put_user" functions already handle 64-bit data properly but we add
- * "user64" versions for completeness
- */
-#define get_user(x,ptr) \
- __get_user_check((x),(ptr),sizeof(*(ptr)))
-#define get_user64(x,ptr) \
- __get_user64_check((x),(ptr),sizeof(*(ptr)))
-#define put_user(x,ptr) \
- __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-#define put_user64(x,ptr) put_user(x,ptr)
-
-#define __get_user(x,ptr) \
- __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __get_user64(x,ptr) \
- __get_user64_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __put_user(x,ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-#define __put_user64(x,ptr) __put_user(x,ptr)
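A small user-space demonstration of the truncation mentioned above, modelling ppc32's 32-bit unsigned long with uint32_t (illustrative only):

#include <assert.h>
#include <stdint.h>

/* Models the downcast: __gu_val is an unsigned long, which is 32 bits
 * on ppc32 (uint32_t here), so a 64-bit value read through plain
 * get_user loses its upper half. */
int main(void)
{
	uint64_t src = 0x0123456789abcdefULL;
	uint32_t gu_val = (uint32_t)src;	/* the role of __gu_val */
	uint64_t dst = gu_val;			/* upper 32 bits are gone */

	assert(dst == 0x89abcdefULL);
	return 0;
}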
-
-extern long __put_user_bad(void);
-
-#define __put_user_nocheck(x,ptr,size) \
-({ \
- long __pu_err; \
- __chk_user_ptr(ptr); \
- __put_user_size((x),(ptr),(size),__pu_err); \
- __pu_err; \
-})
-
-#define __put_user_check(x,ptr,size) \
-({ \
- long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
- __put_user_size((x),__pu_addr,(size),__pu_err); \
- __pu_err; \
-})
-
-#define __put_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: \
- __put_user_asm(x, ptr, retval, "stb"); \
- break; \
- case 2: \
- __put_user_asm(x, ptr, retval, "sth"); \
- break; \
- case 4: \
- __put_user_asm(x, ptr, retval, "stw"); \
- break; \
- case 8: \
- __put_user_asm2(x, ptr, retval); \
- break; \
- default: \
- __put_user_bad(); \
- } \
-} while (0)
-
-/*
- * We don't tell gcc that we are accessing memory, but this is OK
- * because we do not write to any memory gcc knows about, so there
- * are no aliasing issues.
- */
-#define __put_user_asm(x, addr, err, op) \
- __asm__ __volatile__( \
- "1: "op" %1,0(%2)\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: li %0,%3\n" \
- " b 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : "=r" (err) \
- : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-
-#define __put_user_asm2(x, addr, err) \
- __asm__ __volatile__( \
- "1: stw %1,0(%2)\n" \
- "2: stw %1+1,4(%2)\n" \
- "3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: li %0,%3\n" \
- " b 3b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 1b,4b\n" \
- " .long 2b,4b\n" \
- ".previous" \
- : "=r" (err) \
- : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-
-#define __get_user_nocheck(x, ptr, size) \
-({ \
- long __gu_err; \
- unsigned long __gu_val; \
- __chk_user_ptr(ptr); \
- __get_user_size(__gu_val, (ptr), (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-#define __get_user64_nocheck(x, ptr, size) \
-({ \
- long __gu_err; \
- long long __gu_val; \
- __chk_user_ptr(ptr); \
- __get_user_size64(__gu_val, (ptr), (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-#define __get_user_check(x, ptr, size) \
-({ \
- long __gu_err = -EFAULT; \
- unsigned long __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- if (access_ok(VERIFY_READ, __gu_addr, (size))) \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-#define __get_user64_check(x, ptr, size) \
-({ \
- long __gu_err = -EFAULT; \
- long long __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- if (access_ok(VERIFY_READ, __gu_addr, (size))) \
- __get_user_size64(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-extern long __get_user_bad(void);
-
-#define __get_user_size(x, ptr, size, retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: \
- __get_user_asm(x, ptr, retval, "lbz"); \
- break; \
- case 2: \
- __get_user_asm(x, ptr, retval, "lhz"); \
- break; \
- case 4: \
- __get_user_asm(x, ptr, retval, "lwz"); \
- break; \
- default: \
- x = __get_user_bad(); \
- } \
-} while (0)
-
-#define __get_user_size64(x, ptr, size, retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: \
- __get_user_asm(x, ptr, retval, "lbz"); \
- break; \
- case 2: \
- __get_user_asm(x, ptr, retval, "lhz"); \
- break; \
- case 4: \
- __get_user_asm(x, ptr, retval, "lwz"); \
- break; \
- case 8: \
- __get_user_asm2(x, ptr, retval); \
- break; \
- default: \
- x = __get_user_bad(); \
- } \
-} while (0)
-
-#define __get_user_asm(x, addr, err, op) \
- __asm__ __volatile__( \
- "1: "op" %1,0(%2)\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: li %0,%3\n" \
- " li %1,0\n" \
- " b 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : "=r"(err), "=r"(x) \
- : "b"(addr), "i"(-EFAULT), "0"(err))
-
-#define __get_user_asm2(x, addr, err) \
- __asm__ __volatile__( \
- "1: lwz %1,0(%2)\n" \
- "2: lwz %1+1,4(%2)\n" \
- "3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: li %0,%3\n" \
- " li %1,0\n" \
- " li %1+1,0\n" \
- " b 3b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 1b,4b\n" \
- " .long 2b,4b\n" \
- ".previous" \
- : "=r"(err), "=&r"(x) \
- : "b"(addr), "i"(-EFAULT), "0"(err))
-
-/* more complex routines */
-
-extern int __copy_tofrom_user(void __user *to, const void __user *from,
- unsigned long size);
-
-extern inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- unsigned long over;
-
- if (access_ok(VERIFY_READ, from, n))
- return __copy_tofrom_user((__force void __user *)to, from, n);
- if ((unsigned long)from < TASK_SIZE) {
- over = (unsigned long)from + n - TASK_SIZE;
- return __copy_tofrom_user((__force void __user *)to, from, n - over) + over;
- }
- return n;
-}
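The short-copy arithmetic is easiest to see with numbers. A standalone sketch, assuming a hypothetical TASK_SIZE of 0x80000000:

#include <stdio.h>

#define TASK_SIZE 0x80000000UL

int main(void)
{
	unsigned long from = 0x7ffff000UL, n = 0x3000UL;
	unsigned long over = from + n - TASK_SIZE;

	/* Only n - over bytes lie below TASK_SIZE and get copied; the
	 * return value reports "over" extra bytes as not copied. */
	printf("copied %#lx, reported uncopied %#lx\n", n - over, over);
	return 0;
}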
-
-extern inline unsigned long
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- unsigned long over;
-
- if (access_ok(VERIFY_WRITE, to, n))
- return __copy_tofrom_user(to, (__force void __user *) from, n);
- if ((unsigned long)to < TASK_SIZE) {
- over = (unsigned long)to + n - TASK_SIZE;
- return __copy_tofrom_user(to, (__force void __user *) from, n - over) + over;
- }
- return n;
-}
-
-static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long size)
-{
- return __copy_tofrom_user((__force void __user *)to, from, size);
-}
-
-static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long size)
-{
- return __copy_tofrom_user(to, (__force void __user *)from, size);
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-extern unsigned long __clear_user(void __user *addr, unsigned long size);
-
-extern inline unsigned long
-clear_user(void __user *addr, unsigned long size)
-{
- if (access_ok(VERIFY_WRITE, addr, size))
- return __clear_user(addr, size);
- if ((unsigned long)addr < TASK_SIZE) {
- unsigned long over = (unsigned long)addr + size - TASK_SIZE;
- return __clear_user(addr, size - over) + over;
- }
- return size;
-}
-
-extern int __strncpy_from_user(char *dst, const char __user *src, long count);
-
-extern inline long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
- if (access_ok(VERIFY_READ, src, 1))
- return __strncpy_from_user(dst, src, count);
- return -EFAULT;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-
-extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- *
- * The `top' parameter to __strnlen_user is to make sure that
- * we can never overflow from the user area into kernel space.
- */
-extern __inline__ int strnlen_user(const char __user *str, long len)
-{
- unsigned long top = current->thread.fs.seg;
-
- if ((unsigned long)str > top)
- return 0;
- return __strnlen_user(str, len, top);
-}
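A portable sketch of that contract, without the fault handling (names hypothetical):

#include <assert.h>

/* Never scan past "top"; return the length including the NUL, or
 * something > len if no NUL was found. The real __strnlen_user
 * additionally recovers from faults via the exception table. */
static long strnlen_bounded(const char *str, long len, unsigned long top)
{
	const char *p = str;

	while ((unsigned long)p <= top && p - str < len) {
		if (*p == '\0')
			return p - str + 1;	/* include the NUL */
		p++;
	}
	return len + 1;				/* no NUL in range */
}

int main(void)
{
	const char s[] = "abc";

	assert(strnlen_bounded(s, 16, (unsigned long)s + sizeof(s)) == 4);
	return 0;
}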
-
-#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _PPC_UACCESS_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/ucontext.h b/include/asm-ppc/ucontext.h
deleted file mode 100644
index 664bc984d51f..000000000000
--- a/include/asm-ppc/ucontext.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _ASMPPC_UCONTEXT_H
-#define _ASMPPC_UCONTEXT_H
-
-#include <asm/elf.h>
-#include <asm/signal.h>
-
-struct mcontext {
- elf_gregset_t mc_gregs;
- elf_fpregset_t mc_fregs;
- unsigned long mc_pad[2];
- elf_vrregset_t mc_vregs __attribute__((__aligned__(16)));
-};
-
-struct ucontext {
- unsigned long uc_flags;
- struct ucontext __user *uc_link;
- stack_t uc_stack;
- int uc_pad[7];
- struct mcontext __user *uc_regs;/* points to uc_mcontext field */
- sigset_t uc_sigmask;
- /* glibc has 1024-bit signal masks, ours are 64-bit */
- int uc_maskext[30];
- int uc_pad2[3];
- struct mcontext uc_mcontext;
-};
-
-#endif /* !_ASMPPC_UCONTEXT_H */
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
deleted file mode 100644
index dbfa42ef4a99..000000000000
--- a/include/asm-ppc64/bitops.h
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * PowerPC64 atomic bit operations.
- * Dave Engebretsen, Todd Inglett, Don Reed, Pat McCarthy, Peter Bergner,
- * Anton Blanchard
- *
- * Originally taken from the 32b PPC code. Modified to use 64b values for
- * the various counters & memory references.
- *
- * Bitops are odd when viewed on big-endian systems. They were designed
- * on little endian so the size of the bitset doesn't matter (low order bytes
- * come first) as long as the bit in question is valid.
- *
- * Bits are "tested" often using the C expression (val & (1<<nr)) so we do
- * our best to stay compatible with that. The assumption is that val will
- * be unsigned long for such tests. As such, we assume the bits are stored
- * as an array of unsigned long (the usual case is a single unsigned long,
- * of course). Here's an example bitset with bit numbering:
- *
- * |63..........0|127........64|191.......128|255.......192|
- *
- * This leads to a problem. If an int, short or char is passed as a
- * bitset, it will be a bad memory reference since we access the bitmap
- * in unsigned-long-sized (64-bit here) chunks.
- *
- * There are a few little-endian macros used mostly for filesystem bitmaps,
- * these work on similar bit arrays layouts, but byte-oriented:
- *
- * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
- *
- * The main difference is that bits 3-5 of the bit number need to be
- * flipped compared to the big-endian bit numbering. This is achieved
- * by XORing the bit number with 0b111000 (0x38); a small check of the
- * mapping follows this comment.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
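A quick user-space check of the 0x38 mapping described above (illustrative only):

#include <assert.h>

/* XORing the bit number with 0b111000 flips the byte index within a
 * 64-bit word, mapping little-endian (byte-oriented) bit numbers onto
 * the big-endian storage. */
int main(void)
{
	/* LE bit 0 lives in byte 0, i.e. bits 56..63 of the BE word */
	assert((0 ^ 0x38) == 56);
	/* LE bit 7 is the top bit of byte 0 */
	assert((7 ^ 0x38) == 63);
	/* LE bit 8 starts byte 1, i.e. BE bit 48 */
	assert((8 ^ 0x38) == 48);
	return 0;
}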
-
-#ifndef _PPC64_BITOPS_H
-#define _PPC64_BITOPS_H
-
-#ifdef __KERNEL__
-
-#include <asm/synch.h>
-
-/*
- * clear_bit doesn't imply a memory barrier
- */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-
-static __inline__ int test_bit(unsigned long nr, __const__ volatile unsigned long *addr)
-{
- return (1UL & (addr[nr >> 6] >> (nr & 63)));
-}
-
-static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long old;
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%3 # set_bit\n\
- or %0,%0,%2\n\
- stdcx. %0,0,%3\n\
- bne- 1b"
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-}
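For comparison, the same atomic OR written with a GCC-style builtin; this is not how the header did it, merely a sketch of the semantics (relaxed ordering, matching set_bit's lack of a barrier):

static inline void set_bit_builtin(unsigned long nr,
				   volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x3f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);

	/* One atomic read-modify-write, like the ldarx/stdcx. loop */
	__atomic_fetch_or(p, mask, __ATOMIC_RELAXED);
}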
-
-static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long old;
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%3 # clear_bit\n\
- andc %0,%0,%2\n\
- stdcx. %0,0,%3\n\
- bne- 1b"
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-}
-
-static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long old;
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%3 # change_bit\n\
- xor %0,%0,%2\n\
- stdcx. %0,0,%3\n\
- bne- 1b"
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-}
-
-static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long old, t;
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: ldarx %0,0,%3 # test_and_set_bit\n\
- or %1,%0,%2 \n\
- stdcx. %1,0,%3 \n\
- bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (old), "=&r" (t)
- : "r" (mask), "r" (p)
- : "cc", "memory");
-
- return (old & mask) != 0;
-}
-
-static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long old, t;
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: ldarx %0,0,%3 # test_and_clear_bit\n\
- andc %1,%0,%2\n\
- stdcx. %1,0,%3\n\
- bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (old), "=&r" (t)
- : "r" (mask), "r" (p)
- : "cc", "memory");
-
- return (old & mask) != 0;
-}
-
-static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long old, t;
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: ldarx %0,0,%3 # test_and_change_bit\n\
- xor %1,%0,%2\n\
- stdcx. %1,0,%3\n\
- bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (old), "=&r" (t)
- : "r" (mask), "r" (p)
- : "cc", "memory");
-
- return (old & mask) != 0;
-}
-
-static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
-{
- unsigned long old;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%3 # set_bit\n\
- or %0,%0,%2\n\
- stdcx. %0,0,%3\n\
- bne- 1b"
- : "=&r" (old), "=m" (*addr)
- : "r" (mask), "r" (addr), "m" (*addr)
- : "cc");
-}
-
-/*
- * non-atomic versions
- */
-static __inline__ void __set_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
-
- *p |= mask;
-}
-
-static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
-
- *p &= ~mask;
-}
-
-static __inline__ void __change_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
-
- *p ^= mask;
-}
-
-static __inline__ int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
- unsigned long old = *p;
-
- *p = old | mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
- unsigned long old = *p;
-
- *p = old & ~mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int __test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x3f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
- unsigned long old = *p;
-
- *p = old ^ mask;
- return (old & mask) != 0;
-}
-
-/*
- * Return the zero-based bit position (from RIGHT TO LEFT, 63 -> 0) of the
- * most significant (left-most) 1-bit in a double word.
- */
-static __inline__ int __ilog2(unsigned long x)
-{
- int lz;
-
- asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
- return 63 - lz;
-}
-
-/*
- * Determines the bit position of the least significant (rightmost) 0 bit
- * in the specified double word. The returned bit position will be zero-based,
- * starting from the right side (63 - 0).
- */
-static __inline__ unsigned long ffz(unsigned long x)
-{
- /* no zero exists anywhere in the 8 byte area. */
- if ((x = ~x) == 0)
- return 64;
-
- /*
- * Calculate the bit position of the least significant '1' bit in x
- * (since x has been inverted, this will actually be the least
- * significant '0' bit in the original x). Note: (x & -x) gives us a
- * mask that is the least significant (RIGHT-most) 1-bit of x.
- */
- return __ilog2(x & -x);
-}
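A worked example of the (x & -x) identity the comment relies on:

#include <assert.h>

/* In two's complement, x & -x isolates the least significant set bit. */
int main(void)
{
	unsigned long x = 0xb0UL;		/* 0b10110000 */

	assert((x & -x) == 0x10UL);		/* lowest 1-bit only */
	/* __ffs(x) then reduces to __ilog2 of that mask:
	 * __ilog2(0x10) == 4, the index of the lowest set bit. */
	return 0;
}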
-
-static __inline__ int __ffs(unsigned long x)
-{
- return __ilog2(x & -x);
-}
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static __inline__ int ffs(int x)
-{
- unsigned long i = (unsigned long)x;
- return __ilog2(i & -i) + 1;
-}
-
-/*
- * fls: find last (most-significant) bit set.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
- */
-#define fls(x) generic_fls(x)
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-
-extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-extern unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
-#define find_first_zero_le_bit(addr, size) \
- find_next_zero_le_bit((addr), (size), 0)
-
-static __inline__ int test_le_bit(unsigned long nr, __const__ unsigned long * addr)
-{
- __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
- return (ADDR[nr >> 3] >> (nr & 7)) & 1;
-}
-
-#define test_and_clear_le_bit(nr, addr) \
- test_and_clear_bit((nr) ^ 0x38, (addr))
-#define test_and_set_le_bit(nr, addr) \
- test_and_set_bit((nr) ^ 0x38, (addr))
-
-/*
- * non-atomic versions
- */
-
-#define __set_le_bit(nr, addr) \
- __set_bit((nr) ^ 0x38, (addr))
-#define __clear_le_bit(nr, addr) \
- __clear_bit((nr) ^ 0x38, (addr))
-#define __test_and_clear_le_bit(nr, addr) \
- __test_and_clear_bit((nr) ^ 0x38, (addr))
-#define __test_and_set_le_bit(nr, addr) \
- __test_and_set_bit((nr) ^ 0x38, (addr))
-
-#define ext2_set_bit(nr,addr) \
- __test_and_set_le_bit((nr), (unsigned long*)addr)
-#define ext2_clear_bit(nr, addr) \
- __test_and_clear_le_bit((nr), (unsigned long*)addr)
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_le_bit((nr), (unsigned long*)addr)
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_le_bit((nr), (unsigned long*)addr)
-
-
-#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
-#define ext2_find_first_zero_bit(addr, size) \
- find_first_zero_le_bit((unsigned long*)addr, size)
-#define ext2_find_next_zero_bit(addr, size, off) \
- find_next_zero_le_bit((unsigned long*)addr, size, off)
-
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
-
-#endif /* __KERNEL__ */
-#endif /* _PPC64_BITOPS_H */
diff --git a/include/asm-ppc64/dart.h b/include/asm-ppc64/dart.h
deleted file mode 100644
index cdf8a2dec05f..000000000000
--- a/include/asm-ppc64/dart.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ASM_DART_H
-#define _ASM_DART_H
-
-
-/* physical base of DART registers */
-#define DART_BASE 0xf8033000UL
-
-/* Offset from base to control register */
-#define DARTCNTL 0
-/* Offset from base to exception register */
-#define DARTEXCP 0x10
-/* Offset from base to TLB tag registers */
-#define DARTTAG 0x1000
-
-
-/* Control Register fields */
-
-/* base address of table (pfn) */
-#define DARTCNTL_BASE_MASK 0xfffff
-#define DARTCNTL_BASE_SHIFT 12
-
-#define DARTCNTL_FLUSHTLB 0x400
-#define DARTCNTL_ENABLE 0x200
-
-/* size of table in pages */
-#define DARTCNTL_SIZE_MASK 0x1ff
-#define DARTCNTL_SIZE_SHIFT 0
-
-
-/* DART table fields */
-
-#define DARTMAP_VALID 0x80000000
-#define DARTMAP_RPNMASK 0x00ffffff
-
-
-#define DART_PAGE_SHIFT 12
-#define DART_PAGE_SIZE (1 << DART_PAGE_SHIFT)
-#define DART_PAGE_FACTOR (PAGE_SHIFT - DART_PAGE_SHIFT)
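A hedged sketch of how these field macros compose; whether the base pfn is read back with exactly this shift/mask pairing is an assumption, and the register value is made up:

#include <stdio.h>

#define DARTCNTL_BASE_MASK	0xfffff
#define DARTCNTL_BASE_SHIFT	12
#define DARTCNTL_SIZE_MASK	0x1ff
#define DARTCNTL_SIZE_SHIFT	0

int main(void)
{
	unsigned int cntl = (0x12345u << DARTCNTL_BASE_SHIFT) | 0x40u;

	/* Base is a pfn; size is the table length in pages. */
	unsigned int base_pfn = (cntl >> DARTCNTL_BASE_SHIFT) & DARTCNTL_BASE_MASK;
	unsigned int pages = (cntl >> DARTCNTL_SIZE_SHIFT) & DARTCNTL_SIZE_MASK;

	printf("table at pfn %#x, %u pages\n", base_pfn, pages);
	return 0;
}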
-
-
-#endif
diff --git a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h
index bd7c9532d77b..77fc07c3c6bd 100644
--- a/include/asm-ppc64/io.h
+++ b/include/asm-ppc64/io.h
@@ -13,7 +13,7 @@
#include <asm/page.h>
#include <asm/byteorder.h>
#ifdef CONFIG_PPC_ISERIES
-#include <asm/iSeries/iSeries_io.h>
+#include <asm/iseries/iseries_io.h>
#endif
#include <asm/synch.h>
#include <asm/delay.h>
diff --git a/include/asm-ppc64/ipcbuf.h b/include/asm-ppc64/ipcbuf.h
deleted file mode 100644
index fa393c8342af..000000000000
--- a/include/asm-ppc64/ipcbuf.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __PPC64_IPCBUF_H__
-#define __PPC64_IPCBUF_H__
-
-/*
- * The ipc64_perm structure for the PPC is identical to kern_ipc_perm
- * as we have always had 32-bit UIDs and GIDs in the kernel.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid_t uid;
- __kernel_gid_t gid;
- __kernel_uid_t cuid;
- __kernel_gid_t cgid;
- __kernel_mode_t mode;
- unsigned int seq;
- unsigned int __pad1;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* __PPC64_IPCBUF_H__ */
diff --git a/include/asm-ppc64/kexec.h b/include/asm-ppc64/kexec.h
deleted file mode 100644
index 511908afaeeb..000000000000
--- a/include/asm-ppc64/kexec.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _PPC64_KEXEC_H
-#define _PPC64_KEXEC_H
-
-/*
- * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can
- * return, i.e. the highest page that is mapped directly into kernel
- * memory, so that kmap is not required.
- */
-
-/* Maximum physical address we can use pages from */
-/* XXX: since we copy virt we can use any page we allocate */
-#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
-
-/* Maximum address we can reach in physical address mode */
-/* XXX: I want to allow initrd in highmem. Otherwise set to rmo on lpar */
-#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
-
-/* Maximum address we can use for the control code buffer */
-/* XXX: unused today, ppc32 uses TASK_SIZE, probably left over from use_mm */
-#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
-
-/* XXX: today we don't use this at all, although we have a static stack */
-#define KEXEC_CONTROL_CODE_SIZE 4096
-
-/* The native architecture */
-#define KEXEC_ARCH KEXEC_ARCH_PPC64
-
-#define MAX_NOTE_BYTES 1024
-
-#ifndef __ASSEMBLY__
-
-typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
-
-extern note_buf_t crash_notes[];
-
-extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
- master to copy new code to 0 */
-
-#endif /* __ASSEMBLY__ */
-#endif /* _PPC64_KEXEC_H */
-
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
index 77a743402db4..820dd729b895 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-ppc64/mmu_context.h
@@ -16,21 +16,6 @@
* 2 of the License, or (at your option) any later version.
*/
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 64;
- return __ffs(b[2]) + 128;
-}
-
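For reference, a portable restatement of the removed helper, with __builtin_ctzl standing in for the cntlzd-based __ffs (illustrative, not the original):

static inline int sched_find_first_bit_sketch(const unsigned long *b)
{
	/* 140-bit bitmap in three 64-bit words; at least one bit is set. */
	if (b[0])
		return __builtin_ctzl(b[0]);
	if (b[1])
		return __builtin_ctzl(b[1]) + 64;
	return __builtin_ctzl(b[2]) + 128;
}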
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
diff --git a/include/asm-ppc64/naca.h b/include/asm-ppc64/naca.h
deleted file mode 100644
index d2afe6447597..000000000000
--- a/include/asm-ppc64/naca.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _NACA_H
-#define _NACA_H
-
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/types.h>
-
-struct naca_struct {
- /* Kernel only data - undefined for user space */
- void *xItVpdAreas; /* VPD Data 0x00 */
- void *xRamDisk; /* iSeries ramdisk 0x08 */
- u64 xRamDiskSize; /* In pages 0x10 */
-};
-
-extern struct naca_struct naca;
-
-#endif /* _NACA_H */
diff --git a/include/asm-ppc64/numnodes.h b/include/asm-ppc64/numnodes.h
deleted file mode 100644
index 75ae0b906708..000000000000
--- a/include/asm-ppc64/numnodes.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_MAX_NUMNODES_H
-#define _ASM_MAX_NUMNODES_H
-
-/* Max 16 Nodes */
-#define NODES_SHIFT 4
-
-#endif /* _ASM_MAX_NUMNODES_H */
diff --git a/include/asm-ppc64/nvram.h b/include/asm-ppc64/nvram.h
index dfaa21566c9a..def47d720d3d 100644
--- a/include/asm-ppc64/nvram.h
+++ b/include/asm-ppc64/nvram.h
@@ -70,7 +70,7 @@ extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
extern int pSeries_nvram_init(void);
extern int pmac_nvram_init(void);
-extern int bpa_nvram_init(void);
+extern int mmio_nvram_init(void);
/* PowerMac specific nvram stuffs */
diff --git a/include/asm-ppc64/paca.h b/include/asm-ppc64/paca.h
index 2f0f36f73d38..f68fe91debaf 100644
--- a/include/asm-ppc64/paca.h
+++ b/include/asm-ppc64/paca.h
@@ -19,7 +19,7 @@
#include <linux/config.h>
#include <asm/types.h>
#include <asm/lppaca.h>
-#include <asm/iSeries/ItLpRegSave.h>
+#include <asm/iseries/it_lp_reg_save.h>
#include <asm/mmu.h>
register struct paca_struct *local_paca asm("r13");
diff --git a/include/asm-ppc64/plpar_wrappers.h b/include/asm-ppc64/plpar_wrappers.h
deleted file mode 100644
index 72dd2449ee76..000000000000
--- a/include/asm-ppc64/plpar_wrappers.h
+++ /dev/null
@@ -1,120 +0,0 @@
-#ifndef _PPC64_PLPAR_WRAPPERS_H
-#define _PPC64_PLPAR_WRAPPERS_H
-
-#include <asm/hvcall.h>
-
-static inline long poll_pending(void)
-{
- unsigned long dummy;
- return plpar_hcall(H_POLL_PENDING, 0, 0, 0, 0,
- &dummy, &dummy, &dummy);
-}
-
-static inline long prod_processor(void)
-{
- plpar_hcall_norets(H_PROD);
- return(0);
-}
-
-static inline long cede_processor(void)
-{
- plpar_hcall_norets(H_CEDE);
- return(0);
-}
-
-static inline long register_vpa(unsigned long flags, unsigned long proc,
- unsigned long vpa)
-{
- return plpar_hcall_norets(H_REGISTER_VPA, flags, proc, vpa);
-}
-
-void vpa_init(int cpu);
-
-static inline long plpar_pte_remove(unsigned long flags,
- unsigned long ptex,
- unsigned long avpn,
- unsigned long *old_pteh_ret,
- unsigned long *old_ptel_ret)
-{
- unsigned long dummy;
- return plpar_hcall(H_REMOVE, flags, ptex, avpn, 0,
- old_pteh_ret, old_ptel_ret, &dummy);
-}
-
-static inline long plpar_pte_read(unsigned long flags,
- unsigned long ptex,
- unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
-{
- unsigned long dummy;
- return plpar_hcall(H_READ, flags, ptex, 0, 0,
- old_pteh_ret, old_ptel_ret, &dummy);
-}
-
-static inline long plpar_pte_protect(unsigned long flags,
- unsigned long ptex,
- unsigned long avpn)
-{
- return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
-}
-
-static inline long plpar_tce_get(unsigned long liobn,
- unsigned long ioba,
- unsigned long *tce_ret)
-{
- unsigned long dummy;
- return plpar_hcall(H_GET_TCE, liobn, ioba, 0, 0,
- tce_ret, &dummy, &dummy);
-}
-
-static inline long plpar_tce_put(unsigned long liobn,
- unsigned long ioba,
- unsigned long tceval)
-{
- return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
-}
-
-static inline long plpar_tce_put_indirect(unsigned long liobn,
- unsigned long ioba,
- unsigned long page,
- unsigned long count)
-{
- return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
-}
-
-static inline long plpar_tce_stuff(unsigned long liobn,
- unsigned long ioba,
- unsigned long tceval,
- unsigned long count)
-{
- return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
-}
-
-static inline long plpar_get_term_char(unsigned long termno,
- unsigned long *len_ret,
- char *buf_ret)
-{
- unsigned long *lbuf = (unsigned long *)buf_ret; /* ToDo: alignment? */
- return plpar_hcall(H_GET_TERM_CHAR, termno, 0, 0, 0,
- len_ret, lbuf+0, lbuf+1);
-}
-
-static inline long plpar_put_term_char(unsigned long termno,
- unsigned long len,
- const char *buffer)
-{
- unsigned long *lbuf = (unsigned long *)buffer; /* ToDo: alignment? */
- return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len, lbuf[0],
- lbuf[1]);
-}
-
-static inline long plpar_set_xdabr(unsigned long address, unsigned long flags)
-{
- return plpar_hcall_norets(H_SET_XDABR, address, flags);
-}
-
-static inline long plpar_set_dabr(unsigned long val)
-{
- return plpar_hcall_norets(H_SET_DABR, val);
-}
-
-#endif /* _PPC64_PLPAR_WRAPPERS_H */
diff --git a/include/asm-ppc64/ppc32.h b/include/asm-ppc64/ppc32.h
deleted file mode 100644
index 3945a55d112a..000000000000
--- a/include/asm-ppc64/ppc32.h
+++ /dev/null
@@ -1,122 +0,0 @@
-#ifndef _PPC64_PPC32_H
-#define _PPC64_PPC32_H
-
-#include <linux/compat.h>
-#include <asm/siginfo.h>
-#include <asm/signal.h>
-
-/*
- * Data types and macros for providing 32b PowerPC support.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/* These are here to support 32-bit syscalls on a 64-bit kernel. */
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[SI_PAD_SIZE32];
-
- /* kill() */
- struct {
- compat_pid_t _pid; /* sender's pid */
- compat_uid_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- compat_pid_t _pid; /* sender's pid */
- compat_uid_t _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- compat_pid_t _pid; /* which child */
- compat_uid_t _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} compat_siginfo_t;
-
-#define __old_sigaction32 old_sigaction32
-
-struct __old_sigaction32 {
- compat_uptr_t sa_handler;
- compat_old_sigset_t sa_mask;
- unsigned int sa_flags;
- compat_uptr_t sa_restorer; /* not used by Linux/PPC yet */
-};
-
-
-
-struct sigaction32 {
- compat_uptr_t sa_handler; /* Really a pointer, but need to deal with 32 bits */
- unsigned int sa_flags;
- compat_uptr_t sa_restorer; /* Another 32 bit pointer */
- compat_sigset_t sa_mask; /* A 32 bit mask */
-};
-
-typedef struct sigaltstack_32 {
- unsigned int ss_sp;
- int ss_flags;
- compat_size_t ss_size;
-} stack_32_t;
-
-struct sigcontext32 {
- unsigned int _unused[4];
- int signal;
- compat_uptr_t handler;
- unsigned int oldmask;
- compat_uptr_t regs; /* 4 byte pointer to the pt_regs32 structure. */
-};
-
-struct mcontext32 {
- elf_gregset_t32 mc_gregs;
- elf_fpregset_t mc_fregs;
- unsigned int mc_pad[2];
- elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16)));
-};
-
-struct ucontext32 {
- unsigned int uc_flags;
- unsigned int uc_link;
- stack_32_t uc_stack;
- int uc_pad[7];
- compat_uptr_t uc_regs; /* points to uc_mcontext field */
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
- /* glibc has 1024-bit signal masks, ours are 64-bit */
- int uc_maskext[30];
- int uc_pad2[3];
- struct mcontext32 uc_mcontext;
-};
-
-#endif /* _PPC64_PPC32_H */
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h
index 14cb895bb607..7d84fb5e39f1 100644
--- a/include/asm-ppc64/spinlock.h
+++ b/include/asm-ppc64/spinlock.h
@@ -21,7 +21,7 @@
#include <linux/config.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
-#include <asm/iSeries/HvCall.h>
+#include <asm/iseries/hv_call.h>
#define __raw_spin_is_locked(x) ((x)->slock != 0)
diff --git a/include/asm-ppc64/stat.h b/include/asm-ppc64/stat.h
deleted file mode 100644
index 973a5f97951d..000000000000
--- a/include/asm-ppc64/stat.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _PPC64_STAT_H
-#define _PPC64_STAT_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/types.h>
-
-struct stat {
- unsigned long st_dev;
- ino_t st_ino;
- nlink_t st_nlink;
- mode_t st_mode;
- uid_t st_uid;
- gid_t st_gid;
- unsigned long st_rdev;
- off_t st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long __unused4;
- unsigned long __unused5;
- unsigned long __unused6;
-};
-
-#define STAT_HAVE_NSEC 1
-
-/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
-struct stat64 {
- unsigned long st_dev; /* Device. */
- unsigned long st_ino; /* File serial number. */
- unsigned int st_mode; /* File mode. */
- unsigned int st_nlink; /* Link count. */
- unsigned int st_uid; /* User ID of the file's owner. */
- unsigned int st_gid; /* Group ID of the file's group. */
- unsigned long st_rdev; /* Device number, if device. */
- unsigned short __pad2;
- long st_size; /* Size of file, in bytes. */
- int st_blksize; /* Optimal block size for I/O. */
-
- long st_blocks; /* Number of 512-byte blocks allocated. */
- int st_atime; /* Time of last access. */
- int st_atime_nsec;
- int st_mtime; /* Time of last modification. */
- int st_mtime_nsec;
- int st_ctime; /* Time of last status change. */
- int st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
-};
-#endif
diff --git a/include/asm-ppc64/tlb.h b/include/asm-ppc64/tlb.h
deleted file mode 100644
index 97cb696ce68d..000000000000
--- a/include/asm-ppc64/tlb.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * TLB shootdown specifics for PPC64
- *
- * Copyright (C) 2002 Anton Blanchard, IBM Corp.
- * Copyright (C) 2002 Paul Mackerras, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _PPC64_TLB_H
-#define _PPC64_TLB_H
-
-#include <asm/tlbflush.h>
-
-struct mmu_gather;
-
-extern void pte_free_finish(void);
-
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
- flush_tlb_pending();
- pte_free_finish();
-}
-
-/* Avoid pulling in another include just for this */
-#define check_pgt_cache() do { } while (0)
-
-/* Get the generic bits... */
-#include <asm-generic/tlb.h>
-
-/* Nothing needed here in fact... */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
-
-#endif /* _PPC64_TLB_H */
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
deleted file mode 100644
index 626f505c6ee3..000000000000
--- a/include/asm-ppc64/tlbflush.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef _PPC64_TLBFLUSH_H
-#define _PPC64_TLBFLUSH_H
-
-/*
- * TLB flushing:
- *
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
- * - flush_tlb_range(vma, start, end) flushes a range of pages
- * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- */
-
-#include <linux/percpu.h>
-#include <asm/page.h>
-
-#define PPC64_TLB_BATCH_NR 192
-
-struct mm_struct;
-struct ppc64_tlb_batch {
- unsigned long index;
- struct mm_struct *mm;
- pte_t pte[PPC64_TLB_BATCH_NR];
- unsigned long vaddr[PPC64_TLB_BATCH_NR];
- unsigned int large;
-};
-DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
-
-extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
-
-static inline void flush_tlb_pending(void)
-{
- struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
-
- if (batch->index)
- __flush_tlb_pending(batch);
- put_cpu_var(ppc64_tlb_batch);
-}
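A freestanding sketch of this batching scheme, with the per-CPU machinery and the real flush stubbed out (names mirror the header; everything else is illustrative):

#include <stdio.h>

#define BATCH_NR 192	/* cf. PPC64_TLB_BATCH_NR */

struct tlb_batch {
	unsigned long index;
	unsigned long vaddr[BATCH_NR];
};

static void flush_now(struct tlb_batch *b)
{
	/* stands in for __flush_tlb_pending() */
	printf("flushing %lu entries\n", b->index);
	b->index = 0;
}

static void queue_invalidate(struct tlb_batch *b, unsigned long va)
{
	b->vaddr[b->index++] = va;
	if (b->index == BATCH_NR)	/* batch full: flush eagerly */
		flush_now(b);
}

int main(void)
{
	struct tlb_batch b = { 0 };
	unsigned long va;

	for (va = 0; va < 200 * 4096; va += 4096)
		queue_invalidate(&b, va);
	if (b.index)
		flush_now(&b);		/* cf. flush_tlb_pending() */
	return 0;
}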
-
-#define flush_tlb_mm(mm) flush_tlb_pending()
-#define flush_tlb_page(vma, addr) flush_tlb_pending()
-#define flush_tlb_page_nohash(vma, addr) do { } while (0)
-#define flush_tlb_range(vma, start, end) \
- do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_kernel_range(start, end) flush_tlb_pending()
-#define flush_tlb_pgtables(mm, start, end) do { } while (0)
-
-extern void flush_hash_page(unsigned long va, pte_t pte, int local);
-void flush_hash_range(unsigned long number, int local);
-
-#endif /* _PPC64_TLBFLUSH_H */
diff --git a/include/asm-ppc64/uaccess.h b/include/asm-ppc64/uaccess.h
deleted file mode 100644
index 132c1276547b..000000000000
--- a/include/asm-ppc64/uaccess.h
+++ /dev/null
@@ -1,341 +0,0 @@
-#ifndef _PPC64_UACCESS_H
-#define _PPC64_UACCESS_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef __ASSEMBLY__
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <asm/processor.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#define KERNEL_DS MAKE_MM_SEG(0UL)
-#define USER_DS MAKE_MM_SEG(0xf000000000000000UL)
-
-#define get_ds() (KERNEL_DS)
-#define get_fs() (current->thread.fs)
-#define set_fs(val) (current->thread.fs = (val))
-
-#define segment_eq(a,b) ((a).seg == (b).seg)
-
-/*
- * Use the alpha trick for checking ranges:
- *
- * Is an address valid? This does a straightforward calculation rather
- * than a series of tests.
- *
- * Address valid if:
- * - "addr" doesn't have any high-bits set
- * - AND "size" doesn't have any high-bits set
- * - OR we are in kernel mode.
- *
- * We don't have to check for high bits in (addr+size) because the first
- * two checks force the maximum result to be below the start of the
- * kernel region.
- */
-#define __access_ok(addr,size,segment) \
- (((segment).seg & (addr | size)) == 0)
-
-#define access_ok(type,addr,size) \
- __access_ok(((__force unsigned long)(addr)),(size),get_fs())
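A user-space check of this trick with the segment values defined above, assuming a 64-bit unsigned long:

#include <assert.h>

#define KERNEL_SEG 0x0UL			/* cf. KERNEL_DS */
#define USER_SEG   0xf000000000000000UL		/* cf. USER_DS */

/* OR-ing addr and size and masking with the segment bits rejects
 * anything with kernel-half bits set. */
static int seg_ok(unsigned long addr, unsigned long size, unsigned long seg)
{
	return (seg & (addr | size)) == 0;
}

int main(void)
{
	assert(seg_ok(0x10000000UL, 0x1000UL, USER_SEG));	/* user range */
	assert(!seg_ok(0xc000000000000000UL, 8, USER_SEG));	/* kernel addr */
	assert(seg_ok(0xc000000000000000UL, 8, KERNEL_SEG));	/* KERNEL_DS */
	return 0;
}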
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-/* Returns 0 if no matching entry is found, the fixup address otherwise. */
-extern unsigned long search_exception_table(unsigned long);
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- *
- * As we use the same address space for kernel and user data on the
- * PowerPC, we can just do these as direct assignments. (Of course, the
- * exception handling means that it's no longer "just"...)
- */
-#define get_user(x,ptr) \
- __get_user_check((x),(ptr),sizeof(*(ptr)))
-#define put_user(x,ptr) \
- __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-
-#define __get_user(x,ptr) \
- __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __put_user(x,ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-
-#define __get_user_unaligned __get_user
-#define __put_user_unaligned __put_user
-
-extern long __put_user_bad(void);
-
-#define __put_user_nocheck(x,ptr,size) \
-({ \
- long __pu_err; \
- might_sleep(); \
- __chk_user_ptr(ptr); \
- __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \
- __pu_err; \
-})
-
-#define __put_user_check(x,ptr,size) \
-({ \
- long __pu_err = -EFAULT; \
- void __user *__pu_addr = (ptr); \
- might_sleep(); \
- if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
- __put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
- __pu_err; \
-})
-
-#define __put_user_size(x,ptr,size,retval,errret) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: __put_user_asm(x,ptr,retval,"stb",errret); break; \
- case 2: __put_user_asm(x,ptr,retval,"sth",errret); break; \
- case 4: __put_user_asm(x,ptr,retval,"stw",errret); break; \
- case 8: __put_user_asm(x,ptr,retval,"std",errret); break; \
- default: __put_user_bad(); \
- } \
-} while (0)
-
-/*
- * We don't tell gcc that we are accessing memory, but this is OK
- * because we do not write to any memory gcc knows about, so there
- * are no aliasing issues.
- */
-#define __put_user_asm(x, addr, err, op, errret) \
- __asm__ __volatile__( \
- "1: "op" %1,0(%2) # put_user\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: li %0,%3\n" \
- " b 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .llong 1b,3b\n" \
- ".previous" \
- : "=r"(err) \
- : "r"(x), "b"(addr), "i"(errret), "0"(err))
-
-
-#define __get_user_nocheck(x,ptr,size) \
-({ \
- long __gu_err; \
- unsigned long __gu_val; \
- might_sleep(); \
- __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-#define __get_user_check(x,ptr,size) \
-({ \
- long __gu_err = -EFAULT; \
- unsigned long __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- might_sleep(); \
- if (access_ok(VERIFY_READ,__gu_addr,size)) \
- __get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);\
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-extern long __get_user_bad(void);
-
-#define __get_user_size(x,ptr,size,retval,errret) \
-do { \
- retval = 0; \
- __chk_user_ptr(ptr); \
- switch (size) { \
- case 1: __get_user_asm(x,ptr,retval,"lbz",errret); break; \
- case 2: __get_user_asm(x,ptr,retval,"lhz",errret); break; \
- case 4: __get_user_asm(x,ptr,retval,"lwz",errret); break; \
- case 8: __get_user_asm(x,ptr,retval,"ld",errret); break; \
- default: (x) = __get_user_bad(); \
- } \
-} while (0)
-
-#define __get_user_asm(x, addr, err, op, errret) \
- __asm__ __volatile__( \
- "1: "op" %1,0(%2) # get_user\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: li %0,%3\n" \
- " li %1,0\n" \
- " b 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .llong 1b,3b\n" \
- ".previous" \
- : "=r"(err), "=r"(x) \
- : "b"(addr), "i"(errret), "0"(err))
-
-/* more complex routines */
-
-extern unsigned long __copy_tofrom_user(void __user *to, const void __user *from,
- unsigned long size);
-
-static inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
-{
- if (__builtin_constant_p(n)) {
- unsigned long ret;
-
- switch (n) {
- case 1:
- __get_user_size(*(u8 *)to, from, 1, ret, 1);
- return ret;
- case 2:
- __get_user_size(*(u16 *)to, from, 2, ret, 2);
- return ret;
- case 4:
- __get_user_size(*(u32 *)to, from, 4, ret, 4);
- return ret;
- case 8:
- __get_user_size(*(u64 *)to, from, 8, ret, 8);
- return ret;
- }
- }
- return __copy_tofrom_user((__force void __user *) to, from, n);
-}
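A portable illustration of the constant-size dispatch: when the length is a compile-time constant, the generic copy collapses to one fixed-size access (hypothetical helper, not from this header):

#include <string.h>

static inline void copy_small(void *dst, const void *src, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1: *(unsigned char *)dst = *(const unsigned char *)src; return;
		case 2: memcpy(dst, src, 2); return;	/* one 2-byte access */
		case 4: memcpy(dst, src, 4); return;	/* one 4-byte access */
		case 8: memcpy(dst, src, 8); return;	/* one 8-byte access */
		}
	}
	memcpy(dst, src, n);	/* generic path, cf. __copy_tofrom_user */
}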
-
-static inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- might_sleep();
- return __copy_from_user_inatomic(to, from, n);
-}
-
-static inline unsigned long
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
-{
- if (__builtin_constant_p(n)) {
- unsigned long ret;
-
- switch (n) {
- case 1:
- __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
- return ret;
- case 2:
- __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
- return ret;
- case 4:
- __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
- return ret;
- case 8:
- __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret, 8);
- return ret;
- }
- }
- return __copy_tofrom_user(to, (__force const void __user *) from, n);
-}
-
-static inline unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- might_sleep();
- return __copy_to_user_inatomic(to, from, n);
-}
-
-#define __copy_in_user(to, from, size) \
- __copy_tofrom_user((to), (from), (size))
-
-extern unsigned long copy_from_user(void *to, const void __user *from,
- unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
- unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
- unsigned long n);
-
-extern unsigned long __clear_user(void __user *addr, unsigned long size);
-
-static inline unsigned long
-clear_user(void __user *addr, unsigned long size)
-{
- might_sleep();
- if (likely(access_ok(VERIFY_WRITE, addr, size)))
- size = __clear_user(addr, size);
- return size;
-}
-
-extern int __strncpy_from_user(char *dst, const char __user *src, long count);
-
-static inline long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
- might_sleep();
- if (likely(access_ok(VERIFY_READ, src, 1)))
- return __strncpy_from_user(dst, src, count);
- return -EFAULT;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-extern int __strnlen_user(const char __user *str, long len);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- */
-static inline int strnlen_user(const char __user *str, long len)
-{
- might_sleep();
- if (likely(access_ok(VERIFY_READ, str, 1)))
- return __strnlen_user(str, len);
- return 0;
-}
-
-#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _PPC64_UACCESS_H */
diff --git a/include/asm-ppc64/ucontext.h b/include/asm-ppc64/ucontext.h
deleted file mode 100644
index ef8cc5b37542..000000000000
--- a/include/asm-ppc64/ucontext.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASMPPC64_UCONTEXT_H
-#define _ASMPPC64_UCONTEXT_H
-
-#include <asm/sigcontext.h>
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-struct ucontext {
- unsigned long uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- sigset_t uc_sigmask;
- sigset_t __unused[15]; /* Allow for uc_sigmask growth */
- struct sigcontext uc_mcontext; /* last for extensibility */
-};
-
-#endif /* _ASMPPC64_UCONTEXT_H */