Diffstat (limited to 'arch/alpha/include')
-rw-r--r--  arch/alpha/include/asm/Kbuild           |    1
-rw-r--r--  arch/alpha/include/asm/cmpxchg.h        |  239
-rw-r--r--  arch/alpha/include/asm/core_apecs.h     |  534
-rw-r--r--  arch/alpha/include/asm/core_lca.h       |  378
-rw-r--r--  arch/alpha/include/asm/core_t2.h        |    8
-rw-r--r--  arch/alpha/include/asm/dma-mapping.h    |    4
-rw-r--r--  arch/alpha/include/asm/dma.h            |    9
-rw-r--r--  arch/alpha/include/asm/elf.h            |   10
-rw-r--r--  arch/alpha/include/asm/hwrpb.h          |    2
-rw-r--r--  arch/alpha/include/asm/io.h             |   62
-rw-r--r--  arch/alpha/include/asm/irq.h            |   10
-rw-r--r--  arch/alpha/include/asm/jensen.h         |  363
-rw-r--r--  arch/alpha/include/asm/machvec.h        |    9
-rw-r--r--  arch/alpha/include/asm/mmu_context.h    |   45
-rw-r--r--  arch/alpha/include/asm/page.h           |    8
-rw-r--r--  arch/alpha/include/asm/pgtable.h        |   11
-rw-r--r--  arch/alpha/include/asm/processor.h      |    8
-rw-r--r--  arch/alpha/include/asm/special_insns.h  |    5
-rw-r--r--  arch/alpha/include/asm/spinlock_types.h |    2
-rw-r--r--  arch/alpha/include/asm/tlbflush.h       |   37
-rw-r--r--  arch/alpha/include/asm/uaccess.h        |   80
-rw-r--r--  arch/alpha/include/asm/vga.h            |    2
-rw-r--r--  arch/alpha/include/asm/xchg.h           |  246
-rw-r--r--  arch/alpha/include/uapi/asm/compiler.h  |   18
-rw-r--r--  arch/alpha/include/uapi/asm/mman.h      |    3
-rw-r--r--  arch/alpha/include/uapi/asm/ptrace.h    |    2
-rw-r--r--  arch/alpha/include/uapi/asm/socket.h    |   12
27 files changed, 283 insertions, 1825 deletions
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index 396caece6d6d..483965c5a4de 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -5,3 +5,4 @@ generic-y += agp.h
generic-y += asm-offsets.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
+generic-y += text-patching.h
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 91d4a4d9258c..ae1b96479d0c 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -3,17 +3,232 @@
#define _ALPHA_CMPXCHG_H
/*
- * Atomic exchange routines.
+ * Atomic exchange.
+ * Since it can be used to implement critical sections
+ * it must clobber "memory" (also for interrupts in UP).
*/
-#define ____xchg(type, args...) __arch_xchg ## type ## _local(args)
-#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
-#include <asm/xchg.h>
+static inline unsigned long
+____xchg_u8(volatile char *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " insbl %1,%4,%1\n"
+ "1: ldq_l %2,0(%3)\n"
+ " extbl %2,%4,%0\n"
+ " mskbl %2,%4,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%3)\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long
+____xchg_u16(volatile short *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " inswl %1,%4,%1\n"
+ "1: ldq_l %2,0(%3)\n"
+ " extwl %2,%4,%0\n"
+ " mskwl %2,%4,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%3)\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long
+____xchg_u32(volatile int *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%4\n"
+ " bis $31,%3,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+static inline unsigned long
+____xchg_u64(volatile long *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%4\n"
+ " bis $31,%3,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid xchg(). */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+____xchg(volatile void *ptr, unsigned long x, int size)
+{
+ return
+ size == 1 ? ____xchg_u8(ptr, x) :
+ size == 2 ? ____xchg_u16(ptr, x) :
+ size == 4 ? ____xchg_u32(ptr, x) :
+ size == 8 ? ____xchg_u64(ptr, x) :
+ (__xchg_called_with_bad_pointer(), x);
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+static inline unsigned long
+____cmpxchg_u8(volatile char *m, unsigned char old, unsigned char new)
+{
+ unsigned long prev, tmp, cmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " insbl %1,%5,%1\n"
+ "1: ldq_l %2,0(%4)\n"
+ " extbl %2,%5,%0\n"
+ " cmpeq %0,%6,%3\n"
+ " beq %3,2f\n"
+ " mskbl %2,%5,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+ : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u16(volatile short *m, unsigned short old, unsigned short new)
+{
+ unsigned long prev, tmp, cmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " inswl %1,%5,%1\n"
+ "1: ldq_l %2,0(%4)\n"
+ " extwl %2,%5,%0\n"
+ " cmpeq %0,%6,%3\n"
+ " beq %3,2f\n"
+ " mskwl %2,%5,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+ : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u32(volatile int *m, int old, int new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+ return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+____cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+ int size)
+{
+ return
+ size == 1 ? ____cmpxchg_u8(ptr, old, new) :
+ size == 2 ? ____cmpxchg_u16(ptr, old, new) :
+ size == 4 ? ____cmpxchg_u32(ptr, old, new) :
+ size == 8 ? ____cmpxchg_u64(ptr, old, new) :
+ (__cmpxchg_called_with_bad_pointer(), old);
+}
#define xchg_local(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __arch_xchg_local((ptr), (unsigned long)_x_,\
+ (__typeof__(*(ptr))) ____xchg((ptr), (unsigned long)_x_, \
sizeof(*(ptr))); \
})
@@ -21,7 +236,7 @@
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (__typeof__(*(ptr))) ____cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, \
sizeof(*(ptr))); \
})
@@ -32,12 +247,6 @@
cmpxchg_local((ptr), (o), (n)); \
})
-#undef ____xchg
-#undef ____cmpxchg
-#define ____xchg(type, args...) __arch_xchg ##type(args)
-#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
-#include <asm/xchg.h>
-
/*
* The leading and the trailing memory barriers guarantee that these
* operations are fully ordered.
@@ -48,7 +257,7 @@
__typeof__(*(ptr)) _x_ = (x); \
smp_mb(); \
__ret = (__typeof__(*(ptr))) \
- __arch_xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ ____xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
smp_mb(); \
__ret; \
})
@@ -59,7 +268,7 @@
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
smp_mb(); \
- __ret = (__typeof__(*(ptr))) __cmpxchg((ptr), \
+ __ret = (__typeof__(*(ptr))) ____cmpxchg((ptr), \
(unsigned long)_o_, (unsigned long)_n_, sizeof(*(ptr)));\
smp_mb(); \
__ret; \
@@ -71,6 +280,4 @@
arch_cmpxchg((ptr), (o), (n)); \
})
-#undef ____cmpxchg
-
#endif /* _ALPHA_CMPXCHG_H */
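
The ____xchg_u* and ____cmpxchg_u* helpers folded into cmpxchg.h above all follow the same shape: load-locked, modify the value (masking in the sub-longword lanes where needed), store-conditional, and branch back if the conditional store lost the lock flag. As a rough user-space analogue of that retry loop and of the "success is indicated by comparing RETURN with OLD" convention, here is a minimal sketch using C11 atomics rather than Alpha assembly; the demo_* names are purely illustrative and are not part of the kernel interface.

#include <stdatomic.h>
#include <stdio.h>

/* Analogue of ____xchg_u64: retry until the store wins, the way the
 * assembly above branches back to the ldq_l when stq_c returns 0.
 * compare_exchange_weak may fail spuriously, much like stq_c, so it
 * sits in a loop; 'prev' is refreshed with the current value on each
 * failed attempt. */
static unsigned long demo_xchg_u64(_Atomic unsigned long *m, unsigned long val)
{
        unsigned long prev = atomic_load(m);

        while (!atomic_compare_exchange_weak(m, &prev, val))
                ;
        return prev;
}

/* Analogue of ____cmpxchg_u64: store 'new' only if *m still holds 'old',
 * and return whatever was found there, so the caller detects success by
 * comparing the return value with 'old'. */
static unsigned long demo_cmpxchg_u64(_Atomic unsigned long *m,
                                      unsigned long old, unsigned long new)
{
        unsigned long prev = old;

        atomic_compare_exchange_strong(m, &prev, new);
        return prev;
}

int main(void)
{
        _Atomic unsigned long v = 5;
        unsigned long old = demo_xchg_u64(&v, 7);

        printf("xchg returned %lu, v = %lu\n", old, atomic_load(&v));
        printf("cmpxchg(7->9) returned %lu\n", demo_cmpxchg_u64(&v, 7, 9)); /* succeeds */
        printf("cmpxchg(7->3) returned %lu\n", demo_cmpxchg_u64(&v, 7, 3)); /* fails, v stays 9 */
        return 0;
}

The weak compare-exchange is deliberate in the exchange loop: like a failed stq_c, a spurious failure simply costs one more trip around the loop.
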
diff --git a/arch/alpha/include/asm/core_apecs.h b/arch/alpha/include/asm/core_apecs.h
deleted file mode 100644
index 69a2fc62c9c3..000000000000
--- a/arch/alpha/include/asm/core_apecs.h
+++ /dev/null
@@ -1,534 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ALPHA_APECS__H__
-#define __ALPHA_APECS__H__
-
-#include <linux/types.h>
-#include <asm/compiler.h>
-
-/*
- * APECS is the internal name for the 2107x chipset which provides
- * memory controller and PCI access for the 21064 chip based systems.
- *
- * This file is based on:
- *
- * DECchip 21071-AA and DECchip 21072-AA Core Logic Chipsets
- * Data Sheet
- *
- * EC-N0648-72
- *
- *
- * david.rusling@reo.mts.dec.com Initial Version.
- *
- */
-
-/*
- An AVANTI *might* be an XL, and an XL has only 27 bits of ISA address
- that get passed through the PCI<->ISA bridge chip. So we've gotta use
- both windows to max out the physical memory we can DMA to. Sigh...
-
- If we try a window at 0 for 1GB as a work-around, we run into conflicts
- with ISA/PCI bus memory which can't be relocated, like VGA aperture and
- BIOS ROMs. So we must put the windows high enough to avoid these areas.
-
- We put window 1 at BUS 64Mb for 64Mb, mapping physical 0 to 64Mb-1,
- and window 2 at BUS 1Gb for 1Gb, mapping physical 0 to 1Gb-1.
- Yes, this does map 0 to 64Mb-1 twice, but only window 1 will actually
- be used for that range (via virt_to_bus()).
-
- Note that we actually fudge the window 1 maximum as 48Mb instead of 64Mb,
- to keep virt_to_bus() from returning an address in the first window, for
- a data area that goes beyond the 64Mb first DMA window. Sigh...
- The fudge factor MUST match with <asm/dma.h> MAX_DMA_ADDRESS, but
- we can't just use that here, because of header file looping... :-(
-
- Window 1 will be used for all DMA from the ISA bus; yes, that does
- limit what memory an ISA floppy or sound card or Ethernet can touch, but
- it's also a known limitation on other platforms as well. We use the
- same technique that is used on INTEL platforms with similar limitation:
- set MAX_DMA_ADDRESS and clear some pages' DMAable flags during mem_init().
- We trust that any ISA bus device drivers will *always* ask for DMAable
- memory explicitly via kmalloc()/get_free_pages() flags arguments.
-
- Note that most PCI bus devices' drivers do *not* explicitly ask for
- DMAable memory; they count on being able to DMA to any memory they
- get from kmalloc()/get_free_pages(). They will also use window 1 for
- any physical memory accesses below 64Mb; the rest will be handled by
- window 2, maxing out at 1Gb of memory. I trust this is enough... :-)
-
- We hope that the area before the first window is large enough so that
- there will be no overlap at the top end (64Mb). We *must* locate the
- PCI cards' memory just below window 1, so that there's still the
- possibility of being able to access it via SPARSE space. This is
- important for cards such as the Matrox Millennium, whose Xserver
- wants to access memory-mapped registers in byte and short lengths.
-
- Note that the XL is treated differently from the AVANTI, even though
- for most other things they are identical. It didn't seem reasonable to
- make the AVANTI support pay for the limitations of the XL. It is true,
- however, that an XL kernel will run on an AVANTI without problems.
-
- %%% All of this should be obviated by the ability to route
- everything through the iommu.
-*/
-
-/*
- * 21071-DA Control and Status registers.
- * These are used for PCI memory access.
- */
-#define APECS_IOC_DCSR (IDENT_ADDR + 0x1A0000000UL)
-#define APECS_IOC_PEAR (IDENT_ADDR + 0x1A0000020UL)
-#define APECS_IOC_SEAR (IDENT_ADDR + 0x1A0000040UL)
-#define APECS_IOC_DR1 (IDENT_ADDR + 0x1A0000060UL)
-#define APECS_IOC_DR2 (IDENT_ADDR + 0x1A0000080UL)
-#define APECS_IOC_DR3 (IDENT_ADDR + 0x1A00000A0UL)
-
-#define APECS_IOC_TB1R (IDENT_ADDR + 0x1A00000C0UL)
-#define APECS_IOC_TB2R (IDENT_ADDR + 0x1A00000E0UL)
-
-#define APECS_IOC_PB1R (IDENT_ADDR + 0x1A0000100UL)
-#define APECS_IOC_PB2R (IDENT_ADDR + 0x1A0000120UL)
-
-#define APECS_IOC_PM1R (IDENT_ADDR + 0x1A0000140UL)
-#define APECS_IOC_PM2R (IDENT_ADDR + 0x1A0000160UL)
-
-#define APECS_IOC_HAXR0 (IDENT_ADDR + 0x1A0000180UL)
-#define APECS_IOC_HAXR1 (IDENT_ADDR + 0x1A00001A0UL)
-#define APECS_IOC_HAXR2 (IDENT_ADDR + 0x1A00001C0UL)
-
-#define APECS_IOC_PMLT (IDENT_ADDR + 0x1A00001E0UL)
-
-#define APECS_IOC_TLBTAG0 (IDENT_ADDR + 0x1A0000200UL)
-#define APECS_IOC_TLBTAG1 (IDENT_ADDR + 0x1A0000220UL)
-#define APECS_IOC_TLBTAG2 (IDENT_ADDR + 0x1A0000240UL)
-#define APECS_IOC_TLBTAG3 (IDENT_ADDR + 0x1A0000260UL)
-#define APECS_IOC_TLBTAG4 (IDENT_ADDR + 0x1A0000280UL)
-#define APECS_IOC_TLBTAG5 (IDENT_ADDR + 0x1A00002A0UL)
-#define APECS_IOC_TLBTAG6 (IDENT_ADDR + 0x1A00002C0UL)
-#define APECS_IOC_TLBTAG7 (IDENT_ADDR + 0x1A00002E0UL)
-
-#define APECS_IOC_TLBDATA0 (IDENT_ADDR + 0x1A0000300UL)
-#define APECS_IOC_TLBDATA1 (IDENT_ADDR + 0x1A0000320UL)
-#define APECS_IOC_TLBDATA2 (IDENT_ADDR + 0x1A0000340UL)
-#define APECS_IOC_TLBDATA3 (IDENT_ADDR + 0x1A0000360UL)
-#define APECS_IOC_TLBDATA4 (IDENT_ADDR + 0x1A0000380UL)
-#define APECS_IOC_TLBDATA5 (IDENT_ADDR + 0x1A00003A0UL)
-#define APECS_IOC_TLBDATA6 (IDENT_ADDR + 0x1A00003C0UL)
-#define APECS_IOC_TLBDATA7 (IDENT_ADDR + 0x1A00003E0UL)
-
-#define APECS_IOC_TBIA (IDENT_ADDR + 0x1A0000400UL)
-
-
-/*
- * 21071-CA Control and Status registers.
- * These are used to program memory timing,
- * configure memory and initialise the B-Cache.
- */
-#define APECS_MEM_GCR (IDENT_ADDR + 0x180000000UL)
-#define APECS_MEM_EDSR (IDENT_ADDR + 0x180000040UL)
-#define APECS_MEM_TAR (IDENT_ADDR + 0x180000060UL)
-#define APECS_MEM_ELAR (IDENT_ADDR + 0x180000080UL)
-#define APECS_MEM_EHAR (IDENT_ADDR + 0x1800000a0UL)
-#define APECS_MEM_SFT_RST (IDENT_ADDR + 0x1800000c0UL)
-#define APECS_MEM_LDxLAR (IDENT_ADDR + 0x1800000e0UL)
-#define APECS_MEM_LDxHAR (IDENT_ADDR + 0x180000100UL)
-#define APECS_MEM_GTR (IDENT_ADDR + 0x180000200UL)
-#define APECS_MEM_RTR (IDENT_ADDR + 0x180000220UL)
-#define APECS_MEM_VFPR (IDENT_ADDR + 0x180000240UL)
-#define APECS_MEM_PDLDR (IDENT_ADDR + 0x180000260UL)
-#define APECS_MEM_PDhDR (IDENT_ADDR + 0x180000280UL)
-
-/* Bank x Base Address Register */
-#define APECS_MEM_B0BAR (IDENT_ADDR + 0x180000800UL)
-#define APECS_MEM_B1BAR (IDENT_ADDR + 0x180000820UL)
-#define APECS_MEM_B2BAR (IDENT_ADDR + 0x180000840UL)
-#define APECS_MEM_B3BAR (IDENT_ADDR + 0x180000860UL)
-#define APECS_MEM_B4BAR (IDENT_ADDR + 0x180000880UL)
-#define APECS_MEM_B5BAR (IDENT_ADDR + 0x1800008A0UL)
-#define APECS_MEM_B6BAR (IDENT_ADDR + 0x1800008C0UL)
-#define APECS_MEM_B7BAR (IDENT_ADDR + 0x1800008E0UL)
-#define APECS_MEM_B8BAR (IDENT_ADDR + 0x180000900UL)
-
-/* Bank x Configuration Register */
-#define APECS_MEM_B0BCR (IDENT_ADDR + 0x180000A00UL)
-#define APECS_MEM_B1BCR (IDENT_ADDR + 0x180000A20UL)
-#define APECS_MEM_B2BCR (IDENT_ADDR + 0x180000A40UL)
-#define APECS_MEM_B3BCR (IDENT_ADDR + 0x180000A60UL)
-#define APECS_MEM_B4BCR (IDENT_ADDR + 0x180000A80UL)
-#define APECS_MEM_B5BCR (IDENT_ADDR + 0x180000AA0UL)
-#define APECS_MEM_B6BCR (IDENT_ADDR + 0x180000AC0UL)
-#define APECS_MEM_B7BCR (IDENT_ADDR + 0x180000AE0UL)
-#define APECS_MEM_B8BCR (IDENT_ADDR + 0x180000B00UL)
-
-/* Bank x Timing Register A */
-#define APECS_MEM_B0TRA (IDENT_ADDR + 0x180000C00UL)
-#define APECS_MEM_B1TRA (IDENT_ADDR + 0x180000C20UL)
-#define APECS_MEM_B2TRA (IDENT_ADDR + 0x180000C40UL)
-#define APECS_MEM_B3TRA (IDENT_ADDR + 0x180000C60UL)
-#define APECS_MEM_B4TRA (IDENT_ADDR + 0x180000C80UL)
-#define APECS_MEM_B5TRA (IDENT_ADDR + 0x180000CA0UL)
-#define APECS_MEM_B6TRA (IDENT_ADDR + 0x180000CC0UL)
-#define APECS_MEM_B7TRA (IDENT_ADDR + 0x180000CE0UL)
-#define APECS_MEM_B8TRA (IDENT_ADDR + 0x180000D00UL)
-
-/* Bank x Timing Register B */
-#define APECS_MEM_B0TRB (IDENT_ADDR + 0x180000E00UL)
-#define APECS_MEM_B1TRB (IDENT_ADDR + 0x180000E20UL)
-#define APECS_MEM_B2TRB (IDENT_ADDR + 0x180000E40UL)
-#define APECS_MEM_B3TRB (IDENT_ADDR + 0x180000E60UL)
-#define APECS_MEM_B4TRB (IDENT_ADDR + 0x180000E80UL)
-#define APECS_MEM_B5TRB (IDENT_ADDR + 0x180000EA0UL)
-#define APECS_MEM_B6TRB (IDENT_ADDR + 0x180000EC0UL)
-#define APECS_MEM_B7TRB (IDENT_ADDR + 0x180000EE0UL)
-#define APECS_MEM_B8TRB (IDENT_ADDR + 0x180000F00UL)
-
-
-/*
- * Memory spaces:
- */
-#define APECS_IACK_SC (IDENT_ADDR + 0x1b0000000UL)
-#define APECS_CONF (IDENT_ADDR + 0x1e0000000UL)
-#define APECS_IO (IDENT_ADDR + 0x1c0000000UL)
-#define APECS_SPARSE_MEM (IDENT_ADDR + 0x200000000UL)
-#define APECS_DENSE_MEM (IDENT_ADDR + 0x300000000UL)
-
-
-/*
- * Bit definitions for I/O Controller status register 0:
- */
-#define APECS_IOC_STAT0_CMD 0xf
-#define APECS_IOC_STAT0_ERR (1<<4)
-#define APECS_IOC_STAT0_LOST (1<<5)
-#define APECS_IOC_STAT0_THIT (1<<6)
-#define APECS_IOC_STAT0_TREF (1<<7)
-#define APECS_IOC_STAT0_CODE_SHIFT 8
-#define APECS_IOC_STAT0_CODE_MASK 0x7
-#define APECS_IOC_STAT0_P_NBR_SHIFT 13
-#define APECS_IOC_STAT0_P_NBR_MASK 0x7ffff
-
-#define APECS_HAE_ADDRESS APECS_IOC_HAXR1
-
-
-/*
- * Data structure for handling APECS machine checks:
- */
-
-struct el_apecs_mikasa_sysdata_mcheck
-{
- unsigned long coma_gcr;
- unsigned long coma_edsr;
- unsigned long coma_ter;
- unsigned long coma_elar;
- unsigned long coma_ehar;
- unsigned long coma_ldlr;
- unsigned long coma_ldhr;
- unsigned long coma_base0;
- unsigned long coma_base1;
- unsigned long coma_base2;
- unsigned long coma_base3;
- unsigned long coma_cnfg0;
- unsigned long coma_cnfg1;
- unsigned long coma_cnfg2;
- unsigned long coma_cnfg3;
- unsigned long epic_dcsr;
- unsigned long epic_pear;
- unsigned long epic_sear;
- unsigned long epic_tbr1;
- unsigned long epic_tbr2;
- unsigned long epic_pbr1;
- unsigned long epic_pbr2;
- unsigned long epic_pmr1;
- unsigned long epic_pmr2;
- unsigned long epic_harx1;
- unsigned long epic_harx2;
- unsigned long epic_pmlt;
- unsigned long epic_tag0;
- unsigned long epic_tag1;
- unsigned long epic_tag2;
- unsigned long epic_tag3;
- unsigned long epic_tag4;
- unsigned long epic_tag5;
- unsigned long epic_tag6;
- unsigned long epic_tag7;
- unsigned long epic_data0;
- unsigned long epic_data1;
- unsigned long epic_data2;
- unsigned long epic_data3;
- unsigned long epic_data4;
- unsigned long epic_data5;
- unsigned long epic_data6;
- unsigned long epic_data7;
-
- unsigned long pceb_vid;
- unsigned long pceb_did;
- unsigned long pceb_revision;
- unsigned long pceb_command;
- unsigned long pceb_status;
- unsigned long pceb_latency;
- unsigned long pceb_control;
- unsigned long pceb_arbcon;
- unsigned long pceb_arbpri;
-
- unsigned long esc_id;
- unsigned long esc_revision;
- unsigned long esc_int0;
- unsigned long esc_int1;
- unsigned long esc_elcr0;
- unsigned long esc_elcr1;
- unsigned long esc_last_eisa;
- unsigned long esc_nmi_stat;
-
- unsigned long pci_ir;
- unsigned long pci_imr;
- unsigned long svr_mgr;
-};
-
-/* This for the normal APECS machines. */
-struct el_apecs_sysdata_mcheck
-{
- unsigned long coma_gcr;
- unsigned long coma_edsr;
- unsigned long coma_ter;
- unsigned long coma_elar;
- unsigned long coma_ehar;
- unsigned long coma_ldlr;
- unsigned long coma_ldhr;
- unsigned long coma_base0;
- unsigned long coma_base1;
- unsigned long coma_base2;
- unsigned long coma_cnfg0;
- unsigned long coma_cnfg1;
- unsigned long coma_cnfg2;
- unsigned long epic_dcsr;
- unsigned long epic_pear;
- unsigned long epic_sear;
- unsigned long epic_tbr1;
- unsigned long epic_tbr2;
- unsigned long epic_pbr1;
- unsigned long epic_pbr2;
- unsigned long epic_pmr1;
- unsigned long epic_pmr2;
- unsigned long epic_harx1;
- unsigned long epic_harx2;
- unsigned long epic_pmlt;
- unsigned long epic_tag0;
- unsigned long epic_tag1;
- unsigned long epic_tag2;
- unsigned long epic_tag3;
- unsigned long epic_tag4;
- unsigned long epic_tag5;
- unsigned long epic_tag6;
- unsigned long epic_tag7;
- unsigned long epic_data0;
- unsigned long epic_data1;
- unsigned long epic_data2;
- unsigned long epic_data3;
- unsigned long epic_data4;
- unsigned long epic_data5;
- unsigned long epic_data6;
- unsigned long epic_data7;
-};
-
-struct el_apecs_procdata
-{
- unsigned long paltemp[32]; /* PAL TEMP REGS. */
- /* EV4-specific fields */
- unsigned long exc_addr; /* Address of excepting instruction. */
- unsigned long exc_sum; /* Summary of arithmetic traps. */
- unsigned long exc_mask; /* Exception mask (from exc_sum). */
- unsigned long iccsr; /* IBox hardware enables. */
- unsigned long pal_base; /* Base address for PALcode. */
- unsigned long hier; /* Hardware Interrupt Enable. */
- unsigned long hirr; /* Hardware Interrupt Request. */
- unsigned long csr; /* D-stream fault info. */
- unsigned long dc_stat; /* D-cache status (ECC/Parity Err). */
- unsigned long dc_addr; /* EV3 Phys Addr for ECC/DPERR. */
- unsigned long abox_ctl; /* ABox Control Register. */
- unsigned long biu_stat; /* BIU Status. */
- unsigned long biu_addr; /* BUI Address. */
- unsigned long biu_ctl; /* BIU Control. */
- unsigned long fill_syndrome;/* For correcting ECC errors. */
- unsigned long fill_addr; /* Cache block which was being read */
- unsigned long va; /* Effective VA of fault or miss. */
- unsigned long bc_tag; /* Backup Cache Tag Probe Results.*/
-};
-
-
-#ifdef __KERNEL__
-
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __IO_EXTERN_INLINE
-#endif
-
-/*
- * I/O functions:
- *
- * Unlike Jensen, the APECS machines have no concept of local
- * I/O---everything goes over the PCI bus.
- *
- * There is plenty room for optimization here. In particular,
- * the Alpha's insb/insw/extb/extw should be useful in moving
- * data to/from the right byte-lanes.
- */
-
-#define vip volatile int __force *
-#define vuip volatile unsigned int __force *
-#define vulp volatile unsigned long __force *
-
-#define APECS_SET_HAE \
- do { \
- if (addr >= (1UL << 24)) { \
- unsigned long msb = addr & 0xf8000000; \
- addr -= msb; \
- set_hae(msb); \
- } \
- } while (0)
-
-__EXTERN_INLINE u8 apecs_ioread8(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x00;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x00;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extbl(result, addr & 3);
-}
-
-__EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x00;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x00;
- }
-
- w = __kernel_insbl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE u16 apecs_ioread16(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x08;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x08;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extwl(result, addr & 3);
-}
-
-__EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x08;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x08;
- }
-
- w = __kernel_inswl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE u32 apecs_ioread32(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < APECS_DENSE_MEM)
- addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
- return *(vuip)addr;
-}
-
-__EXTERN_INLINE void apecs_iowrite32(u32 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < APECS_DENSE_MEM)
- addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
- *(vuip)addr = b;
-}
-
-__EXTERN_INLINE u64 apecs_ioread64(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < APECS_DENSE_MEM)
- addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
- return *(vulp)addr;
-}
-
-__EXTERN_INLINE void apecs_iowrite64(u64 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < APECS_DENSE_MEM)
- addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
- *(vulp)addr = b;
-}
-
-__EXTERN_INLINE void __iomem *apecs_ioportmap(unsigned long addr)
-{
- return (void __iomem *)(addr + APECS_IO);
-}
-
-__EXTERN_INLINE void __iomem *apecs_ioremap(unsigned long addr,
- unsigned long size)
-{
- return (void __iomem *)(addr + APECS_DENSE_MEM);
-}
-
-__EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr)
-{
- return addr >= IDENT_ADDR + 0x180000000UL;
-}
-
-__EXTERN_INLINE int apecs_is_mmio(const volatile void __iomem *addr)
-{
- return (unsigned long)addr >= APECS_DENSE_MEM;
-}
-
-#undef APECS_SET_HAE
-
-#undef vip
-#undef vuip
-#undef vulp
-
-#undef __IO_PREFIX
-#define __IO_PREFIX apecs
-#define apecs_trivial_io_bw 0
-#define apecs_trivial_io_lq 0
-#define apecs_trivial_rw_bw 2
-#define apecs_trivial_rw_lq 1
-#define apecs_trivial_iounmap 1
-#include <asm/io_trivial.h>
-
-#ifdef __IO_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __IO_EXTERN_INLINE
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* __ALPHA_APECS__H__ */
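
Both of the chipset headers deleted in this patch (APECS here, LCA just below) implement sub-longword MMIO through Alpha "sparse space": the bus offset is widened by a factor of 32, the low bits of the sparse address encode the transfer size, and the byte of interest is picked out of the returned longword with extbl/insbl. The sketch below covers only that address arithmetic, following the pattern of the deleted apecs_ioread8(); DEMO_SPARSE_BASE is a made-up stand-in constant and nothing here touches real hardware.

#include <stdio.h>

/* Made-up stand-in for APECS_SPARSE_MEM; only the arithmetic matters. */
#define DEMO_SPARSE_BASE 0x200000000UL

/* Sparse space gives every byte-wide bus location a 32-byte stride:
 * shift the bus offset left by 5, add the sparse-space base plus a
 * size code (0x00 selects a byte transfer), and remember which byte
 * lane of the returned longword holds the data. */
static unsigned long demo_sparse_byte_addr(unsigned long bus_off,
                                           unsigned int *lane)
{
        *lane = bus_off & 3;            /* byte lane within the longword */
        return (bus_off << 5) + DEMO_SPARSE_BASE + 0x00;
}

int main(void)
{
        unsigned int lane;
        unsigned long a = demo_sparse_byte_addr(0x123, &lane);

        printf("sparse address 0x%lx, byte lane %u\n", a, lane);
        return 0;
}

The 16-bit and 32-bit variants in the deleted code differ only in the size code added to the sparse address (0x08 and 0x18 respectively).
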
diff --git a/arch/alpha/include/asm/core_lca.h b/arch/alpha/include/asm/core_lca.h
deleted file mode 100644
index d8c3e72ef8f6..000000000000
--- a/arch/alpha/include/asm/core_lca.h
+++ /dev/null
@@ -1,378 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ALPHA_LCA__H__
-#define __ALPHA_LCA__H__
-
-#include <asm/compiler.h>
-#include <asm/mce.h>
-
-/*
- * Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068,
- * for example).
- *
- * This file is based on:
- *
- * DECchip 21066 and DECchip 21068 Alpha AXP Microprocessors
- * Hardware Reference Manual; Digital Equipment Corp.; May 1994;
- * Maynard, MA; Order Number: EC-N2681-71.
- */
-
-/*
- * NOTE: The LCA uses a Host Address Extension (HAE) register to access
- * PCI addresses that are beyond the first 27 bits of address
- * space. Updating the HAE requires an external cycle (and
- * a memory barrier), which tends to be slow. Instead of updating
- * it on each sparse memory access, we keep the current HAE value
- * cached in variable cache_hae. Only if the cached HAE differs
- * from the desired HAE value do we actually updated HAE register.
- * The HAE register is preserved by the interrupt handler entry/exit
- * code, so this scheme works even in the presence of interrupts.
- *
- * Dense memory space doesn't require the HAE, but is restricted to
- * aligned 32 and 64 bit accesses. Special Cycle and Interrupt
- * Acknowledge cycles may also require the use of the HAE. The LCA
- * limits I/O address space to the bottom 24 bits of address space,
- * but this easily covers the 16 bit ISA I/O address space.
- */
-
-/*
- * NOTE 2! The memory operations do not set any memory barriers, as
- * it's not needed for cases like a frame buffer that is essentially
- * memory-like. You need to do them by hand if the operations depend
- * on ordering.
- *
- * Similarly, the port I/O operations do a "mb" only after a write
- * operation: if an mb is needed before (as in the case of doing
- * memory mapped I/O first, and then a port I/O operation to the same
- * device), it needs to be done by hand.
- *
- * After the above has bitten me 100 times, I'll give up and just do
- * the mb all the time, but right now I'm hoping this will work out.
- * Avoiding mb's may potentially be a noticeable speed improvement,
- * but I can't honestly say I've tested it.
- *
- * Handling interrupts that need to do mb's to synchronize to
- * non-interrupts is another fun race area. Don't do it (because if
- * you do, I'll have to do *everything* with interrupts disabled,
- * ugh).
- */
-
-/*
- * Memory Controller registers:
- */
-#define LCA_MEM_BCR0 (IDENT_ADDR + 0x120000000UL)
-#define LCA_MEM_BCR1 (IDENT_ADDR + 0x120000008UL)
-#define LCA_MEM_BCR2 (IDENT_ADDR + 0x120000010UL)
-#define LCA_MEM_BCR3 (IDENT_ADDR + 0x120000018UL)
-#define LCA_MEM_BMR0 (IDENT_ADDR + 0x120000020UL)
-#define LCA_MEM_BMR1 (IDENT_ADDR + 0x120000028UL)
-#define LCA_MEM_BMR2 (IDENT_ADDR + 0x120000030UL)
-#define LCA_MEM_BMR3 (IDENT_ADDR + 0x120000038UL)
-#define LCA_MEM_BTR0 (IDENT_ADDR + 0x120000040UL)
-#define LCA_MEM_BTR1 (IDENT_ADDR + 0x120000048UL)
-#define LCA_MEM_BTR2 (IDENT_ADDR + 0x120000050UL)
-#define LCA_MEM_BTR3 (IDENT_ADDR + 0x120000058UL)
-#define LCA_MEM_GTR (IDENT_ADDR + 0x120000060UL)
-#define LCA_MEM_ESR (IDENT_ADDR + 0x120000068UL)
-#define LCA_MEM_EAR (IDENT_ADDR + 0x120000070UL)
-#define LCA_MEM_CAR (IDENT_ADDR + 0x120000078UL)
-#define LCA_MEM_VGR (IDENT_ADDR + 0x120000080UL)
-#define LCA_MEM_PLM (IDENT_ADDR + 0x120000088UL)
-#define LCA_MEM_FOR (IDENT_ADDR + 0x120000090UL)
-
-/*
- * I/O Controller registers:
- */
-#define LCA_IOC_HAE (IDENT_ADDR + 0x180000000UL)
-#define LCA_IOC_CONF (IDENT_ADDR + 0x180000020UL)
-#define LCA_IOC_STAT0 (IDENT_ADDR + 0x180000040UL)
-#define LCA_IOC_STAT1 (IDENT_ADDR + 0x180000060UL)
-#define LCA_IOC_TBIA (IDENT_ADDR + 0x180000080UL)
-#define LCA_IOC_TB_ENA (IDENT_ADDR + 0x1800000a0UL)
-#define LCA_IOC_SFT_RST (IDENT_ADDR + 0x1800000c0UL)
-#define LCA_IOC_PAR_DIS (IDENT_ADDR + 0x1800000e0UL)
-#define LCA_IOC_W_BASE0 (IDENT_ADDR + 0x180000100UL)
-#define LCA_IOC_W_BASE1 (IDENT_ADDR + 0x180000120UL)
-#define LCA_IOC_W_MASK0 (IDENT_ADDR + 0x180000140UL)
-#define LCA_IOC_W_MASK1 (IDENT_ADDR + 0x180000160UL)
-#define LCA_IOC_T_BASE0 (IDENT_ADDR + 0x180000180UL)
-#define LCA_IOC_T_BASE1 (IDENT_ADDR + 0x1800001a0UL)
-#define LCA_IOC_TB_TAG0 (IDENT_ADDR + 0x188000000UL)
-#define LCA_IOC_TB_TAG1 (IDENT_ADDR + 0x188000020UL)
-#define LCA_IOC_TB_TAG2 (IDENT_ADDR + 0x188000040UL)
-#define LCA_IOC_TB_TAG3 (IDENT_ADDR + 0x188000060UL)
-#define LCA_IOC_TB_TAG4 (IDENT_ADDR + 0x188000070UL)
-#define LCA_IOC_TB_TAG5 (IDENT_ADDR + 0x1880000a0UL)
-#define LCA_IOC_TB_TAG6 (IDENT_ADDR + 0x1880000c0UL)
-#define LCA_IOC_TB_TAG7 (IDENT_ADDR + 0x1880000e0UL)
-
-/*
- * Memory spaces:
- */
-#define LCA_IACK_SC (IDENT_ADDR + 0x1a0000000UL)
-#define LCA_CONF (IDENT_ADDR + 0x1e0000000UL)
-#define LCA_IO (IDENT_ADDR + 0x1c0000000UL)
-#define LCA_SPARSE_MEM (IDENT_ADDR + 0x200000000UL)
-#define LCA_DENSE_MEM (IDENT_ADDR + 0x300000000UL)
-
-/*
- * Bit definitions for I/O Controller status register 0:
- */
-#define LCA_IOC_STAT0_CMD 0xf
-#define LCA_IOC_STAT0_ERR (1<<4)
-#define LCA_IOC_STAT0_LOST (1<<5)
-#define LCA_IOC_STAT0_THIT (1<<6)
-#define LCA_IOC_STAT0_TREF (1<<7)
-#define LCA_IOC_STAT0_CODE_SHIFT 8
-#define LCA_IOC_STAT0_CODE_MASK 0x7
-#define LCA_IOC_STAT0_P_NBR_SHIFT 13
-#define LCA_IOC_STAT0_P_NBR_MASK 0x7ffff
-
-#define LCA_HAE_ADDRESS LCA_IOC_HAE
-
-/* LCA PMR Power Management register defines */
-#define LCA_PMR_ADDR (IDENT_ADDR + 0x120000098UL)
-#define LCA_PMR_PDIV 0x7 /* Primary clock divisor */
-#define LCA_PMR_ODIV 0x38 /* Override clock divisor */
-#define LCA_PMR_INTO 0x40 /* Interrupt override */
-#define LCA_PMR_DMAO 0x80 /* DMA override */
-#define LCA_PMR_OCCEB 0xffff0000L /* Override cycle counter - even bits */
-#define LCA_PMR_OCCOB 0xffff000000000000L /* Override cycle counter - even bits */
-#define LCA_PMR_PRIMARY_MASK 0xfffffffffffffff8L
-
-/* LCA PMR Macros */
-
-#define LCA_READ_PMR (*(volatile unsigned long *)LCA_PMR_ADDR)
-#define LCA_WRITE_PMR(d) (*((volatile unsigned long *)LCA_PMR_ADDR) = (d))
-
-#define LCA_GET_PRIMARY(r) ((r) & LCA_PMR_PDIV)
-#define LCA_GET_OVERRIDE(r) (((r) >> 3) & LCA_PMR_PDIV)
-#define LCA_SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK)|(c)))
-
-/* LCA PMR Divisor values */
-#define LCA_PMR_DIV_1 0x0
-#define LCA_PMR_DIV_1_5 0x1
-#define LCA_PMR_DIV_2 0x2
-#define LCA_PMR_DIV_4 0x3
-#define LCA_PMR_DIV_8 0x4
-#define LCA_PMR_DIV_16 0x5
-#define LCA_PMR_DIV_MIN DIV_1
-#define LCA_PMR_DIV_MAX DIV_16
-
-
-/*
- * Data structure for handling LCA machine checks. Correctable errors
- * result in a short logout frame, uncorrectable ones in a long one.
- */
-struct el_lca_mcheck_short {
- struct el_common h; /* common logout header */
- unsigned long esr; /* error-status register */
- unsigned long ear; /* error-address register */
- unsigned long dc_stat; /* dcache status register */
- unsigned long ioc_stat0; /* I/O controller status register 0 */
- unsigned long ioc_stat1; /* I/O controller status register 1 */
-};
-
-struct el_lca_mcheck_long {
- struct el_common h; /* common logout header */
- unsigned long pt[31]; /* PAL temps */
- unsigned long exc_addr; /* exception address */
- unsigned long pad1[3];
- unsigned long pal_base; /* PALcode base address */
- unsigned long hier; /* hw interrupt enable */
- unsigned long hirr; /* hw interrupt request */
- unsigned long mm_csr; /* MMU control & status */
- unsigned long dc_stat; /* data cache status */
- unsigned long dc_addr; /* data cache addr register */
- unsigned long abox_ctl; /* address box control register */
- unsigned long esr; /* error status register */
- unsigned long ear; /* error address register */
- unsigned long car; /* cache control register */
- unsigned long ioc_stat0; /* I/O controller status register 0 */
- unsigned long ioc_stat1; /* I/O controller status register 1 */
- unsigned long va; /* virtual address register */
-};
-
-union el_lca {
- struct el_common * c;
- struct el_lca_mcheck_long * l;
- struct el_lca_mcheck_short * s;
-};
-
-#ifdef __KERNEL__
-
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __IO_EXTERN_INLINE
-#endif
-
-/*
- * I/O functions:
- *
- * Unlike Jensen, the Noname machines have no concept of local
- * I/O---everything goes over the PCI bus.
- *
- * There is plenty room for optimization here. In particular,
- * the Alpha's insb/insw/extb/extw should be useful in moving
- * data to/from the right byte-lanes.
- */
-
-#define vip volatile int __force *
-#define vuip volatile unsigned int __force *
-#define vulp volatile unsigned long __force *
-
-#define LCA_SET_HAE \
- do { \
- if (addr >= (1UL << 24)) { \
- unsigned long msb = addr & 0xf8000000; \
- addr -= msb; \
- set_hae(msb); \
- } \
- } while (0)
-
-
-__EXTERN_INLINE u8 lca_ioread8(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x00;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x00;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extbl(result, addr & 3);
-}
-
-__EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x00;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x00;
- }
-
- w = __kernel_insbl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE u16 lca_ioread16(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x08;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x08;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extwl(result, addr & 3);
-}
-
-__EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x08;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x08;
- }
-
- w = __kernel_inswl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE u32 lca_ioread32(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < LCA_DENSE_MEM)
- addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
- return *(vuip)addr;
-}
-
-__EXTERN_INLINE void lca_iowrite32(u32 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < LCA_DENSE_MEM)
- addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
- *(vuip)addr = b;
-}
-
-__EXTERN_INLINE u64 lca_ioread64(const void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < LCA_DENSE_MEM)
- addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
- return *(vulp)addr;
-}
-
-__EXTERN_INLINE void lca_iowrite64(u64 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < LCA_DENSE_MEM)
- addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
- *(vulp)addr = b;
-}
-
-__EXTERN_INLINE void __iomem *lca_ioportmap(unsigned long addr)
-{
- return (void __iomem *)(addr + LCA_IO);
-}
-
-__EXTERN_INLINE void __iomem *lca_ioremap(unsigned long addr,
- unsigned long size)
-{
- return (void __iomem *)(addr + LCA_DENSE_MEM);
-}
-
-__EXTERN_INLINE int lca_is_ioaddr(unsigned long addr)
-{
- return addr >= IDENT_ADDR + 0x120000000UL;
-}
-
-__EXTERN_INLINE int lca_is_mmio(const volatile void __iomem *addr)
-{
- return (unsigned long)addr >= LCA_DENSE_MEM;
-}
-
-#undef vip
-#undef vuip
-#undef vulp
-
-#undef __IO_PREFIX
-#define __IO_PREFIX lca
-#define lca_trivial_rw_bw 2
-#define lca_trivial_rw_lq 1
-#define lca_trivial_io_bw 0
-#define lca_trivial_io_lq 0
-#define lca_trivial_iounmap 1
-#include <asm/io_trivial.h>
-
-#ifdef __IO_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __IO_EXTERN_INLINE
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* __ALPHA_LCA__H__ */
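
The NOTE block at the top of the deleted core_lca.h explains why sparse-space accesses cache the Host Address Extension (HAE) value and rewrite the register only when it actually changes: the external cycle plus memory barrier is slow. The sketch below shows only that caching decision; demo_hae_cache and demo_write_hae_register() are hypothetical stand-ins for the cached value and the real set_hae() path, and the threshold and mask are taken from the LCA_SET_HAE macro above.

#include <stdio.h>

static unsigned long demo_hae_cache;    /* stand-in for the cached HAE value */

static void demo_write_hae_register(unsigned long msb)
{
        printf("HAE register write: 0x%lx\n", msb);  /* the expensive step */
}

/* Split an address into HAE bits and an in-window offset, and skip the
 * slow external register write whenever the HAE bits are unchanged. */
static unsigned long demo_set_hae(unsigned long addr)
{
        if (addr >= (1UL << 24)) {              /* beyond the low window */
                unsigned long msb = addr & 0xf8000000;

                addr -= msb;                    /* low bits stay in the access */
                if (msb != demo_hae_cache) {    /* only update on change */
                        demo_hae_cache = msb;
                        demo_write_hae_register(msb);
                }
        }
        return addr;                            /* offset to use within the window */
}

int main(void)
{
        printf("offset 0x%lx\n", demo_set_hae(0x0a001234));  /* writes HAE */
        printf("offset 0x%lx\n", demo_set_hae(0x0a005678));  /* same HAE, no write */
        return 0;
}
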
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index ab956b1625b5..ca9b091d9c5f 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -25,16 +25,8 @@
#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 27 bits */
/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
-/* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
#define _GAMMA_BIAS 0x8000000000UL
-
-#if defined(CONFIG_ALPHA_GENERIC)
-#define GAMMA_BIAS alpha_mv.sys.t2.gamma_bias
-#elif defined(CONFIG_ALPHA_GAMMA)
#define GAMMA_BIAS _GAMMA_BIAS
-#else
-#define GAMMA_BIAS 0
-#endif
/*
* Memory spaces:
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 6ce7e2041685..ad5a59b035cb 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -6,11 +6,7 @@ extern const struct dma_map_ops alpha_pci_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(void)
{
-#ifdef CONFIG_ALPHA_JENSEN
- return NULL;
-#else
return &alpha_pci_ops;
-#endif
}
#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/alpha/include/asm/dma.h b/arch/alpha/include/asm/dma.h
index a04d76b96089..3a88812b7165 100644
--- a/arch/alpha/include/asm/dma.h
+++ b/arch/alpha/include/asm/dma.h
@@ -82,11 +82,6 @@
just a wiring limit.
*/
-/* The maximum address for ISA DMA transfer on Alpha XL, due to an
- hardware SIO limitation, is 64MB.
-*/
-#define ALPHA_XL_MAX_ISA_DMA_ADDRESS 0x04000000UL
-
/* The maximum address for ISA DMA transfer on RUFFIAN,
due to an hardware SIO limitation, is 16MB.
*/
@@ -107,9 +102,7 @@
#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ISA_DMA_ADDRESS (alpha_mv.max_isa_dma_address)
#else
-# if defined(CONFIG_ALPHA_XL)
-# define MAX_ISA_DMA_ADDRESS ALPHA_XL_MAX_ISA_DMA_ADDRESS
-# elif defined(CONFIG_ALPHA_RUFFIAN)
+# if defined(CONFIG_ALPHA_RUFFIAN)
# define MAX_ISA_DMA_ADDRESS ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS
# elif defined(CONFIG_ALPHA_SABLE)
# define MAX_ISA_DMA_ADDRESS ALPHA_SABLE_MAX_ISA_DMA_ADDRESS
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index e6da23f1da83..50c82187e60e 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -74,7 +74,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
+#define elf_check_arch(x) (((x)->e_machine == EM_ALPHA) && !((x)->e_flags & EF_ALPHA_32BIT))
/*
* These are used to set parameters in the core dumps.
@@ -133,16 +133,10 @@ extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
#define ELF_PLATFORM \
({ \
enum implver_enum i_ = implver(); \
- ( i_ == IMPLVER_EV4 ? "ev4" \
- : i_ == IMPLVER_EV5 \
- ? (amask(AMASK_BWX) ? "ev5" : "ev56") \
+ ( i_ == IMPLVER_EV5 ? "ev56" \
: amask (AMASK_CIX) ? "ev6" : "ev67"); \
})
-#define SET_PERSONALITY(EX) \
- set_personality(((EX).e_flags & EF_ALPHA_32BIT) \
- ? PER_LINUX_32BIT : PER_LINUX)
-
extern int alpha_l1i_cacheshape;
extern int alpha_l1d_cacheshape;
extern int alpha_l2_cacheshape;
diff --git a/arch/alpha/include/asm/hwrpb.h b/arch/alpha/include/asm/hwrpb.h
index fc76f36265ad..db831cf8de10 100644
--- a/arch/alpha/include/asm/hwrpb.h
+++ b/arch/alpha/include/asm/hwrpb.h
@@ -135,7 +135,7 @@ struct crb_struct {
/* virtual->physical map */
unsigned long map_entries;
unsigned long map_pages;
- struct vf_map_struct map[1];
+ struct vf_map_struct map[];
};
struct memclust_struct {
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 4f47a5003fe8..fa3e4c246cda 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -10,10 +10,6 @@
#include <asm/machvec.h>
#include <asm/hwrpb.h>
-/* The generic header contains only prototypes. Including it ensures that
- the implementation we have here matches that interface. */
-#include <asm-generic/iomap.h>
-
/*
* Virtual -> physical identity mapping starts at this offset
*/
@@ -88,7 +84,6 @@ static inline void * phys_to_virt(unsigned long address)
#define virt_to_phys virt_to_phys
#define phys_to_virt phys_to_virt
-#define page_to_phys(page) page_to_pa(page)
/* Maximum PIO space address supported? */
#define IO_SPACE_LIMIT 0xffff
@@ -203,16 +198,10 @@ static inline int generic_is_mmio(const volatile void __iomem *a)
#else
-#if defined(CONFIG_ALPHA_APECS)
-# include <asm/core_apecs.h>
-#elif defined(CONFIG_ALPHA_CIA)
+#if defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
-#elif defined(CONFIG_ALPHA_JENSEN)
-# include <asm/jensen.h>
-#elif defined(CONFIG_ALPHA_LCA)
-# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
@@ -283,13 +272,24 @@ extern void __raw_writeq(u64 b, volatile void __iomem *addr);
#define __raw_writel __raw_writel
#define __raw_writeq __raw_writeq
-/*
- * Mapping from port numbers to __iomem space is pretty easy.
- */
+extern unsigned int ioread8(const void __iomem *);
+extern unsigned int ioread16(const void __iomem *);
+extern unsigned int ioread32(const void __iomem *);
+extern u64 ioread64(const void __iomem *);
+
+extern void iowrite8(u8, void __iomem *);
+extern void iowrite16(u16, void __iomem *);
+extern void iowrite32(u32, void __iomem *);
+extern void iowrite64(u64, void __iomem *);
+
+extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);
+
+extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
-/* These two have to be extern inline because of the extern prototype from
- <asm-generic/iomap.h>. It is not legal to mix "extern" and "static" for
- the same declaration. */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
@@ -540,8 +540,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
+#define ioread64be(p) swab64(ioread64(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))
+#define iowrite64be(v,p) iowrite64(swab64(v), (p))
#define inb_p inb
#define inw_p inw
@@ -631,33 +633,11 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
#define outsw outsw
#define outsl outsl
-/*
- * The Alpha Jensen hardware for some rather strange reason puts
- * the RTC clock at 0x170 instead of 0x70. Probably due to some
- * misguided idea about using 0x70 for NMI stuff.
- *
- * These defines will override the defaults when doing RTC queries
- */
-
-#ifdef CONFIG_ALPHA_GENERIC
-# define RTC_PORT(x) ((x) + alpha_mv.rtc_port)
-#else
-# ifdef CONFIG_ALPHA_JENSEN
-# define RTC_PORT(x) (0x170+(x))
-# else
-# define RTC_PORT(x) (0x70 + (x))
-# endif
-#endif
+#define RTC_PORT(x) (0x70 + (x))
#define RTC_ALWAYS_BCD 0
-/*
- * These get provided from <asm-generic/iomap.h> since alpha does not
- * select GENERIC_IOMAP.
- */
#define ioread64 ioread64
#define iowrite64 iowrite64
-#define ioread64be ioread64be
-#define iowrite64be iowrite64be
#define ioread8_rep ioread8_rep
#define ioread16_rep ioread16_rep
#define ioread32_rep ioread32_rep
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
index 432402c8e47f..d83b26b6660f 100644
--- a/arch/alpha/include/asm/irq.h
+++ b/arch/alpha/include/asm/irq.h
@@ -31,16 +31,11 @@
# define NR_IRQS (32768 + 16) /* marvel - 32 pids */
# endif
-#elif defined(CONFIG_ALPHA_CABRIOLET) || \
- defined(CONFIG_ALPHA_EB66P) || \
- defined(CONFIG_ALPHA_EB164) || \
- defined(CONFIG_ALPHA_PC164) || \
+#elif defined(CONFIG_ALPHA_PC164) || \
defined(CONFIG_ALPHA_LX164)
# define NR_IRQS 35
-#elif defined(CONFIG_ALPHA_EB66) || \
- defined(CONFIG_ALPHA_EB64P) || \
- defined(CONFIG_ALPHA_MIKASA)
+#elif defined(CONFIG_ALPHA_MIKASA)
# define NR_IRQS 32
#elif defined(CONFIG_ALPHA_ALCOR) || \
@@ -55,7 +50,6 @@
# define NR_IRQS 40
#elif defined(CONFIG_ALPHA_DP264) || \
- defined(CONFIG_ALPHA_LYNX) || \
defined(CONFIG_ALPHA_SHARK)
# define NR_IRQS 64
diff --git a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h
deleted file mode 100644
index 66eb049eb421..000000000000
--- a/arch/alpha/include/asm/jensen.h
+++ /dev/null
@@ -1,363 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ALPHA_JENSEN_H
-#define __ALPHA_JENSEN_H
-
-#include <asm/compiler.h>
-
-/*
- * Defines for the AlphaPC EISA IO and memory address space.
- */
-
-/*
- * NOTE! The memory operations do not set any memory barriers, as it's
- * not needed for cases like a frame buffer that is essentially memory-like.
- * You need to do them by hand if the operations depend on ordering.
- *
- * Similarly, the port IO operations do a "mb" only after a write operation:
- * if an mb is needed before (as in the case of doing memory mapped IO
- * first, and then a port IO operation to the same device), it needs to be
- * done by hand.
- *
- * After the above has bitten me 100 times, I'll give up and just do the
- * mb all the time, but right now I'm hoping this will work out. Avoiding
- * mb's may potentially be a noticeable speed improvement, but I can't
- * honestly say I've tested it.
- *
- * Handling interrupts that need to do mb's to synchronize to non-interrupts
- * is another fun race area. Don't do it (because if you do, I'll have to
- * do *everything* with interrupts disabled, ugh).
- */
-
-/*
- * EISA Interrupt Acknowledge address
- */
-#define EISA_INTA (IDENT_ADDR + 0x100000000UL)
-
-/*
- * FEPROM addresses
- */
-#define EISA_FEPROM0 (IDENT_ADDR + 0x180000000UL)
-#define EISA_FEPROM1 (IDENT_ADDR + 0x1A0000000UL)
-
-/*
- * VL82C106 base address
- */
-#define EISA_VL82C106 (IDENT_ADDR + 0x1C0000000UL)
-
-/*
- * EISA "Host Address Extension" address (bits 25-31 of the EISA address)
- */
-#define EISA_HAE (IDENT_ADDR + 0x1D0000000UL)
-
-/*
- * "SYSCTL" register address
- */
-#define EISA_SYSCTL (IDENT_ADDR + 0x1E0000000UL)
-
-/*
- * "spare" register address
- */
-#define EISA_SPARE (IDENT_ADDR + 0x1F0000000UL)
-
-/*
- * EISA memory address offset
- */
-#define EISA_MEM (IDENT_ADDR + 0x200000000UL)
-
-/*
- * EISA IO address offset
- */
-#define EISA_IO (IDENT_ADDR + 0x300000000UL)
-
-
-#ifdef __KERNEL__
-
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __IO_EXTERN_INLINE
-#endif
-
-/*
- * Handle the "host address register". This needs to be set
- * to the high 7 bits of the EISA address. This is also needed
- * for EISA IO addresses, which are only 16 bits wide (the
- * hae needs to be set to 0).
- *
- * HAE isn't needed for the local IO operations, though.
- */
-
-#define JENSEN_HAE_ADDRESS EISA_HAE
-#define JENSEN_HAE_MASK 0x1ffffff
-
-__EXTERN_INLINE void jensen_set_hae(unsigned long addr)
-{
- /* hae on the Jensen is bits 31:25 shifted right */
- addr >>= 25;
- if (addr != alpha_mv.hae_cache)
- set_hae(addr);
-}
-
-#define vuip volatile unsigned int *
-#define vulp volatile unsigned long *
-
-/*
- * IO functions
- *
- * The "local" functions are those that don't go out to the EISA bus,
- * but instead act on the VL82C106 chip directly.. This is mainly the
- * keyboard, RTC, printer and first two serial lines..
- *
- * The local stuff makes for some complications, but it seems to be
- * gone in the PCI version. I hope I can get DEC suckered^H^H^H^H^H^H^H^H
- * convinced that I need one of the newer machines.
- */
-
-__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
-{
- return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
-}
-
-__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
-{
- *(vuip)((addr << 9) + EISA_VL82C106) = b;
- mb();
-}
-
-__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
-{
- long result;
-
- jensen_set_hae(0);
- result = *(volatile int *)((addr << 7) + EISA_IO + 0x00);
- return __kernel_extbl(result, addr & 3);
-}
-
-__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
-{
- jensen_set_hae(0);
- *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
- mb();
-}
-
-/*
- * It seems gcc is not very good at optimizing away logical
- * operations that result in operations across inline functions.
- * Which is why this is a macro.
- */
-
-#define jensen_is_local(addr) ( \
-/* keyboard */ (addr == 0x60 || addr == 0x64) || \
-/* RTC */ (addr == 0x170 || addr == 0x171) || \
-/* mb COM2 */ (addr >= 0x2f8 && addr <= 0x2ff) || \
-/* mb LPT1 */ (addr >= 0x3bc && addr <= 0x3be) || \
-/* mb COM2 */ (addr >= 0x3f8 && addr <= 0x3ff))
-
-__EXTERN_INLINE u8 jensen_inb(unsigned long addr)
-{
- if (jensen_is_local(addr))
- return jensen_local_inb(addr);
- else
- return jensen_bus_inb(addr);
-}
-
-__EXTERN_INLINE void jensen_outb(u8 b, unsigned long addr)
-{
- if (jensen_is_local(addr))
- jensen_local_outb(b, addr);
- else
- jensen_bus_outb(b, addr);
-}
-
-__EXTERN_INLINE u16 jensen_inw(unsigned long addr)
-{
- long result;
-
- jensen_set_hae(0);
- result = *(volatile int *) ((addr << 7) + EISA_IO + 0x20);
- result >>= (addr & 3) * 8;
- return 0xffffUL & result;
-}
-
-__EXTERN_INLINE u32 jensen_inl(unsigned long addr)
-{
- jensen_set_hae(0);
- return *(vuip) ((addr << 7) + EISA_IO + 0x60);
-}
-
-__EXTERN_INLINE u64 jensen_inq(unsigned long addr)
-{
- jensen_set_hae(0);
- return *(vulp) ((addr << 7) + EISA_IO + 0x60);
-}
-
-__EXTERN_INLINE void jensen_outw(u16 b, unsigned long addr)
-{
- jensen_set_hae(0);
- *(vuip) ((addr << 7) + EISA_IO + 0x20) = b * 0x00010001;
- mb();
-}
-
-__EXTERN_INLINE void jensen_outl(u32 b, unsigned long addr)
-{
- jensen_set_hae(0);
- *(vuip) ((addr << 7) + EISA_IO + 0x60) = b;
- mb();
-}
-
-__EXTERN_INLINE void jensen_outq(u64 b, unsigned long addr)
-{
- jensen_set_hae(0);
- *(vulp) ((addr << 7) + EISA_IO + 0x60) = b;
- mb();
-}
-
-/*
- * Memory functions.
- */
-
-__EXTERN_INLINE u8 jensen_readb(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- long result;
-
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x00);
- result >>= (addr & 3) * 8;
- return 0xffUL & result;
-}
-
-__EXTERN_INLINE u16 jensen_readw(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- long result;
-
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x20);
- result >>= (addr & 3) * 8;
- return 0xffffUL & result;
-}
-
-__EXTERN_INLINE u32 jensen_readl(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- return *(vuip) ((addr << 7) + EISA_MEM + 0x60);
-}
-
-__EXTERN_INLINE u64 jensen_readq(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long r0, r1;
-
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- addr = (addr << 7) + EISA_MEM + 0x60;
- r0 = *(vuip) (addr);
- r1 = *(vuip) (addr + (4 << 7));
- return r1 << 32 | r0;
-}
-
-__EXTERN_INLINE void jensen_writeb(u8 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- *(vuip) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101;
-}
-
-__EXTERN_INLINE void jensen_writew(u16 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- *(vuip) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001;
-}
-
-__EXTERN_INLINE void jensen_writel(u32 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- *(vuip) ((addr << 7) + EISA_MEM + 0x60) = b;
-}
-
-__EXTERN_INLINE void jensen_writeq(u64 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- addr = (addr << 7) + EISA_MEM + 0x60;
- *(vuip) (addr) = b;
- *(vuip) (addr + (4 << 7)) = b >> 32;
-}
-
-__EXTERN_INLINE void __iomem *jensen_ioportmap(unsigned long addr)
-{
- return (void __iomem *)addr;
-}
-
-__EXTERN_INLINE void __iomem *jensen_ioremap(unsigned long addr,
- unsigned long size)
-{
- return (void __iomem *)(addr + 0x100000000ul);
-}
-
-__EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr)
-{
- return (long)addr >= 0;
-}
-
-__EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr)
-{
- return (unsigned long)addr >= 0x100000000ul;
-}
-
-/* New-style ioread interface. All the routines are so ugly for Jensen
- that it doesn't make sense to merge them. */
-
-#define IOPORT(OS, NS) \
-__EXTERN_INLINE u##NS jensen_ioread##NS(const void __iomem *xaddr) \
-{ \
- if (jensen_is_mmio(xaddr)) \
- return jensen_read##OS(xaddr - 0x100000000ul); \
- else \
- return jensen_in##OS((unsigned long)xaddr); \
-} \
-__EXTERN_INLINE void jensen_iowrite##NS(u##NS b, void __iomem *xaddr) \
-{ \
- if (jensen_is_mmio(xaddr)) \
- jensen_write##OS(b, xaddr - 0x100000000ul); \
- else \
- jensen_out##OS(b, (unsigned long)xaddr); \
-}
-
-IOPORT(b, 8)
-IOPORT(w, 16)
-IOPORT(l, 32)
-IOPORT(q, 64)
-
-#undef IOPORT
-
-#undef vuip
-#undef vulp
-
-#undef __IO_PREFIX
-#define __IO_PREFIX jensen
-#define jensen_trivial_rw_bw 0
-#define jensen_trivial_rw_lq 0
-#define jensen_trivial_io_bw 0
-#define jensen_trivial_io_lq 0
-#define jensen_trivial_iounmap 1
-#include <asm/io_trivial.h>
-
-#ifdef __IO_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __IO_EXTERN_INLINE
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* __ALPHA_JENSEN_H */
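
The deleted jensen.h splits port I/O between the on-board VL82C106 ("local") devices and the EISA bus, with jensen_inb()/jensen_outb() choosing a path via the jensen_is_local() port test. A stand-alone sketch of that dispatch rule follows; the port list is copied from the macro, while the printf calls merely stand in for the two access paths.

#include <stdbool.h>
#include <stdio.h>

/* Ports handled by the local VL82C106 chip rather than the EISA bus:
 * keyboard, RTC, the motherboard serial ports and the printer port. */
static bool demo_jensen_is_local(unsigned long addr)
{
        return (addr == 0x60 || addr == 0x64) ||      /* keyboard */
               (addr == 0x170 || addr == 0x171) ||    /* RTC      */
               (addr >= 0x2f8 && addr <= 0x2ff) ||    /* COM2     */
               (addr >= 0x3bc && addr <= 0x3be) ||    /* LPT1     */
               (addr >= 0x3f8 && addr <= 0x3ff);      /* COM1     */
}

int main(void)
{
        unsigned long ports[] = { 0x60, 0x170, 0x1f0, 0x3f8 };

        for (unsigned i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
                printf("port 0x%lx: %s\n", ports[i],
                       demo_jensen_is_local(ports[i]) ? "local VL82C106"
                                                      : "EISA bus");
        return 0;
}
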
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index 8623f995d34c..490fc880bb3f 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -72,15 +72,6 @@ struct alpha_machine_vector
int (*mv_is_ioaddr)(unsigned long);
int (*mv_is_mmio)(const volatile void __iomem *);
- void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
- struct task_struct *);
- void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);
-
- void (*mv_flush_tlb_current)(struct mm_struct *);
- void (*mv_flush_tlb_current_page)(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr);
-
void (*update_irq_hw)(unsigned long, unsigned long, int);
void (*ack_irq)(unsigned long);
void (*device_interrupt)(unsigned long vector);
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
index 29a3e3a1f02b..eee8fe836a59 100644
--- a/arch/alpha/include/asm/mmu_context.h
+++ b/arch/alpha/include/asm/mmu_context.h
@@ -71,9 +71,7 @@ __reload_thread(struct pcb_struct *pcb)
#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN (alpha_mv.max_asn)
#else
-# ifdef CONFIG_ALPHA_EV4
-# define MAX_ASN EV4_MAX_ASN
-# elif defined(CONFIG_ALPHA_EV5)
+# if defined(CONFIG_ALPHA_EV56)
# define MAX_ASN EV5_MAX_ASN
# else
# define MAX_ASN EV6_MAX_ASN
@@ -162,26 +160,6 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}
-__EXTERN_INLINE void
-ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
- struct task_struct *next)
-{
- /* As described, ASN's are broken for TLB usage. But we can
- optimize for switching between threads -- if the mm is
- unchanged from current we needn't flush. */
- /* ??? May not be needed because EV4 PALcode recognizes that
- ASN's are broken and does a tbiap itself on swpctx, under
- the "Must set ASN or flush" rule. At least this is true
- for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
- I'm going to leave this here anyway, just to Be Sure. -- r~ */
- if (prev_mm != next_mm)
- tbiap();
-
- /* Do continue to allocate ASNs, because we can still use them
- to avoid flushing the icache. */
- ev5_switch_mm(prev_mm, next_mm, next);
-}
-
extern void __load_new_mm_context(struct mm_struct *);
asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr,
long cause, struct pt_regs *regs);
@@ -209,25 +187,8 @@ ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
__load_new_mm_context(next_mm);
}
-__EXTERN_INLINE void
-ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
-{
- __load_new_mm_context(next_mm);
- tbiap();
-}
-
-#ifdef CONFIG_ALPHA_GENERIC
-# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c))
-# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y))
-#else
-# ifdef CONFIG_ALPHA_EV4
-# define switch_mm(a,b,c) ev4_switch_mm((a),(b),(c))
-# define activate_mm(x,y) ev4_activate_mm((x),(y))
-# else
-# define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
-# define activate_mm(x,y) ev5_activate_mm((x),(y))
-# endif
-#endif
+#define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
+#define activate_mm(x,y) ev5_activate_mm((x),(y))
#define init_new_context init_new_context
static inline int
diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
index 70419e6be1a3..5ec4c77e432e 100644
--- a/arch/alpha/include/asm/page.h
+++ b/arch/alpha/include/asm/page.h
@@ -4,11 +4,7 @@
#include <linux/const.h>
#include <asm/pal.h>
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT CONFIG_PAGE_SHIFT
-#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#include <vdso/page.h>
#ifndef __ASSEMBLY__
@@ -18,7 +14,7 @@ extern void clear_page(void *page);
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
- vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
+ vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
extern void copy_page(void * _to, void * _from);
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 635f0a5f5bbd..44e7aedac6e8 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -192,13 +192,6 @@ extern unsigned long __zero_page(void);
#define pte_pfn(pte) (pte_val(pte) >> PFN_PTE_SHIFT)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- \
- pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \
- pte; \
-})
extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }
@@ -334,7 +327,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-static inline int pte_swp_exclusive(pte_t pte)
+static inline bool pte_swp_exclusive(pte_t pte)
{
return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}
@@ -360,7 +353,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
extern void paging_init(void);
-/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
+/* We have our own get_unmapped_area */
#define HAVE_ARCH_UNMAPPED_AREA
#endif /* _ALPHA_PGTABLE_H */
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
index 55bb1c09fd39..5dce5518a211 100644
--- a/arch/alpha/include/asm/processor.h
+++ b/arch/alpha/include/asm/processor.h
@@ -8,23 +8,19 @@
#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H
-#include <linux/personality.h> /* for ADDR_LIMIT_32BIT */
-
/*
* We have a 42-bit user address space: 4TB user VM...
*/
#define TASK_SIZE (0x40000000000UL)
-#define STACK_TOP \
- (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
+#define STACK_TOP (0x00120000000UL)
#define STACK_TOP_MAX 0x00120000000UL
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
-#define TASK_UNMAPPED_BASE \
- ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
/* This is dead. Everything has been moved to thread_info. */
struct thread_struct { };
diff --git a/arch/alpha/include/asm/special_insns.h b/arch/alpha/include/asm/special_insns.h
index ca2c5c30b22e..798d0bdb11f9 100644
--- a/arch/alpha/include/asm/special_insns.h
+++ b/arch/alpha/include/asm/special_insns.h
@@ -15,10 +15,7 @@ enum implver_enum {
(enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code. */
-#ifdef CONFIG_ALPHA_EV4
-#define implver() IMPLVER_EV4
-#endif
-#ifdef CONFIG_ALPHA_EV5
+#ifdef CONFIG_ALPHA_EV56
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 2526fd3be5fd..05a444d77c53 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -3,7 +3,7 @@
#define _ALPHA_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
-# error "please don't include this file directly"
+# error "Please do not include this file directly."
#endif
typedef struct {
diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h
index 94dc37cf873a..ba4b359d6c39 100644
--- a/arch/alpha/include/asm/tlbflush.h
+++ b/arch/alpha/include/asm/tlbflush.h
@@ -14,16 +14,6 @@
extern void __load_new_mm_context(struct mm_struct *);
-/* Use a few helper functions to hide the ugly broken ASN
- numbers on early Alphas (ev4 and ev45). */
-
-__EXTERN_INLINE void
-ev4_flush_tlb_current(struct mm_struct *mm)
-{
- __load_new_mm_context(mm);
- tbiap();
-}
-
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
@@ -35,19 +25,6 @@ ev5_flush_tlb_current(struct mm_struct *mm)
specific icache page. */
__EXTERN_INLINE void
-ev4_flush_tlb_current_page(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- int tbi_flag = 2;
- if (vma->vm_flags & VM_EXEC) {
- __load_new_mm_context(mm);
- tbi_flag = 3;
- }
- tbi(tbi_flag, addr);
-}
-
-__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
struct vm_area_struct *vma,
unsigned long addr)
@@ -59,18 +36,8 @@ ev5_flush_tlb_current_page(struct mm_struct * mm,
}
-#ifdef CONFIG_ALPHA_GENERIC
-# define flush_tlb_current alpha_mv.mv_flush_tlb_current
-# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
-#else
-# ifdef CONFIG_ALPHA_EV4
-# define flush_tlb_current ev4_flush_tlb_current
-# define flush_tlb_current_page ev4_flush_tlb_current_page
-# else
-# define flush_tlb_current ev5_flush_tlb_current
-# define flush_tlb_current_page ev5_flush_tlb_current_page
-# endif
-#endif
+#define flush_tlb_current ev5_flush_tlb_current
+#define flush_tlb_current_page ev5_flush_tlb_current_page
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index c32c2584c0b7..ef295cbb797c 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -96,9 +96,6 @@ struct __large_struct { unsigned long buf[100]; };
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
-#ifdef __alpha_bwx__
-/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
-
#define __get_user_16(addr) \
__asm__("1: ldwu %0,%2\n" \
"2:\n" \
@@ -112,33 +109,6 @@ struct __large_struct { unsigned long buf[100]; };
EXC(1b,2b,%0,%1) \
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
-#else
-/* Unfortunately, we can't get an unaligned access trap for the sub-word
- load, so we have to do a general unaligned operation. */
-
-#define __get_user_16(addr) \
-{ \
- long __gu_tmp; \
- __asm__("1: ldq_u %0,0(%3)\n" \
- "2: ldq_u %1,1(%3)\n" \
- " extwl %0,%3,%0\n" \
- " extwh %1,%3,%1\n" \
- " or %0,%1,%0\n" \
- "3:\n" \
- EXC(1b,3b,%0,%2) \
- EXC(2b,3b,%0,%2) \
- : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \
- : "r"(addr), "2"(__gu_err)); \
-}
-
-#define __get_user_8(addr) \
- __asm__("1: ldq_u %0,0(%2)\n" \
- " extbl %0,%2,%0\n" \
- "2:\n" \
- EXC(1b,2b,%0,%1) \
- : "=&r"(__gu_val), "=r"(__gu_err) \
- : "r"(addr), "1"(__gu_err))
-#endif
extern void __put_user_unknown(void);
@@ -192,9 +162,6 @@ __asm__ __volatile__("1: stl %r2,%1\n" \
: "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
-#ifdef __alpha_bwx__
-/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
-
#define __put_user_16(x, addr) \
__asm__ __volatile__("1: stw %r2,%1\n" \
"2:\n" \
@@ -208,53 +175,6 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
EXC(1b,2b,$31,%0) \
: "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
-#else
-/* Unfortunately, we can't get an unaligned access trap for the sub-word
- write, so we have to do a general unaligned operation. */
-
-#define __put_user_16(x, addr) \
-{ \
- long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \
- __asm__ __volatile__( \
- "1: ldq_u %2,1(%5)\n" \
- "2: ldq_u %1,0(%5)\n" \
- " inswh %6,%5,%4\n" \
- " inswl %6,%5,%3\n" \
- " mskwh %2,%5,%2\n" \
- " mskwl %1,%5,%1\n" \
- " or %2,%4,%2\n" \
- " or %1,%3,%1\n" \
- "3: stq_u %2,1(%5)\n" \
- "4: stq_u %1,0(%5)\n" \
- "5:\n" \
- EXC(1b,5b,$31,%0) \
- EXC(2b,5b,$31,%0) \
- EXC(3b,5b,$31,%0) \
- EXC(4b,5b,$31,%0) \
- : "=r"(__pu_err), "=&r"(__pu_tmp1), \
- "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
- "=&r"(__pu_tmp4) \
- : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
-}
-
-#define __put_user_8(x, addr) \
-{ \
- long __pu_tmp1, __pu_tmp2; \
- __asm__ __volatile__( \
- "1: ldq_u %1,0(%4)\n" \
- " insbl %3,%4,%2\n" \
- " mskbl %1,%4,%1\n" \
- " or %1,%2,%1\n" \
- "2: stq_u %1,0(%4)\n" \
- "3:\n" \
- EXC(1b,3b,$31,%0) \
- EXC(2b,3b,$31,%0) \
- : "=r"(__pu_err), \
- "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
- : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
-}
-#endif
-
/*
* Complex access routines
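
The non-BWX __put_user_8/__put_user_16 fallbacks deleted above emulate sub-word stores with a ldq_u / insbl / mskbl / or / stq_u sequence, because pre-EV56 CPUs cannot store a single byte or word. A rough C analogue of the byte case, assuming little-endian byte lanes and plain memory rather than a faulting user pointer (hypothetical helper name, illustration only):

#include <stdint.h>
#include <stdio.h>

/* C analogue of the deleted ldq_u/insbl/mskbl/stq_u sequence: store one byte
 * by rewriting the aligned 64-bit word that contains it. */
static void put_byte_via_quadword(uint8_t value, uint8_t *addr)
{
        uint64_t *q = (uint64_t *)((uintptr_t)addr & ~7UL);   /* andnot addr,7 */
        unsigned int lane = (uintptr_t)addr & 7;              /* byte lane in the quadword */
        uint64_t word = *q;                                   /* ldq_u */
        word &= ~(0xffUL << (lane * 8));                      /* mskbl */
        word |= (uint64_t)value << (lane * 8);                /* insbl + or */
        *q = word;                                            /* stq_u */
}

int main(void)
{
        uint64_t buf = 0x1111111111111111UL;
        put_byte_via_quadword(0xab, (uint8_t *)&buf + 3);
        printf("%016llx\n", (unsigned long long)buf);   /* prints 11111111ab111111 */
        return 0;
}

The deleted assembly performs the same read-modify-write on the enclosing quadword; the EXC() entries it carries handle user-space faults, which this sketch ignores.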
diff --git a/arch/alpha/include/asm/vga.h b/arch/alpha/include/asm/vga.h
index 4c347a8454c7..919931cb5b63 100644
--- a/arch/alpha/include/asm/vga.h
+++ b/arch/alpha/include/asm/vga.h
@@ -13,6 +13,7 @@
#define VT_BUF_HAVE_RW
#define VT_BUF_HAVE_MEMSETW
#define VT_BUF_HAVE_MEMCPYW
+#define VT_BUF_HAVE_MEMMOVEW
static inline void scr_writew(u16 val, volatile u16 *addr)
{
@@ -40,6 +41,7 @@ static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
/* Do not trust that the usage will be correct; analyze the arguments. */
extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count);
+extern void scr_memmovew(u16 *d, const u16 *s, unsigned int count);
/* ??? These are currently only used for downloading character sets. As
such, they don't need memory barriers. Is this all they are intended
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
deleted file mode 100644
index 7adb80c6746a..000000000000
--- a/arch/alpha/include/asm/xchg.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ALPHA_CMPXCHG_H
-#error Do not include xchg.h directly!
-#else
-/*
- * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code
- * except that local version do not have the expensive memory barrier.
- * So this file is included twice from asm/cmpxchg.h.
- */
-
-/*
- * Atomic exchange.
- * Since it can be used to implement critical sections
- * it must clobber "memory" (also for interrupts in UP).
- */
-
-static inline unsigned long
-____xchg(_u8, volatile char *m, unsigned long val)
-{
- unsigned long ret, tmp, addr64;
-
- __asm__ __volatile__(
- " andnot %4,7,%3\n"
- " insbl %1,%4,%1\n"
- "1: ldq_l %2,0(%3)\n"
- " extbl %2,%4,%0\n"
- " mskbl %2,%4,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%3)\n"
- " beq %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
- : "r" ((long)m), "1" (val) : "memory");
-
- return ret;
-}
-
-static inline unsigned long
-____xchg(_u16, volatile short *m, unsigned long val)
-{
- unsigned long ret, tmp, addr64;
-
- __asm__ __volatile__(
- " andnot %4,7,%3\n"
- " inswl %1,%4,%1\n"
- "1: ldq_l %2,0(%3)\n"
- " extwl %2,%4,%0\n"
- " mskwl %2,%4,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%3)\n"
- " beq %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
- : "r" ((long)m), "1" (val) : "memory");
-
- return ret;
-}
-
-static inline unsigned long
-____xchg(_u32, volatile int *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%4\n"
- " bis $31,%3,%1\n"
- " stl_c %1,%2\n"
- " beq %1,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "rI" (val), "m" (*m) : "memory");
-
- return val;
-}
-
-static inline unsigned long
-____xchg(_u64, volatile long *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- "1: ldq_l %0,%4\n"
- " bis $31,%3,%1\n"
- " stq_c %1,%2\n"
- " beq %1,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "rI" (val), "m" (*m) : "memory");
-
- return val;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid xchg(). */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-____xchg(, volatile void *ptr, unsigned long x, int size)
-{
- switch (size) {
- case 1:
- return ____xchg(_u8, ptr, x);
- case 2:
- return ____xchg(_u16, ptr, x);
- case 4:
- return ____xchg(_u32, ptr, x);
- case 8:
- return ____xchg(_u64, ptr, x);
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-static inline unsigned long
-____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
-{
- unsigned long prev, tmp, cmp, addr64;
-
- __asm__ __volatile__(
- " andnot %5,7,%4\n"
- " insbl %1,%5,%1\n"
- "1: ldq_l %2,0(%4)\n"
- " extbl %2,%5,%0\n"
- " cmpeq %0,%6,%3\n"
- " beq %3,2f\n"
- " mskbl %2,%5,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%4)\n"
- " beq %2,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
- : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
-{
- unsigned long prev, tmp, cmp, addr64;
-
- __asm__ __volatile__(
- " andnot %5,7,%4\n"
- " inswl %1,%5,%1\n"
- "1: ldq_l %2,0(%4)\n"
- " extwl %2,%5,%0\n"
- " cmpeq %0,%6,%3\n"
- " beq %3,2f\n"
- " mskwl %2,%5,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%4)\n"
- " beq %2,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
- : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u32, volatile int *m, int old, int new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stl_c %1,%2\n"
- " beq %1,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldq_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stq_c %1,%2\n"
- " beq %1,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m) : "memory");
-
- return prev;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-____cmpxchg(, volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- switch (size) {
- case 1:
- return ____cmpxchg(_u8, ptr, old, new);
- case 2:
- return ____cmpxchg(_u16, ptr, old, new);
- case 4:
- return ____cmpxchg(_u32, ptr, old, new);
- case 8:
- return ____cmpxchg(_u64, ptr, old, new);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#endif
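
The comment block in the deleted xchg.h spells out the cmpxchg contract: compare OLD with MEM, store NEW only if they are identical, and return the value initially found in MEM, so the caller detects success by comparing the return value with OLD. A minimal single-threaded sketch of that contract and the usual retry loop built on it, using a stand-in function rather than the kernel's LL/SC implementation shown above:

#include <stdio.h>

/* Stand-in with the same contract as the removed ____cmpxchg helpers:
 * returns the value found in *m; *m is updated to new only when it equals old. */
static unsigned long cmpxchg_sketch(volatile unsigned long *m,
                                    unsigned long old, unsigned long new)
{
        unsigned long prev = *m;
        if (prev == old)
                *m = new;
        return prev;
}

int main(void)
{
        volatile unsigned long counter = 5;
        unsigned long seen, want;

        /* Classic retry loop: reread and retry until the swap sticks. */
        do {
                seen = counter;
                want = seen + 1;
        } while (cmpxchg_sketch(&counter, seen, want) != seen);

        printf("%lu\n", counter);   /* prints 6 */
        return 0;
}

The real helpers above get the same effect atomically from the ldl_l/stl_c (or ldq_l/stq_c) pair, retrying through the .subsection 2 branch whenever the store-conditional fails.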
diff --git a/arch/alpha/include/uapi/asm/compiler.h b/arch/alpha/include/uapi/asm/compiler.h
index 0e00c0e13374..8c03740966b4 100644
--- a/arch/alpha/include/uapi/asm/compiler.h
+++ b/arch/alpha/include/uapi/asm/compiler.h
@@ -95,24 +95,6 @@
#define __kernel_ldwu(mem) (mem)
#define __kernel_stb(val,mem) ((mem) = (val))
#define __kernel_stw(val,mem) ((mem) = (val))
-#else
-#define __kernel_ldbu(mem) \
- ({ unsigned char __kir; \
- __asm__(".arch ev56; \
- ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \
- __kir; })
-#define __kernel_ldwu(mem) \
- ({ unsigned short __kir; \
- __asm__(".arch ev56; \
- ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \
- __kir; })
-#define __kernel_stb(val,mem) \
- __asm__(".arch ev56; \
- stb %1,%0" : "=m"(mem) : "r"(val))
-#define __kernel_stw(val,mem) \
- __asm__(".arch ev56; \
- stw %1,%0" : "=m"(mem) : "r"(val))
#endif
-
#endif /* _UAPI__ALPHA_COMPILER_H */
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index 763929e814e9..1e700468a685 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -78,6 +78,9 @@
#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+#define MADV_GUARD_INSTALL 102 /* fatal signal on access to range */
+#define MADV_GUARD_REMOVE 103 /* unguard range */
+
/* compatibility flags */
#define MAP_FILE 0
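
The two MADV_GUARD_* values added above carry only terse comments; per those comments, installing a guard makes any access to the range fatal until the guard is removed again. A hedged user-space sketch of how they might be exercised, copying the numeric values from the hunk above in case the libc headers in use do not define them yet:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102   /* values from the uapi hunk above */
#define MADV_GUARD_REMOVE  103
#endif

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        /* Guard the second page: touching it now raises a fatal signal. */
        if (madvise(p + page, page, MADV_GUARD_INSTALL) != 0)
                perror("madvise(MADV_GUARD_INSTALL)");

        /* Lift the guard so the page becomes usable again. */
        if (madvise(p + page, page, MADV_GUARD_REMOVE) != 0)
                perror("madvise(MADV_GUARD_REMOVE)");

        munmap(p, 2 * page);
        return 0;
}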
diff --git a/arch/alpha/include/uapi/asm/ptrace.h b/arch/alpha/include/uapi/asm/ptrace.h
index 5ca45934fcbb..72ed913a910f 100644
--- a/arch/alpha/include/uapi/asm/ptrace.h
+++ b/arch/alpha/include/uapi/asm/ptrace.h
@@ -42,6 +42,8 @@ struct pt_regs {
unsigned long trap_a0;
unsigned long trap_a1;
unsigned long trap_a2;
+/* This makes the stack 16-byte aligned as GCC expects */
+ unsigned long __pad0;
/* These are saved by PAL-code: */
unsigned long ps;
unsigned long pc;
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index e94f621903fe..8f1f18adcdb5 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -140,6 +140,18 @@
#define SO_PASSPIDFD 76
#define SO_PEERPIDFD 77
+#define SO_DEVMEM_LINEAR 78
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 79
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 80
+
+#define SCM_TS_OPT_ID 81
+
+#define SO_RCVPRIORITY 82
+
+#define SO_PASSRIGHTS 83
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64