Diffstat (limited to 'arch/alpha/include')
-rw-r--r--  arch/alpha/include/asm/cacheflush.h  32
-rw-r--r--  arch/alpha/include/asm/io.h          75
-rw-r--r--  arch/alpha/include/asm/pgtable.h     16
3 files changed, 69 insertions(+), 54 deletions(-)
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index 89128489cb59..9945ff483eaf 100644
--- a/arch/alpha/include/asm/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -4,19 +4,6 @@
#include <linux/mm.h>
-/* Caches aren't brain-dead on the Alpha. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
/* Note that the following two definitions are _highly_ dependent
on the contexts in which they are used in the kernel. I personally
think it is criminal how loosely defined these macros are. */
@@ -48,7 +35,7 @@ extern void smp_imb(void);
extern void __load_new_mm_context(struct mm_struct *);
static inline void
-flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
if (vma->vm_flags & VM_EXEC) {
@@ -59,20 +46,17 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
mm->context[smp_processor_id()] = 0;
}
}
-#else
-extern void flush_icache_user_range(struct vm_area_struct *vma,
+#define flush_icache_user_page flush_icache_user_page
+#else /* CONFIG_SMP */
+extern void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len);
-#endif
+#define flush_icache_user_page flush_icache_user_page
+#endif /* CONFIG_SMP */
/* This is used only in __do_fault and do_swap_page. */
#define flush_icache_page(vma, page) \
- flush_icache_user_range((vma), (page), 0, 0)
+ flush_icache_user_page((vma), (page), 0, 0)
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#endif /* _ALPHA_CACHEFLUSH_H */
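The self-referential "#define flush_icache_user_page flush_icache_user_page" is the usual kernel idiom for telling <asm-generic/cacheflush.h> that this architecture supplies its own implementation: the generic header wraps each fallback in an #ifndef on the hook's name. Roughly (a simplified sketch of the guard pattern, not the verbatim generic header):

/*
 * Sketch of the #ifndef opt-out pattern in <asm-generic/cacheflush.h>.
 * Simplified; the real header guards many more hooks the same way.
 */
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long addr, int len)
{
	/* no-op fallback, used only when the arch defines nothing */
}
#endif

#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_page(vma, page, vaddr, len);	\
	} while (0)
#endif

With flush_icache_user_page marked as implemented, the generic copy_to_user_page expands to the same memcpy-plus-icache-flush sequence as the Alpha-private macros removed above, so switching to the generic header changes no behavior.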
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index d1ed5a8133c5..a4d0c19f1e79 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
-#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>
@@ -310,14 +309,18 @@ static inline int __is_mmio(const volatile void __iomem *addr)
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
mb();
return ret;
}
extern inline unsigned int ioread16(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
mb();
return ret;
}
@@ -358,7 +361,9 @@ extern inline void outw(u16 b, unsigned long port)
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
mb();
return ret;
}
@@ -403,14 +408,18 @@ extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
extern inline u8 readb(const volatile void __iomem *addr)
{
- u8 ret = __raw_readb(addr);
+ u8 ret;
+ mb();
+ ret = __raw_readb(addr);
mb();
return ret;
}
extern inline u16 readw(const volatile void __iomem *addr)
{
- u16 ret = __raw_readw(addr);
+ u16 ret;
+ mb();
+ ret = __raw_readw(addr);
mb();
return ret;
}
@@ -451,14 +460,18 @@ extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
extern inline u32 readl(const volatile void __iomem *addr)
{
- u32 ret = __raw_readl(addr);
+ u32 ret;
+ mb();
+ ret = __raw_readl(addr);
mb();
return ret;
}
extern inline u64 readq(const volatile void __iomem *addr)
{
- u64 ret = __raw_readq(addr);
+ u64 ret;
+ mb();
+ ret = __raw_readq(addr);
mb();
return ret;
}
@@ -487,14 +500,44 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define outb_p outb
#define outw_p outw
#define outl_p outl
-#define readb_relaxed(addr) __raw_readb(addr)
-#define readw_relaxed(addr) __raw_readw(addr)
-#define readl_relaxed(addr) __raw_readl(addr)
-#define readq_relaxed(addr) __raw_readq(addr)
-#define writeb_relaxed(b, addr) __raw_writeb(b, addr)
-#define writew_relaxed(b, addr) __raw_writew(b, addr)
-#define writel_relaxed(b, addr) __raw_writel(b, addr)
-#define writeq_relaxed(b, addr) __raw_writeq(b, addr)
+
+extern u8 readb_relaxed(const volatile void __iomem *addr);
+extern u16 readw_relaxed(const volatile void __iomem *addr);
+extern u32 readl_relaxed(const volatile void __iomem *addr);
+extern u64 readq_relaxed(const volatile void __iomem *addr);
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
+extern inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readb(addr);
+}
+
+extern inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readw(addr);
+}
+#endif
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
+extern inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readl(addr);
+}
+
+extern inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readq(addr);
+}
+#endif
+
+#define writeb_relaxed writeb
+#define writew_relaxed writew
+#define writel_relaxed writel
+#define writeq_relaxed writeq
/*
* String version of IO memory access ops:
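The pattern running through the io.h hunks: every readX()/ioreadX() gains a leading mb() in addition to the existing trailing one, and the readX_relaxed() variants become barriered reads instead of raw accessors. On Alpha's weak memory ordering, an MMIO read may otherwise be issued before earlier stores to DMA-coherent memory are visible, and I/O reads may be reordered among themselves, breaking the ordering portable drivers assume. A hypothetical driver fragment showing what the leading barrier buys (the register offset and descriptor layout are invented for illustration):

/*
 * Hypothetical fragment: publish a descriptor, then poll the device.
 * STATUS_REG and struct my_desc are illustrative, not a real API.
 */
struct my_desc {
	u64 addr;
	u32 len;
	u32 flags;
};

static u32 kick_and_check(void __iomem *regs, struct my_desc *desc,
			  dma_addr_t buf, u32 len)
{
	desc->addr  = buf;	/* plain stores to coherent DMA memory */
	desc->len   = len;
	desc->flags = 1;	/* mark the descriptor valid */

	/*
	 * readl() now starts with mb(), so the MMIO read below cannot
	 * be reordered ahead of the descriptor stores on Alpha: the
	 * device is guaranteed to observe a complete descriptor.
	 */
	return readl(regs + 0x10 /* STATUS_REG, hypothetical */);
}

The relaxed read variants keep only the leading barrier: they still order the I/O read against preceding accesses (and hence relaxed reads against each other), but drop the trailing mb() that orders a full readX() against subsequent instructions. The relaxed writes are simply aliased to the fully barriered writeX().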
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 0267aa8a4f86..162c17b2631f 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -276,15 +276,6 @@ extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
-#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
/*
* The smp_read_barrier_depends() in the following functions are required to
* order the load of *dir (the pointer in the top level page table) with any
@@ -305,6 +296,7 @@ extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
smp_read_barrier_depends(); /* see above */
return ret;
}
+#define pmd_offset pmd_offset
/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
@@ -314,9 +306,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
smp_read_barrier_depends(); /* see above */
return ret;
}
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_unmap(pte) do { } while (0)
+#define pte_offset_kernel pte_offset_kernel
extern pgd_t swapper_pg_dir[1024];
@@ -355,8 +345,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
extern void paging_init(void);
-#include <asm-generic/pgtable.h>
-
/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
#define HAVE_ARCH_UNMAPPED_AREA
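In pgtable.h, the removed pgd_offset()/pgd_offset_k()/pgd_index() macros match the generic page-table walkers this series consolidates into the common <linux/pgtable.h>, which now includes the arch header itself; that is also why the trailing #include <asm-generic/pgtable.h> goes away. The generic replacements are functionally identical to the removed macros, roughly (simplified sketch, not the verbatim common header):

/*
 * Roughly what the common header now supplies when the architecture
 * does not define its own; simplified for illustration.
 */
#ifndef pgd_index
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#endif

#ifndef pgd_offset
#define pgd_offset(mm, address)		((mm)->pgd + pgd_index(address))
#endif

#ifndef pgd_offset_k
#define pgd_offset_k(address)		pgd_offset(&init_mm, (address))
#endif

pmd_offset() and pte_offset_kernel() stay Alpha-specific because they need smp_read_barrier_depends(); the self-referential "#define pmd_offset pmd_offset" and "#define pte_offset_kernel pte_offset_kernel" opt them out of the generic fallbacks with the same #ifndef idiom seen in cacheflush.h above.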