 arch/arm/mm/ioremap.c | 88 ++++++++++++++++++++++----------------------
 1 file changed, 44 insertions(+), 44 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 72286f9a4d30..2129070065c3 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -27,6 +27,7 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
+#include <linux/memblock.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
@@ -35,6 +36,7 @@
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/mach/map.h>
@@ -115,16 +117,21 @@ EXPORT_SYMBOL(ioremap_page);
void __check_vmalloc_seq(struct mm_struct *mm)
{
- unsigned int seq;
+ int seq;
do {
- seq = init_mm.context.vmalloc_seq;
+ seq = atomic_read(&init_mm.context.vmalloc_seq);
memcpy(pgd_offset(mm, VMALLOC_START),
pgd_offset_k(VMALLOC_START),
sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
pgd_index(VMALLOC_START)));
- mm->context.vmalloc_seq = seq;
- } while (seq != init_mm.context.vmalloc_seq);
+ /*
+ * Use a store-release so that other CPUs that observe the
+ * counter's new value are guaranteed to see the results of the
+ * memcpy as well.
+ */
+ atomic_set_release(&mm->context.vmalloc_seq, seq);
+ } while (seq != atomic_read(&init_mm.context.vmalloc_seq));
}
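
For context, the check_vmalloc_seq() wrapper invoked from unmap_area_sections() below lives in <asm/mmu_context.h>; as a reference sketch (reproduced from the same series, so treat the details as indicative), it compares this mm's snapshot against init_mm's counter and resynchronizes on mismatch:

static inline void check_vmalloc_seq(struct mm_struct *mm)
{
	/* vmalloc_seq is only maintained for the classic 2-level tables */
	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
		     atomic_read(&init_mm.context.vmalloc_seq)))
		__check_vmalloc_seq(mm);
}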
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -141,14 +148,8 @@ void __check_vmalloc_seq(struct mm_struct *mm)
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmdp;
-
- flush_cache_vunmap(addr, end);
- pgd = pgd_offset_k(addr);
- pud = pud_offset(pgd, addr);
- pmdp = pmd_offset(pud, addr);
+ pmd_t *pmdp = pmd_off_k(addr);
+
do {
pmd_t pmd = *pmdp;
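
pmd_off_k() replaces the open-coded pgd/pud walk deleted above. Its generic definition in <linux/pgtable.h> performs the same descent, with the p4d and pud levels folded away on 2-level ARM:

static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}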
@@ -161,7 +162,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
* Note: this is still racy on SMP machines.
*/
pmd_clear(pmdp);
- init_mm.context.vmalloc_seq++;
+ atomic_inc_return_release(&init_mm.context.vmalloc_seq);
/*
* Free the page table, if there was one.
@@ -178,8 +179,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
* Ensure that the active_mm is up to date - we want to
* catch any use-after-iounmap cases.
*/
- if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
- __check_vmalloc_seq(current->active_mm);
+ check_vmalloc_seq(current->active_mm);
flush_tlb_kernel_range(virt, end);
}
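
The release semantics of atomic_inc_return_release() guarantee that a CPU which observes the incremented vmalloc_seq also observes the preceding pmd_clear(). As an illustration only (not a suggested change), the same ordering could be obtained with an explicit barrier, at the cost of a full fence that is stronger than the release actually required:

	pmd_clear(pmdp);
	smp_mb__before_atomic();	/* order the PMD clear before the bump */
	atomic_inc(&init_mm.context.vmalloc_seq);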
@@ -189,9 +189,7 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
size_t size, const struct mem_type *type)
{
unsigned long addr = virt, end = virt + size;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
+ pmd_t *pmd = pmd_off_k(addr);
/*
* Remove and free any PTE-based mapping, and
@@ -199,9 +197,6 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
*/
unmap_area_sections(virt, size);
- pgd = pgd_offset_k(addr);
- pud = pud_offset(pgd, addr);
- pmd = pmd_offset(pud, addr);
do {
pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pfn += SZ_1M >> PAGE_SHIFT;
@@ -221,19 +216,13 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
size_t size, const struct mem_type *type)
{
unsigned long addr = virt, end = virt + size;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
+ pmd_t *pmd = pmd_off_k(addr);
/*
* Remove and free any PTE-based mapping, and
* sync the current kernel mapping.
*/
unmap_area_sections(virt, size);
-
- pgd = pgd_offset_k(virt);
- pud = pud_offset(pgd, addr);
- pmd = pmd_offset(pud, addr);
do {
unsigned long super_pmd_val, i;
@@ -301,7 +290,8 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
* Don't allow RAM to be mapped with mismatched attributes - this
* causes problems with ARMv6+
*/
- if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
+ if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
+ mtype != MT_MEMORY_RW))
return NULL;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
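
The pfn_valid() check was too broad: it is true for any pfn backed by a memmap entry, including reserved "no-map" regions that the kernel never put into its linear mapping, and those cannot alias with mismatched attributes. memblock_is_map_memory() tests exactly the mapped case. An illustrative probe of the distinction (debug code, not part of the patch):

	if (pfn_valid(pfn) && !memblock_is_map_memory(PFN_PHYS(pfn)))
		pr_debug("pfn %lx has a memmap entry but no linear mapping\n", pfn);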
@@ -416,6 +406,11 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
__builtin_return_address(0));
}
+void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
+{
+ set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
+}
+
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
return (__force void *)arch_ioremap_caller(phys_addr, size,
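
A hypothetical use of the new __arm_iomem_set_ro() helper: map a region, populate it once, then make the kernel mapping read-only. The fw_* names are invented for the example:

	void __iomem *base = ioremap(fw_phys, fw_size);

	if (base) {
		memcpy_toio(base, fw_image, fw_size);	/* one-time setup */
		__arm_iomem_set_ro(base, fw_size);	/* lock it down */
	}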
@@ -423,7 +418,7 @@ void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
__builtin_return_address(0));
}
-void __iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
struct static_vm *svm;
@@ -451,16 +446,9 @@ void __iounmap(volatile void __iomem *io_addr)
vunmap(addr);
}
-
-void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
-
-void iounmap(volatile void __iomem *cookie)
-{
- arch_iounmap(cookie);
-}
EXPORT_SYMBOL(iounmap);
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
static int pci_ioremap_mem_type = MT_DEVICE;
void pci_ioremap_set_mem_type(int mem_type)
@@ -468,16 +456,20 @@ void pci_ioremap_set_mem_type(int mem_type)
pci_ioremap_mem_type = mem_type;
}
-int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
- BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
+ unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
+
+ if (!(res->flags & IORESOURCE_IO))
+ return -EINVAL;
+
+ if (res->end > IO_SPACE_LIMIT)
+ return -EINVAL;
- return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
- PCI_IO_VIRT_BASE + offset + SZ_64K,
- phys_addr,
+ return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
__pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
-EXPORT_SYMBOL_GPL(pci_ioremap_io);
+EXPORT_SYMBOL(pci_remap_iospace);
void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
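
Usage sketch for the ARM-specific pci_remap_iospace() that supersedes pci_ioremap_io(): a host bridge driver passes an IORESOURCE_IO resource and the physical base of the I/O window (io_phys_base is assumed here):

	struct resource io_res = {
		.name  = "PCI I/O",
		.start = 0,
		.end   = SZ_64K - 1,
		.flags = IORESOURCE_IO,
	};

	if (pci_remap_iospace(&io_res, io_phys_base))
		pr_err("failed to map PCI I/O space\n");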
@@ -494,3 +486,11 @@ void __init early_ioremap_init(void)
{
early_ioremap_setup();
}
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags)
+{
+ unsigned long pfn = PHYS_PFN(offset);
+
+	return memblock_is_map_memory(PFN_PHYS(pfn));
+}
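
This hook is consulted by memremap(): for RAM outside the linear map (e.g. a no-map carveout) it returns false, so memremap() creates a fresh mapping instead of handing back a linear-map address. Sketch, with the carveout_* values as assumptions for illustration:

	/* falls back to remapping because the carveout is not linearly mapped */
	void *va = memremap(carveout_phys, carveout_size, MEMREMAP_WB);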