Diffstat (limited to 'arch/arc/mm/highmem.c')
-rw-r--r--  arch/arc/mm/highmem.c | 84
1 file changed, 7 insertions, 77 deletions
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index fc8849e4f72e..c79912a6b196 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -6,8 +6,8 @@
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
@@ -36,9 +36,8 @@
* This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
* 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
*
- * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
- * slots across NR_CPUS would be more than sufficient (generic code defines
- * KM_TYPE_NR as 20).
+ * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
+ * CPU. So the number of CPUs sharing a single PTE page is limited.
*
* - pkmap being preemptible, in theory could do with more than 256 concurrent
* mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
@@ -47,80 +46,12 @@
*/
extern pte_t * pkmap_page_table;
-static pte_t * fixmap_page_table;
-
-void *kmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return page_address(page);
-
- return kmap_high(page);
-}
-EXPORT_SYMBOL(kmap);
-
-void *kmap_atomic(struct page *page)
-{
- int idx, cpu_idx;
- unsigned long vaddr;
-
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- cpu_idx = kmap_atomic_idx_push();
- idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
- vaddr = FIXMAP_ADDR(idx);
-
- set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
- mk_pte(page, kmap_prot));
-
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic);
-
-void __kunmap_atomic(void *kv)
-{
- unsigned long kvaddr = (unsigned long)kv;
-
- if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
-
- /*
- * Because preemption is disabled, this vaddr can be associated
- * with the current allocated index.
- * But in case of multiple live kmap_atomic(), it still relies on
- * callers to unmap in right order.
- */
- int cpu_idx = kmap_atomic_idx();
- int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
-
- WARN_ON(kvaddr != FIXMAP_ADDR(idx));
-
- pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
- local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
-
- kmap_atomic_idx_pop();
- }
-
- pagefault_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
- pgd_t *pgd_k;
- p4d_t *p4d_k;
- pud_t *pud_k;
- pmd_t *pmd_k;
+ pmd_t *pmd_k = pmd_off_k(kvaddr);
pte_t *pte_k;
- pgd_k = pgd_offset_k(kvaddr);
- p4d_k = p4d_offset(pgd_k, kvaddr);
- pud_k = pud_offset(p4d_k, kvaddr);
- pmd_k = pmd_offset(pud_k, kvaddr);
-
pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pte_k)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
@@ -134,10 +65,9 @@ void __init kmap_init(void)
{
/* Due to recursive include hell, we can't do this in processor.h */
BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
+ BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+ BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);

- BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
-
- BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
- fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
+ alloc_kmap_pgtable(FIXMAP_BASE);
}
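
For readers outside the kernel tree, a hedged sketch of the two generic facilities this commit leans on; it is illustration only, not part of the commit, and assumes a kernel build context. First, pmd_off_k() (a generic helper from <linux/pgtable.h>) collapses the open-coded four-level walk deleted from alloc_kmap_pgtable(). Second, with the arch-private kmap()/kmap_atomic() removed, highmem users go through the generic kmap_local API whose per-CPU slots the new KM_MAX_IDX comment describes. The names example_pmd_off_k() and example_copy_from_page() are hypothetical.

#include <linux/pgtable.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Roughly what pmd_off_k(kvaddr) expands to: the same walk this
 * commit removes from alloc_kmap_pgtable(). */
static inline pmd_t *example_pmd_off_k(unsigned long kvaddr)
{
	pgd_t *pgd_k = pgd_offset_k(kvaddr);		/* top level of init_mm */
	p4d_t *p4d_k = p4d_offset(pgd_k, kvaddr);	/* no-op where the level is folded */
	pud_t *pud_k = pud_offset(p4d_k, kvaddr);	/* likewise folded on two-level ARC */

	return pmd_offset(pud_k, kvaddr);		/* pmd covering kvaddr */
}

/* Hypothetical caller using the generic kmap_local API that replaces
 * the removed arch-specific kmap_atomic(). */
static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_local_page(page);	/* temporary, CPU-local mapping */

	memcpy(dst, src, len);
	kunmap_local(src);			/* nested mappings unmap in reverse order */
}

The BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE) added to kmap_init() enforces the sizing the new comment states: every CPU's KM_MAX_IDX kmap_local/atomic slots must fit inside the single PTE page that alloc_kmap_pgtable(FIXMAP_BASE) wires up, which is why the return value no longer needs to be stored in a fixmap_page_table pointer.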