 include/linux/highmem-internal.h | 48
 include/linux/highmem.h          | 43
 mm/highmem.c                     |  6
 3 files changed, 81 insertions(+), 16 deletions(-)
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index c5a22177db85..1bbe96dc8be6 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -68,6 +68,26 @@ static inline void kmap_flush_unused(void)
__kmap_flush_unused();
}
+static inline void *kmap_local_page(struct page *page)
+{
+ return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+ return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(void *vaddr)
+{
+ kunmap_local_indexed(vaddr);
+}
+
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
preempt_disable();
@@ -140,6 +160,28 @@ static inline void kunmap(struct page *page)
#endif
}
+static inline void *kmap_local_page(struct page *page)
+{
+ return page_address(page);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+ return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(addr);
+#endif
+}
+
static inline void *kmap_atomic(struct page *page)
{
preempt_disable();
@@ -181,4 +223,10 @@ do { \
__kunmap_atomic(__addr); \
} while (0)
+#define kunmap_local(__addr) \
+do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_local(__addr); \
+} while (0)
+
#endif
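
For illustration only (not part of the patch): a minimal usage sketch of the interface added above. copy_highpage_local() is a hypothetical helper name; it assumes nothing beyond kmap_local_page() and kunmap_local() as defined in this header. The unmaps must run in the reverse order of the maps because the map management is stack based.

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical helper, not from the patch: copy one (possibly highmem)
 * page to another via the new local mapping interface.
 */
static void copy_highpage_local(struct page *dst, struct page *src)
{
	void *vto = kmap_local_page(dst);
	void *vfrom = kmap_local_page(src);

	memcpy(vto, vfrom, PAGE_SIZE);

	/* Stack based: unmap in the reverse order of the maps above. */
	kunmap_local(vfrom);
	kunmap_local(vto);
}
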
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 7d098bd621f6..f597830f26b4 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -60,24 +60,22 @@ static inline struct page *kmap_to_page(void *addr);
static inline void kmap_flush_unused(void);
/**
- * kmap_atomic - Atomically map a page for temporary usage
+ * kmap_local_page - Map a page for temporary usage
* @page: Pointer to the page to be mapped
*
* Returns: The virtual address of the mapping
*
- * Side effect: On return pagefaults and preemption are disabled.
- *
* Can be invoked from any context.
*
* Requires careful handling when nesting multiple mappings because the map
* management is stack based. The unmap has to be in the reverse order of
* the map operation:
*
- * addr1 = kmap_atomic(page1);
- * addr2 = kmap_atomic(page2);
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
* ...
- * kunmap_atomic(addr2);
- * kunmap_atomic(addr1);
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
*
* Unmapping addr1 before addr2 is invalid and causes malfunction.
*
@@ -88,10 +86,26 @@ static inline void kmap_flush_unused(void);
* virtual address of the direct mapping. Only real highmem pages are
* temporarily mapped.
*
- * While it is significantly faster than kmap() it comes with restrictions
- * about the pointer validity and the side effects of disabling page faults
- * and preemption. Use it only when absolutely necessary, e.g. from non
- * preemptible contexts.
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
*/
static inline void *kmap_atomic(struct page *page);
@@ -101,12 +115,9 @@ static inline void *kmap_atomic(struct page *page);
*
* Counterpart to kmap_atomic().
*
- * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
* preemption.
- *
- * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
- * in the low memory area. For real highmem pages the mapping which was
- * established with kmap_atomic() is destroyed.
*/
/* Highmem related interfaces for management code */
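
A hedged sketch of the behavioral difference the revised kernel-doc describes: kmap_local_page() leaves pagefaults and preemption enabled, so the mapped address can be handed to a faulting operation such as copy_to_user(). read_page_to_user() below is a made-up example function, not an interface from this patch.

#include <linux/highmem.h>
#include <linux/uaccess.h>

static int read_page_to_user(struct page *page, void __user *ubuf)
{
	void *kaddr = kmap_local_page(page);
	int ret = 0;

	/*
	 * copy_to_user() may fault and sleep. That is fine under
	 * kmap_local_page(), but would not be under kmap_atomic(),
	 * which disables pagefaults and preemption.
	 */
	if (copy_to_user(ubuf, kaddr, PAGE_SIZE))
		ret = -EFAULT;

	kunmap_local(kaddr);
	return ret;
}
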
diff --git a/mm/highmem.c b/mm/highmem.c
index d1ef06aa6de6..83f9660f168f 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -453,6 +453,11 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
unsigned long vaddr;
int idx;
+ /*
+ * Disable migration so the resulting virtual address is stable
+ * across preemption.
+ */
+ migrate_disable();
preempt_disable();
idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -522,6 +527,7 @@ void kunmap_local_indexed(void *vaddr)
current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
kmap_local_idx_pop();
preempt_enable();
+ migrate_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);
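
To illustrate why the migrate_disable()/migrate_enable() pair matters, a sketch under the semantics established above: a task may be preempted, or even sleep, while holding a local mapping; the virtual address stays valid because the task cannot migrate to another CPU and the per-task kmap_ctrl stack is reinstated at context switch. scrub_page_local() is a hypothetical example, not code from this patch.

#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/string.h>

static void scrub_page_local(struct page *page)
{
	char *addr = kmap_local_page(page);

	memset(addr, 0, PAGE_SIZE / 2);
	/* May schedule; addr remains valid because migration is disabled. */
	cond_resched();
	memset(addr + PAGE_SIZE / 2, 0, PAGE_SIZE / 2);

	kunmap_local(addr);
}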