Diffstat (limited to 'include/linux/highmem.h')
-rw-r--r-- | include/linux/highmem.h | 489
1 file changed, 320 insertions, 169 deletions
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index ea5cdbd8c2c3..e9912da5441b 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -5,186 +5,229 @@ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/bug.h> +#include <linux/cacheflush.h> +#include <linux/kmsan.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/hardirq.h> -#include <asm/cacheflush.h> +#include "highmem-internal.h" -#ifndef ARCH_HAS_FLUSH_ANON_PAGE -static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) -{ -} -#endif - -#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE -static inline void flush_kernel_dcache_page(struct page *page) -{ -} -static inline void flush_kernel_vmap_range(void *vaddr, int size) -{ -} -static inline void invalidate_kernel_vmap_range(void *vaddr, int size) -{ -} -#endif - -#include <asm/kmap_types.h> - -#ifdef CONFIG_HIGHMEM -#include <asm/highmem.h> - -/* declarations for linux/mm/highmem.c */ -unsigned int nr_free_highpages(void); -extern atomic_long_t _totalhigh_pages; -static inline unsigned long totalhigh_pages(void) -{ - return (unsigned long)atomic_long_read(&_totalhigh_pages); -} - -static inline void totalhigh_pages_inc(void) -{ - atomic_long_inc(&_totalhigh_pages); -} - -static inline void totalhigh_pages_dec(void) -{ - atomic_long_dec(&_totalhigh_pages); -} - -static inline void totalhigh_pages_add(long count) -{ - atomic_long_add(count, &_totalhigh_pages); -} - -static inline void totalhigh_pages_set(long val) -{ - atomic_long_set(&_totalhigh_pages, val); -} - -void kmap_flush_unused(void); +/** + * kmap - Map a page for long term usage + * @page: Pointer to the page to be mapped + * + * Returns: The virtual address of the mapping + * + * Can only be invoked from preemptible task context because on 32bit + * systems with CONFIG_HIGHMEM enabled this function might sleep. + * + * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area + * this returns the virtual address of the direct kernel mapping. + * + * The returned virtual address is globally visible and valid up to the + * point where it is unmapped via kunmap(). The pointer can be handed to + * other contexts. + * + * For highmem pages on 32bit systems this can be slow as the mapping space + * is limited and protected by a global lock. In case that there is no + * mapping slot available the function blocks until a slot is released via + * kunmap(). + */ +static inline void *kmap(struct page *page); -struct page *kmap_to_page(void *addr); +/** + * kunmap - Unmap the virtual address mapped by kmap() + * @page: Pointer to the page which was mapped by kmap() + * + * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of + * pages in the low memory area. + */ +static inline void kunmap(struct page *page); -#else /* CONFIG_HIGHMEM */ +/** + * kmap_to_page - Get the page for a kmap'ed address + * @addr: The address to look up + * + * Returns: The page which is mapped to @addr. 
+ */ +static inline struct page *kmap_to_page(void *addr); -static inline unsigned int nr_free_highpages(void) { return 0; } +/** + * kmap_flush_unused - Flush all unused kmap mappings in order to + * remove stray mappings + */ +static inline void kmap_flush_unused(void); -static inline struct page *kmap_to_page(void *addr) -{ - return virt_to_page(addr); -} +/** + * kmap_local_page - Map a page for temporary usage + * @page: Pointer to the page to be mapped + * + * Returns: The virtual address of the mapping + * + * Can be invoked from any context, including interrupts. + * + * Requires careful handling when nesting multiple mappings because the map + * management is stack based. The unmap has to be in the reverse order of + * the map operation: + * + * addr1 = kmap_local_page(page1); + * addr2 = kmap_local_page(page2); + * ... + * kunmap_local(addr2); + * kunmap_local(addr1); + * + * Unmapping addr1 before addr2 is invalid and causes malfunction. + * + * Contrary to kmap() mappings the mapping is only valid in the context of + * the caller and cannot be handed to other contexts. + * + * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the + * virtual address of the direct mapping. Only real highmem pages are + * temporarily mapped. + * + * While it is significantly faster than kmap() for the higmem case it + * comes with restrictions about the pointer validity. + * + * On HIGHMEM enabled systems mapping a highmem page has the side effect of + * disabling migration in order to keep the virtual address stable across + * preemption. No caller of kmap_local_page() can rely on this side effect. + */ +static inline void *kmap_local_page(struct page *page); -static inline unsigned long totalhigh_pages(void) { return 0UL; } +/** + * kmap_local_folio - Map a page in this folio for temporary usage + * @folio: The folio containing the page. + * @offset: The byte offset within the folio which identifies the page. + * + * Requires careful handling when nesting multiple mappings because the map + * management is stack based. The unmap has to be in the reverse order of + * the map operation:: + * + * addr1 = kmap_local_folio(folio1, offset1); + * addr2 = kmap_local_folio(folio2, offset2); + * ... + * kunmap_local(addr2); + * kunmap_local(addr1); + * + * Unmapping addr1 before addr2 is invalid and causes malfunction. + * + * Contrary to kmap() mappings the mapping is only valid in the context of + * the caller and cannot be handed to other contexts. + * + * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the + * virtual address of the direct mapping. Only real highmem pages are + * temporarily mapped. + * + * While it is significantly faster than kmap() for the higmem case it + * comes with restrictions about the pointer validity. Only use when really + * necessary. + * + * On HIGHMEM enabled systems mapping a highmem page has the side effect of + * disabling migration in order to keep the virtual address stable across + * preemption. No caller of kmap_local_folio() can rely on this side effect. + * + * Context: Can be invoked from any context. + * Return: The virtual address of @offset. + */ +static inline void *kmap_local_folio(struct folio *folio, size_t offset); -#ifndef ARCH_HAS_KMAP -static inline void *kmap(struct page *page) -{ - might_sleep(); - return page_address(page); -} +/** + * kmap_atomic - Atomically map a page for temporary usage - Deprecated! 
+ * @page: Pointer to the page to be mapped + * + * Returns: The virtual address of the mapping + * + * In fact a wrapper around kmap_local_page() which also disables pagefaults + * and, depending on PREEMPT_RT configuration, also CPU migration and + * preemption. Therefore users should not count on the latter two side effects. + * + * Mappings should always be released by kunmap_atomic(). + * + * Do not use in new code. Use kmap_local_page() instead. + * + * It is used in atomic context when code wants to access the contents of a + * page that might be allocated from high memory (see __GFP_HIGHMEM), for + * example a page in the pagecache. The API has two functions, and they + * can be used in a manner similar to the following:: + * + * // Find the page of interest. + * struct page *page = find_get_page(mapping, offset); + * + * // Gain access to the contents of that page. + * void *vaddr = kmap_atomic(page); + * + * // Do something to the contents of that page. + * memset(vaddr, 0, PAGE_SIZE); + * + * // Unmap that page. + * kunmap_atomic(vaddr); + * + * Note that the kunmap_atomic() call takes the result of the kmap_atomic() + * call, not the argument. + * + * If you need to map two pages because you want to copy from one page to + * another you need to keep the kmap_atomic calls strictly nested, like: + * + * vaddr1 = kmap_atomic(page1); + * vaddr2 = kmap_atomic(page2); + * + * memcpy(vaddr1, vaddr2, PAGE_SIZE); + * + * kunmap_atomic(vaddr2); + * kunmap_atomic(vaddr1); + */ +static inline void *kmap_atomic(struct page *page); -static inline void kunmap(struct page *page) -{ -} +/* Highmem related interfaces for management code */ +static inline unsigned int nr_free_highpages(void); +static inline unsigned long totalhigh_pages(void); -static inline void *kmap_atomic(struct page *page) -{ - preempt_disable(); - pagefault_disable(); - return page_address(page); -} -#define kmap_atomic_prot(page, prot) kmap_atomic(page) - -static inline void __kunmap_atomic(void *addr) +#ifndef ARCH_HAS_FLUSH_ANON_PAGE +static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { - pagefault_enable(); - preempt_enable(); } - -#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) - -#define kmap_flush_unused() do {} while(0) -#endif - -#endif /* CONFIG_HIGHMEM */ - -#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) - -DECLARE_PER_CPU(int, __kmap_atomic_idx); - -static inline int kmap_atomic_idx_push(void) -{ - int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; - -#ifdef CONFIG_DEBUG_HIGHMEM - WARN_ON_ONCE(in_irq() && !irqs_disabled()); - BUG_ON(idx >= KM_TYPE_NR); #endif - return idx; -} -static inline int kmap_atomic_idx(void) +#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE +static inline void flush_kernel_vmap_range(void *vaddr, int size) { - return __this_cpu_read(__kmap_atomic_idx) - 1; } - -static inline void kmap_atomic_idx_pop(void) +static inline void invalidate_kernel_vmap_range(void *vaddr, int size) { -#ifdef CONFIG_DEBUG_HIGHMEM - int idx = __this_cpu_dec_return(__kmap_atomic_idx); - - BUG_ON(idx < 0); -#else - __this_cpu_dec(__kmap_atomic_idx); -#endif } - #endif -/* - * Prevent people trying to call kunmap_atomic() as if it were kunmap() - * kunmap_atomic() should get the return value of kmap_atomic, not the page. 
- */ -#define kunmap_atomic(addr) \ -do { \ - BUILD_BUG_ON(__same_type((addr), struct page *)); \ - __kunmap_atomic(addr); \ -} while (0) - - /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ #ifndef clear_user_highpage static inline void clear_user_highpage(struct page *page, unsigned long vaddr) { - void *addr = kmap_atomic(page); + void *addr = kmap_local_page(page); clear_user_page(addr, vaddr, page); - kunmap_atomic(addr); + kunmap_local(addr); } #endif -#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE +#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE /** - * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags - * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE + * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move * @vma: The VMA the page is to be allocated for * @vaddr: The virtual address the page will be inserted into * - * This function will allocate a page for a VMA but the caller is expected - * to specify via movableflags whether the page will be movable in the - * future or not + * Returns: The allocated and zeroed HIGHMEM page + * + * This function will allocate a page for a VMA that the caller knows will + * be able to migrate in the future using move_pages() or reclaimed * * An architecture may override this function by defining - * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own + * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own * implementation. */ static inline struct page * -__alloc_zeroed_user_highpage(gfp_t movableflags, - struct vm_area_struct *vma, - unsigned long vaddr) +alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, + unsigned long vaddr) { - struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags, - vma, vaddr); + struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); if (page) clear_user_highpage(page, vaddr); @@ -193,35 +236,47 @@ __alloc_zeroed_user_highpage(gfp_t movableflags, } #endif -/** - * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move - * @vma: The VMA the page is to be allocated for - * @vaddr: The virtual address the page will be inserted into - * - * This function will allocate a page for a VMA that the caller knows will - * be able to migrate in the future using move_pages() or reclaimed - */ -static inline struct page * -alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, - unsigned long vaddr) +static inline void clear_highpage(struct page *page) { - return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); + void *kaddr = kmap_local_page(page); + clear_page(kaddr); + kunmap_local(kaddr); } -static inline void clear_highpage(struct page *page) +static inline void clear_highpage_kasan_tagged(struct page *page) { - void *kaddr = kmap_atomic(page); - clear_page(kaddr); - kunmap_atomic(kaddr); + u8 tag; + + tag = page_kasan_tag(page); + page_kasan_tag_reset(page); + clear_highpage(page); + page_kasan_tag_set(page, tag); } +#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE + +static inline void tag_clear_highpage(struct page *page) +{ +} + +#endif + +/* + * If we pass in a base or tail page, we can zero up to PAGE_SIZE. + * If we pass in a head page, we can zero up to the size of the compound page. 
+ */ +#ifdef CONFIG_HIGHMEM +void zero_user_segments(struct page *page, unsigned start1, unsigned end1, + unsigned start2, unsigned end2); +#else static inline void zero_user_segments(struct page *page, - unsigned start1, unsigned end1, - unsigned start2, unsigned end2) + unsigned start1, unsigned end1, + unsigned start2, unsigned end2) { - void *kaddr = kmap_atomic(page); + void *kaddr = kmap_local_page(page); + unsigned int i; - BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE); + BUG_ON(end1 > page_size(page) || end2 > page_size(page)); if (end1 > start1) memset(kaddr + start1, 0, end1 - start1); @@ -229,9 +284,11 @@ static inline void zero_user_segments(struct page *page, if (end2 > start2) memset(kaddr + start2, 0, end2 - start2); - kunmap_atomic(kaddr); - flush_dcache_page(page); + kunmap_local(kaddr); + for (i = 0; i < compound_nr(page); i++) + flush_dcache_page(page + i); } +#endif static inline void zero_user_segment(struct page *page, unsigned start, unsigned end) @@ -252,11 +309,12 @@ static inline void copy_user_highpage(struct page *to, struct page *from, { char *vfrom, *vto; - vfrom = kmap_atomic(from); - vto = kmap_atomic(to); + vfrom = kmap_local_page(from); + vto = kmap_local_page(to); copy_user_page(vto, vfrom, vaddr, to); - kunmap_atomic(vto); - kunmap_atomic(vfrom); + kmsan_unpoison_memory(page_address(to), PAGE_SIZE); + kunmap_local(vto); + kunmap_local(vfrom); } #endif @@ -267,13 +325,106 @@ static inline void copy_highpage(struct page *to, struct page *from) { char *vfrom, *vto; - vfrom = kmap_atomic(from); - vto = kmap_atomic(to); + vfrom = kmap_local_page(from); + vto = kmap_local_page(to); copy_page(vto, vfrom); - kunmap_atomic(vto); - kunmap_atomic(vfrom); + kmsan_copy_page_meta(to, from); + kunmap_local(vto); + kunmap_local(vfrom); } #endif +static inline void memcpy_page(struct page *dst_page, size_t dst_off, + struct page *src_page, size_t src_off, + size_t len) +{ + char *dst = kmap_local_page(dst_page); + char *src = kmap_local_page(src_page); + + VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE); + memcpy(dst + dst_off, src + src_off, len); + kunmap_local(src); + kunmap_local(dst); +} + +static inline void memset_page(struct page *page, size_t offset, int val, + size_t len) +{ + char *addr = kmap_local_page(page); + + VM_BUG_ON(offset + len > PAGE_SIZE); + memset(addr + offset, val, len); + kunmap_local(addr); +} + +static inline void memcpy_from_page(char *to, struct page *page, + size_t offset, size_t len) +{ + char *from = kmap_local_page(page); + + VM_BUG_ON(offset + len > PAGE_SIZE); + memcpy(to, from + offset, len); + kunmap_local(from); +} + +static inline void memcpy_to_page(struct page *page, size_t offset, + const char *from, size_t len) +{ + char *to = kmap_local_page(page); + + VM_BUG_ON(offset + len > PAGE_SIZE); + memcpy(to + offset, from, len); + flush_dcache_page(page); + kunmap_local(to); +} + +static inline void memzero_page(struct page *page, size_t offset, size_t len) +{ + char *addr = kmap_local_page(page); + + VM_BUG_ON(offset + len > PAGE_SIZE); + memset(addr + offset, 0, len); + flush_dcache_page(page); + kunmap_local(addr); +} + +/** + * folio_zero_segments() - Zero two byte ranges in a folio. + * @folio: The folio to write to. + * @start1: The first byte to zero. + * @xend1: One more than the last byte in the first range. + * @start2: The first byte to zero in the second range. + * @xend2: One more than the last byte in the second range. 
+ */ +static inline void folio_zero_segments(struct folio *folio, + size_t start1, size_t xend1, size_t start2, size_t xend2) +{ + zero_user_segments(&folio->page, start1, xend1, start2, xend2); +} + +/** + * folio_zero_segment() - Zero a byte range in a folio. + * @folio: The folio to write to. + * @start: The first byte to zero. + * @xend: One more than the last byte to zero. + */ +static inline void folio_zero_segment(struct folio *folio, + size_t start, size_t xend) +{ + zero_user_segments(&folio->page, start, xend, 0, 0); +} + +/** + * folio_zero_range() - Zero a byte range in a folio. + * @folio: The folio to write to. + * @start: The first byte to zero. + * @length: The number of bytes to zero. + */ +static inline void folio_zero_range(struct folio *folio, + size_t start, size_t length) +{ + zero_user_segments(&folio->page, start, start + length, 0, 0); +} + #endif /* _LINUX_HIGHMEM_H */ |
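The kernel-doc added above for kmap() describes a sleeping, long-term mapping whose address may be handed to other contexts. A minimal usage sketch follows; the helper name is hypothetical and not part of the patch:

#include <linux/highmem.h>

/*
 * Hypothetical helper: sum the bytes of a page that may live in highmem.
 * kmap() can sleep waiting for a mapping slot, so this must be called
 * from preemptible task context. The mapping stays valid (and may be
 * used from other contexts) until kunmap().
 */
static u32 sum_page_bytes(struct page *page)
{
	u8 *vaddr = kmap(page);
	u32 sum = 0;
	size_t i;

	for (i = 0; i < PAGE_SIZE; i++)
		sum += vaddr[i];

	kunmap(page);	/* kunmap() takes the page, not the kernel address */
	return sum;
}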
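The nesting rule documented for kmap_local_page() and kmap_local_folio() (unmap in the reverse order of mapping) is the same pattern the new memcpy_page() helper and the converted copy_highpage() follow. A self-contained sketch of that pattern, with a hypothetical function name:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Local mappings are stack based: the one taken last must be released
 * first. Unlike kmap(), this is usable from any context, including
 * interrupts, but the pointers are only valid in the mapping context.
 */
static void copy_one_page(struct page *dst, struct page *src)
{
	char *vdst = kmap_local_page(dst);
	char *vsrc = kmap_local_page(src);

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_local(vsrc);	/* mapped last, unmapped first */
	kunmap_local(vdst);
}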
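Since kmap_atomic() is now documented as deprecated, a conversion to kmap_local_page() typically looks like the sketch below. The function name is hypothetical; the explicit pagefault_disable()/pagefault_enable() pair is only needed if the old code actually relied on kmap_atomic() disabling page faults:

#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static void zero_page_contents(struct page *page)
{
	void *vaddr;

	vaddr = kmap_local_page(page);	/* was: kmap_atomic(page) */
	pagefault_disable();		/* only if the old side effect is required */

	memset(vaddr, 0, PAGE_SIZE);

	pagefault_enable();
	kunmap_local(vaddr);		/* was: kunmap_atomic(vaddr) */
}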
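The new memcpy_to_page(), memcpy_from_page(), memset_page() and memzero_page() helpers bound the access to PAGE_SIZE and take care of the kmap_local_page()/flush_dcache_page()/kunmap_local() sequence. A hypothetical caller might look like this:

#include <linux/highmem.h>

/*
 * Write a small record at the start of a (possibly highmem) page and
 * clear the remainder. Both helpers VM_BUG_ON() if the range would run
 * past the end of the page.
 */
static void fill_page_record(struct page *page, const char *rec, size_t len)
{
	if (WARN_ON(len > PAGE_SIZE))
		return;

	memcpy_to_page(page, 0, rec, len);
	memzero_page(page, len, PAGE_SIZE - len);
}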
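folio_zero_segment() and folio_zero_range() forward to zero_user_segments(), which (per the comment above about head pages) can zero up to the size of the whole compound page. A minimal sketch of a typical use, with a hypothetical helper name and assuming folio_size() from <linux/mm.h>:

#include <linux/highmem.h>

/*
 * Zero everything in the folio past the first 'bytes' valid bytes so
 * stale data beyond the written region is never exposed.
 */
static void zero_folio_tail(struct folio *folio, size_t bytes)
{
	if (bytes < folio_size(folio))
		folio_zero_segment(folio, bytes, folio_size(folio));
}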