From 3729fe2bc2a01f4cc1aa88be8f64af06084c87d6 Mon Sep 17 00:00:00 2001
From: Dave Airlie
Date: Tue, 16 Jul 2019 04:07:13 +1000
Subject: Revert "Merge branch 'vmwgfx-next' of
 git://people.freedesktop.org/~thomash/linux into drm-next"

This reverts commit 031e610a6a21448a63dff7a0416e5e206724caac, reversing
changes made to 52d2d44eee8091e740d0d275df1311fb8373c9a9.

The mm changes in there were premature and not fully acked or reviewed
by core mm folks.  I dropped the ball by merging them via this tree, so
let's take them all back out.

Signed-off-by: Dave Airlie
---
 mm/memory.c | 145 +++++++++++++++---------------------------------------------
 1 file changed, 36 insertions(+), 109 deletions(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index 462aa47f8878..ddf20bd0c317 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2032,17 +2032,18 @@ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long
 }
 EXPORT_SYMBOL(vm_iomap_memory);
 
-static int apply_to_pte_range(struct pfn_range_apply *closure, pmd_t *pmd,
-			      unsigned long addr, unsigned long end)
+static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
+			      unsigned long addr, unsigned long end,
+			      pte_fn_t fn, void *data)
 {
 	pte_t *pte;
 	int err;
 	pgtable_t token;
 	spinlock_t *uninitialized_var(ptl);
 
-	pte = (closure->mm == &init_mm) ?
+	pte = (mm == &init_mm) ?
 		pte_alloc_kernel(pmd, addr) :
-		pte_alloc_map_lock(closure->mm, pmd, addr, &ptl);
+		pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
 		return -ENOMEM;
 
@@ -2053,109 +2054,86 @@ static int apply_to_pte_range(struct pfn_range_apply *closure, pmd_t *pmd,
 
 	token = pmd_pgtable(*pmd);
 	do {
-		err = closure->ptefn(pte++, token, addr, closure);
+		err = fn(pte++, token, addr, data);
 		if (err)
 			break;
 	} while (addr += PAGE_SIZE, addr != end);
 
 	arch_leave_lazy_mmu_mode();
 
-	if (closure->mm != &init_mm)
+	if (mm != &init_mm)
 		pte_unmap_unlock(pte-1, ptl);
 	return err;
 }
 
-static int apply_to_pmd_range(struct pfn_range_apply *closure, pud_t *pud,
-			      unsigned long addr, unsigned long end)
+static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+			      unsigned long addr, unsigned long end,
+			      pte_fn_t fn, void *data)
 {
 	pmd_t *pmd;
 	unsigned long next;
-	int err = 0;
+	int err;
 
 	BUG_ON(pud_huge(*pud));
 
-	pmd = pmd_alloc(closure->mm, pud, addr);
+	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
 		return -ENOMEM;
-
 	do {
 		next = pmd_addr_end(addr, end);
-		if (!closure->alloc && pmd_none_or_clear_bad(pmd))
-			continue;
-		err = apply_to_pte_range(closure, pmd, addr, next);
+		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
 		if (err)
 			break;
 	} while (pmd++, addr = next, addr != end);
 	return err;
 }
 
-static int apply_to_pud_range(struct pfn_range_apply *closure, p4d_t *p4d,
-			      unsigned long addr, unsigned long end)
+static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
+			      unsigned long addr, unsigned long end,
+			      pte_fn_t fn, void *data)
 {
 	pud_t *pud;
 	unsigned long next;
-	int err = 0;
+	int err;
 
-	pud = pud_alloc(closure->mm, p4d, addr);
+	pud = pud_alloc(mm, p4d, addr);
 	if (!pud)
 		return -ENOMEM;
-
 	do {
 		next = pud_addr_end(addr, end);
-		if (!closure->alloc && pud_none_or_clear_bad(pud))
-			continue;
-		err = apply_to_pmd_range(closure, pud, addr, next);
+		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
 		if (err)
 			break;
 	} while (pud++, addr = next, addr != end);
 	return err;
 }
 
-static int apply_to_p4d_range(struct pfn_range_apply *closure, pgd_t *pgd,
-			      unsigned long addr, unsigned long end)
+static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
+			      unsigned long addr, unsigned long end,
+			      pte_fn_t fn, void *data)
 {
 	p4d_t *p4d;
 	unsigned long next;
-	int err = 0;
+	int err;
 
-	p4d = p4d_alloc(closure->mm, pgd, addr);
+	p4d = p4d_alloc(mm, pgd, addr);
 	if (!p4d)
 		return -ENOMEM;
-
 	do {
 		next = p4d_addr_end(addr, end);
-		if (!closure->alloc && p4d_none_or_clear_bad(p4d))
-			continue;
-		err = apply_to_pud_range(closure, p4d, addr, next);
+		err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
 		if (err)
 			break;
 	} while (p4d++, addr = next, addr != end);
 	return err;
 }
 
-/**
- * apply_to_pfn_range - Scan a region of virtual memory, calling a provided
- * function on each leaf page table entry
- * @closure: Details about how to scan and what function to apply
- * @addr: Start virtual address
- * @size: Size of the region
- *
- * If @closure->alloc is set to 1, the function will fill in the page table
- * as necessary. Otherwise it will skip non-present parts.
- * Note: The caller must ensure that the range does not contain huge pages.
- * The caller must also assure that the proper mmu_notifier functions are
- * called before and after the call to apply_to_pfn_range.
- *
- * WARNING: Do not use this function unless you know exactly what you are
- * doing. It is lacking support for huge pages and transparent huge pages.
- *
- * Return: Zero on success. If the provided function returns a non-zero status,
- * the page table walk will terminate and that status will be returned.
- * If @closure->alloc is set to 1, then this function may also return memory
- * allocation errors arising from allocating page table memory.
+/*
+ * Scan a region of virtual memory, filling in page tables as necessary
+ * and calling a provided function on each leaf page table.
  */
-int apply_to_pfn_range(struct pfn_range_apply *closure,
-		       unsigned long addr, unsigned long size)
+int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
+			unsigned long size, pte_fn_t fn, void *data)
 {
 	pgd_t *pgd;
 	unsigned long next;
@@ -2165,65 +2143,16 @@ int apply_to_pfn_range(struct pfn_range_apply *closure,
 	if (WARN_ON(addr >= end))
 		return -EINVAL;
 
-	pgd = pgd_offset(closure->mm, addr);
+	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		if (!closure->alloc && pgd_none_or_clear_bad(pgd))
-			continue;
-		err = apply_to_p4d_range(closure, pgd, addr, next);
+		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
 
 	return err;
 }
-
-/**
- * struct page_range_apply - Closure structure for apply_to_page_range()
- * @pter: The base closure structure we derive from
- * @fn: The leaf pte function to call
- * @data: The leaf pte function closure
- */
-struct page_range_apply {
-	struct pfn_range_apply pter;
-	pte_fn_t fn;
-	void *data;
-};
-
-/*
- * Callback wrapper to enable use of apply_to_pfn_range for
- * the apply_to_page_range interface
- */
-static int apply_to_page_range_wrapper(pte_t *pte, pgtable_t token,
-				       unsigned long addr,
-				       struct pfn_range_apply *pter)
-{
-	struct page_range_apply *pra =
-		container_of(pter, typeof(*pra), pter);
-
-	return pra->fn(pte, token, addr, pra->data);
-}
-
-/*
- * Scan a region of virtual memory, filling in page tables as necessary
- * and calling a provided function on each leaf page table.
- *
- * WARNING: Do not use this function unless you know exactly what you are
- * doing. It is lacking support for huge pages and transparent huge pages.
- */
-int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
-			unsigned long size, pte_fn_t fn, void *data)
-{
-	struct page_range_apply pra = {
-		.pter = {.mm = mm,
-			 .alloc = 1,
-			 .ptefn = apply_to_page_range_wrapper },
-		.fn = fn,
-		.data = data
-	};
-
-	return apply_to_pfn_range(&pra.pter, addr, size);
-}
 EXPORT_SYMBOL_GPL(apply_to_page_range);
 
 /*
@@ -2309,7 +2238,7 @@ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
 	/* Restore original flags so that caller is not surprised */
 	vmf->flags = old_flags;
-	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
 		return ret;
 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
 		lock_page(page);
@@ -2586,7 +2515,7 @@ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		vmf->flags |= FAULT_FLAG_MKWRITE;
 		ret = vma->vm_ops->pfn_mkwrite(vmf);
-		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))
+		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
 			return ret;
 		return finish_mkwrite_fault(vmf);
 	}
@@ -2607,8 +2536,7 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		tmp = do_page_mkwrite(vmf);
 		if (unlikely(!tmp || (tmp &
-				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
-				       VM_FAULT_RETRY)))) {
+				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
 			put_page(vmf->page);
 			return tmp;
 		}
@@ -3673,8 +3601,7 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
 		unlock_page(vmf->page);
 		tmp = do_page_mkwrite(vmf);
 		if (unlikely(!tmp ||
-				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
-					VM_FAULT_RETRY)))) {
+				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
 			put_page(vmf->page);
 			return tmp;
 		}
--
cgit v1.2.3-59-g8ed1b
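For readers unfamiliar with the interface this revert restores: apply_to_page_range()
again takes a bare callback plus an opaque data pointer, and the callback signature can
be read off the restored call fn(pte++, token, addr, data) in the first hunk. The sketch
below is illustrative only and is not part of the patch; the helper names
count_present_pte and count_kernel_ptes are invented for the example.

#include <linux/mm.h>

/*
 * Illustrative sketch only -- not part of this patch.  A minimal
 * pte_fn_t callback of the form the restored apply_to_page_range()
 * expects: it is invoked once per leaf PTE, with the virtual address
 * being walked and the caller-supplied cookie.
 */
static int count_present_pte(pte_t *pte, pgtable_t token,
			     unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;

	return 0;	/* a non-zero return stops the walk */
}

static unsigned long count_kernel_ptes(unsigned long addr, unsigned long size)
{
	unsigned long count = 0;

	/*
	 * Walk [addr, addr + size) in the kernel page tables (&init_mm).
	 * apply_to_page_range() fills in intermediate page tables as it
	 * goes; after this revert there is no non-allocating variant of
	 * the walk left in mm/memory.c.
	 */
	if (apply_to_page_range(&init_mm, addr, size,
				count_present_pte, &count))
		return 0;

	return count;
}

The closure-based apply_to_pfn_range() removed above differed mainly in letting callers
skip non-present ranges (closure->alloc == 0) and in leaving the mmu_notifier calls to
the caller, as the deleted kernel-doc notes.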