From 4cf58924951ef80eec636b863e7a53973c44261a Mon Sep 17 00:00:00 2001
From: "Joel Fernandes (Google)"
Date: Thu, 3 Jan 2019 15:28:34 -0800
Subject: mm: treewide: remove unused address argument from pte_alloc functions

Patch series "Add support for fast mremap".

This series speeds up the mremap(2) syscall by copying page tables at
the PMD level even for non-THP systems. There is concern that the extra
'address' argument that mremap passes to pte_alloc may do something
subtle architecture-related in the future that may make the scheme not
work. Also we find that there is no point in passing the 'address' to
pte_alloc since it is unused. This patch therefore removes this argument
tree-wide, resulting in a nice negative diff as well. We also ensure
along the way that the enabled architectures do not do anything funky
with the 'address' argument that goes unnoticed by the optimization.

Build and boot tested on x86-64. Build tested on arm64. The config
enablement patch for arm64 will be posted in the future after more
testing.

The changes were obtained by applying the following Coccinelle script
(thanks to Julia for answering all Coccinelle questions!). The following
fixups were done manually:
* Removal of address argument from pte_fragment_alloc
* Removal of pte_alloc_one_fast definitions from m68k and microblaze.

// Options: --include-headers --no-includes
// Note: I split the 'identifier fn' line, so if you are manually
// running it, please unsplit it so it runs for you.

virtual patch

@pte_alloc_func_def depends on patch exists@
identifier E2;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
type T2;
@@

 fn(...
- , T2 E2
 )
 { ... }

@pte_alloc_func_proto_noarg depends on patch exists@
type T1, T2, T3, T4;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

(
- T3 fn(T1, T2);
+ T3 fn(T1);
|
- T3 fn(T1, T2, T4);
+ T3 fn(T1, T2);
)

@pte_alloc_func_proto depends on patch exists@
identifier E1, E2, E4;
type T1, T2, T3, T4;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

(
- T3 fn(T1 E1, T2 E2);
+ T3 fn(T1 E1);
|
- T3 fn(T1 E1, T2 E2, T4 E4);
+ T3 fn(T1 E1, T2 E2);
)

@pte_alloc_func_call depends on patch exists@
expression E2;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

 fn(...
-, E2
 )

@pte_alloc_macro depends on patch exists@
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
identifier a, b, c;
expression e;
position p;
@@

(
- #define fn(a, b, c) e
+ #define fn(a, b) e
|
- #define fn(a, b) e
+ #define fn(a) e
)

Link: http://lkml.kernel.org/r/20181108181201.88826-2-joelaf@google.com
Signed-off-by: Joel Fernandes (Google)
Suggested-by: Kirill A. Shutemov
Acked-by: Kirill A. Shutemov
Cc: Michal Hocko
Cc: Julia Lawall
Cc: Kirill A. Shutemov
Cc: William Kucharski
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/huge_memory.c | 8 ++++----
 mm/kasan/init.c  | 2 +-
 mm/memory.c      | 17 ++++++++---------
 mm/migrate.c     | 2 +-
 mm/mremap.c      | 2 +-
 mm/userfaultfd.c | 2 +-
 6 files changed, 16 insertions(+), 17 deletions(-)
(limited to 'mm')

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cbd977b1d60d..faf357eaf0ce 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -568,7 +568,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
         return VM_FAULT_FALLBACK;
     }

-    pgtable = pte_alloc_one(vma->vm_mm, haddr);
+    pgtable = pte_alloc_one(vma->vm_mm);
     if (unlikely(!pgtable)) {
         ret = VM_FAULT_OOM;
         goto release;
@@ -702,7 +702,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
         struct page *zero_page;
         bool set;
         vm_fault_t ret;
-        pgtable = pte_alloc_one(vma->vm_mm, haddr);
+        pgtable = pte_alloc_one(vma->vm_mm);
         if (unlikely(!pgtable))
             return VM_FAULT_OOM;
         zero_page = mm_get_huge_zero_page(vma->vm_mm);
@@ -791,7 +791,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
         return VM_FAULT_SIGBUS;

     if (arch_needs_pgtable_deposit()) {
-        pgtable = pte_alloc_one(vma->vm_mm, addr);
+        pgtable = pte_alloc_one(vma->vm_mm);
         if (!pgtable)
             return VM_FAULT_OOM;
     }
@@ -927,7 +927,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
     if (!vma_is_anonymous(vma))
         return 0;

-    pgtable = pte_alloc_one(dst_mm, addr);
+    pgtable = pte_alloc_one(dst_mm);
     if (unlikely(!pgtable))
         goto out;
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index 34afad56497b..45a1b5e38e1e 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -123,7 +123,7 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
         pte_t *p;

         if (slab_is_available())
-            p = pte_alloc_one_kernel(&init_mm, addr);
+            p = pte_alloc_one_kernel(&init_mm);
         else
             p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
         if (!p)
diff --git a/mm/memory.c b/mm/memory.c
index 2dd2f9ab57f4..a52663c0612d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -400,10 +400,10 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
     }
 }

-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
 {
     spinlock_t *ptl;
-    pgtable_t new = pte_alloc_one(mm, address);
+    pgtable_t new = pte_alloc_one(mm);
     if (!new)
         return -ENOMEM;

@@ -434,9 +434,9 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
     return 0;
 }

-int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+int __pte_alloc_kernel(pmd_t *pmd)
 {
-    pte_t *new = pte_alloc_one_kernel(&init_mm, address);
+    pte_t *new = pte_alloc_one_kernel(&init_mm);
     if (!new)
         return -ENOMEM;

@@ -2896,7 +2896,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
      *
      * Here we only have down_read(mmap_sem).
      */
-    if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
+    if (pte_alloc(vma->vm_mm, vmf->pmd))
         return VM_FAULT_OOM;

     /* See the comment in pte_alloc_one_map() */
@@ -3043,7 +3043,7 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
         pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
         spin_unlock(vmf->ptl);
         vmf->prealloc_pte = NULL;
-    } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
+    } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
         return VM_FAULT_OOM;
     }
 map_pte:
@@ -3122,7 +3122,7 @@ static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
      * related to pte entry. Use the preallocated table for that.
      */
     if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
-        vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
+        vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
         if (!vmf->prealloc_pte)
             return VM_FAULT_OOM;
         smp_wmb(); /* See comment in __pte_alloc() */
@@ -3360,8 +3360,7 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
         start_pgoff + nr_pages - 1);

     if (pmd_none(*vmf->pmd)) {
-        vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
-                          vmf->address);
+        vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
         if (!vmf->prealloc_pte)
             goto out;
         smp_wmb(); /* See comment in __pte_alloc() */
diff --git a/mm/migrate.c b/mm/migrate.c
index 5d1839a9148d..ccf8966caf6f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2636,7 +2636,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
      *
      * Here we only have down_read(mmap_sem).
      */
-    if (pte_alloc(mm, pmdp, addr))
+    if (pte_alloc(mm, pmdp))
         goto abort;

     /* See the comment in pte_alloc_one_map() */
diff --git a/mm/mremap.c b/mm/mremap.c
index def01d86e36f..fc3f92962a7e 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -236,7 +236,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
             if (pmd_trans_unstable(old_pmd))
                 continue;
         }
-        if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
+        if (pte_alloc(new_vma->vm_mm, new_pmd))
             break;
         next = (new_addr + PMD_SIZE) & PMD_MASK;
         if (extent > next - new_addr)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 48368589f519..065c1ce191c4 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -550,7 +550,7 @@ retry:
             break;
         }
         if (unlikely(pmd_none(dst_pmdval)) &&
-            unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
+            unlikely(__pte_alloc(dst_mm, dst_pmd))) {
             err = -ENOMEM;
             break;
         }
-- 
cgit v1.2.3-59-g8ed1b

From 2c91bd4a4e2e530582d6fd643ea7b86b27907151 Mon Sep 17 00:00:00 2001
From: "Joel Fernandes (Google)"
Date: Thu, 3 Jan 2019 15:28:38 -0800
Subject: mm: speed up mremap by 20x on large regions

Android needs to mremap large regions of memory during memory
management related operations. The mremap system call can be really
slow if THP is not enabled. The bottleneck is move_page_tables(), which
copies one pte at a time, and can be really slow across a large map.
Turning on THP may not be a viable option, and is not for us. This
patch speeds up the performance for non-THP systems by copying at the
PMD level when possible.

The speedup is an order of magnitude on x86 (~20x). On a 1GB mremap,
the mremap completion time drops from 3.4-3.6 milliseconds to 144-160
microseconds.

Before:
Total mremap time for 1GB data: 3521942 nanoseconds.
Total mremap time for 1GB data: 3449229 nanoseconds.
Total mremap time for 1GB data: 3488230 nanoseconds.

After:
Total mremap time for 1GB data: 150279 nanoseconds.
Total mremap time for 1GB data: 144665 nanoseconds.
Total mremap time for 1GB data: 158708 nanoseconds.

If THP is enabled the optimization is mostly skipped except in certain
situations.
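[Editor's note: the commit quotes timing numbers but does not include the test
program. A minimal userspace sketch of how such a 1GB mremap measurement could
be taken is shown below; the buffer size, the PROT_NONE destination
reservation, and the output format are illustrative assumptions, not taken
from the patch.]

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/mman.h>

#define SZ (1UL << 30)  /* 1GB, matching the numbers quoted above */

int main(void)
{
        /* Populate 1GB of anonymous memory so page tables actually exist. */
        void *old = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (old == MAP_FAILED)
                return 1;
        memset(old, 1, SZ);

        /*
         * Reserve a destination and force a move there, so the kernel has to
         * migrate the page tables instead of returning the old address.
         */
        void *dst = mmap(NULL, SZ, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (dst == MAP_FAILED)
                return 1;

        struct timespec t0, t1;
        clock_gettime(CLOCK_MONOTONIC, &t0);
        void *new = mremap(old, SZ, SZ, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
        clock_gettime(CLOCK_MONOTONIC, &t1);
        if (new == MAP_FAILED)
                return 1;

        long long ns = (t1.tv_sec - t0.tv_sec) * 1000000000LL +
                       (t1.tv_nsec - t0.tv_nsec);
        printf("Total mremap time for 1GB data: %lld nanoseconds.\n", ns);

        munmap(new, SZ);
        return 0;
}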
[joel@joelfernandes.org: fix 'move_normal_pmd' unused function warning]
Link: http://lkml.kernel.org/r/20181108224457.GB209347@google.com
Link: http://lkml.kernel.org/r/20181108181201.88826-3-joelaf@google.com
Signed-off-by: Joel Fernandes (Google)
Acked-by: Kirill A. Shutemov
Reviewed-by: William Kucharski
Cc: Julia Lawall
Cc: Michal Hocko
Cc: Will Deacon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/Kconfig |  5 +++++
 mm/mremap.c  | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+)
(limited to 'mm')

diff --git a/arch/Kconfig b/arch/Kconfig
index e1e540ffa979..b70c952ac838 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -535,6 +535,11 @@ config HAVE_IRQ_TIME_ACCOUNTING
       Archs need to ensure they use a high enough resolution clock to
       support irq time accounting and then call enable_sched_clock_irqtime().

+config HAVE_MOVE_PMD
+    bool
+    help
+      Archs that select this are able to move page tables at the PMD level.
+
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
     bool

diff --git a/mm/mremap.c b/mm/mremap.c
index fc3f92962a7e..3320616ed93f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -191,6 +191,52 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
         drop_rmap_locks(vma);
 }

+#ifdef CONFIG_HAVE_MOVE_PMD
+static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+          unsigned long new_addr, unsigned long old_end,
+          pmd_t *old_pmd, pmd_t *new_pmd)
+{
+    spinlock_t *old_ptl, *new_ptl;
+    struct mm_struct *mm = vma->vm_mm;
+    pmd_t pmd;
+
+    if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
+        || old_end - old_addr < PMD_SIZE)
+        return false;
+
+    /*
+     * The destination pmd shouldn't be established, free_pgtables()
+     * should have release it.
+     */
+    if (WARN_ON(!pmd_none(*new_pmd)))
+        return false;
+
+    /*
+     * We don't have to worry about the ordering of src and dst
+     * ptlocks because exclusive mmap_sem prevents deadlock.
+     */
+    old_ptl = pmd_lock(vma->vm_mm, old_pmd);
+    new_ptl = pmd_lockptr(mm, new_pmd);
+    if (new_ptl != old_ptl)
+        spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+    /* Clear the pmd */
+    pmd = *old_pmd;
+    pmd_clear(old_pmd);
+
+    VM_BUG_ON(!pmd_none(*new_pmd));
+
+    /* Set the new pmd */
+    set_pmd_at(mm, new_addr, new_pmd, pmd);
+    flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+    if (new_ptl != old_ptl)
+        spin_unlock(new_ptl);
+    spin_unlock(old_ptl);
+
+    return true;
+}
+#endif
+
 unsigned long move_page_tables(struct vm_area_struct *vma,
         unsigned long old_addr, struct vm_area_struct *new_vma,
         unsigned long new_addr, unsigned long len,
@@ -235,7 +281,25 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
             split_huge_pmd(vma, old_pmd, old_addr);
             if (pmd_trans_unstable(old_pmd))
                 continue;
+        } else if (extent == PMD_SIZE) {
+#ifdef CONFIG_HAVE_MOVE_PMD
+            /*
+             * If the extent is PMD-sized, try to speed the move by
+             * moving at the PMD level if possible.
+             */
+            bool moved;
+
+            if (need_rmap_locks)
+                take_rmap_locks(vma);
+            moved = move_normal_pmd(vma, old_addr, new_addr,
+                    old_end, old_pmd, new_pmd);
+            if (need_rmap_locks)
+                drop_rmap_locks(vma);
+            if (moved)
+                continue;
+#endif
         }
+
         if (pte_alloc(new_vma->vm_mm, new_pmd))
             break;
         next = (new_addr + PMD_SIZE) & PMD_MASK;
-- 
cgit v1.2.3-59-g8ed1b

From fa45f1162f28cbba6c38180647b7b300f317ecb4 Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Thu, 3 Jan 2019 15:28:55 -0800
Subject: mm/: remove caller signal_pending branch predictions

This is already done for us internally by the signal machinery.
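[Editor's note: in other words, the signal-state helpers already carry a branch
hint on the hot flag test, so wrapping each call site in unlikely() is
redundant. The following userspace sketch illustrates that general pattern; it
is not the kernel's actual definition, and the names here are made up for the
illustration.]

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a per-task "signal pending" flag. */
static volatile bool sigpending_flag;

/*
 * The helper itself carries the branch hint, the way the kernel's
 * signal_pending()-style helpers do internally.
 */
static inline bool my_signal_pending(void)
{
        return __builtin_expect(sigpending_flag, 0);
}

int main(void)
{
        for (int i = 0; i < 1000; i++) {
                /*
                 * Callers just test the helper; adding another
                 * unlikely()/__builtin_expect() here would add nothing.
                 */
                if (my_signal_pending()) {
                        puts("interrupted");
                        return 1;
                }
        }
        puts("done");
        return 0;
}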
Link: http://lkml.kernel.org/r/20181116002713.8474-5-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso
Reviewed-by: Andrew Morton
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/filemap.c | 2 +-
 mm/gup.c     | 2 +-
 mm/hugetlb.c | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)
(limited to 'mm')

diff --git a/mm/filemap.c b/mm/filemap.c
index 29655fb47a2c..9f5e323e883e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1125,7 +1125,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
             break;
         }

-        if (unlikely(signal_pending_state(state, current))) {
+        if (signal_pending_state(state, current)) {
             ret = -EINTR;
             break;
         }
diff --git a/mm/gup.c b/mm/gup.c
index 8cb68a50dbdf..6dd33e16a806 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -727,7 +727,7 @@ retry:
          * If we have a pending SIGKILL, don't keep faulting pages and
          * potentially allocating memory.
          */
-        if (unlikely(fatal_signal_pending(current))) {
+        if (fatal_signal_pending(current)) {
             ret = -ERESTARTSYS;
             goto out;
         }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e37efd5d8318..745088810965 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4231,7 +4231,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
          * If we have a pending SIGKILL, don't keep faulting pages and
          * potentially allocating memory.
          */
-        if (unlikely(fatal_signal_pending(current))) {
+        if (fatal_signal_pending(current)) {
             remainder = 0;
             break;
         }
-- 
cgit v1.2.3-59-g8ed1b

From f86196ea8737c98ea96e5f95c99d0367be39a5d2 Mon Sep 17 00:00:00 2001
From: Nikolay Borisov
Date: Thu, 3 Jan 2019 15:29:02 -0800
Subject: fs: don't open code lru_to_page()

Multiple filesystems open code lru_to_page(). Rectify this by moving
the macro from mm_inline.h (which is specific to lru stuff) to the more
generic mm.h header, and by using the macro where appropriate.

No functional changes.

Link: http://lkml.kernel.org/r/20181129104810.23361-1-nborisov@suse.com
Link: https://lkml.kernel.org/r/20181129075301.29087-1-nborisov@suse.com
Signed-off-by: Nikolay Borisov
Acked-by: Michal Hocko
Reviewed-by: David Hildenbrand
Reviewed-by: Mike Rapoport
Acked-by: Pankaj gupta
Acked-by: "Yan, Zheng" [ceph]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/afs/file.c             | 5 +++--
 fs/btrfs/extent_io.c      | 3 +--
 fs/ceph/addr.c            | 5 ++---
 fs/cifs/file.c            | 3 ++-
 fs/ext4/readpage.c        | 2 +-
 fs/ocfs2/aops.c           | 3 ++-
 fs/orangefs/inode.c       | 2 +-
 include/linux/mm.h        | 2 ++
 include/linux/mm_inline.h | 3 ---
 mm/swap.c                 | 2 +-
 10 files changed, 15 insertions(+), 15 deletions(-)
(limited to 'mm')

diff --git a/fs/afs/file.c b/fs/afs/file.c
index d6bc3f5d784b..323ae9912203 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include "internal.h"

 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
@@ -441,7 +442,7 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping,
     /* Count the number of contiguous pages at the front of the list.  Note
      * that the list goes prev-wards rather than next-wards.
      */
-    first = list_entry(pages->prev, struct page, lru);
+    first = lru_to_page(pages);
     index = first->index + 1;
     n = 1;
     for (p = first->lru.prev; p != pages; p = p->prev) {
@@ -473,7 +474,7 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping,
      * page at the end of the file.
      */
     do {
-        page = list_entry(pages->prev, struct page, lru);
+        page = lru_to_page(pages);
         list_del(&page->lru);
         index = page->index;
         if (add_to_page_cache_lru(page, mapping, index,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index fc126b92ea59..52abe4082680 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4103,8 +4103,7 @@ int extent_readpages(struct address_space *mapping, struct list_head *pages,
     while (!list_empty(pages)) {
         for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
-            struct page *page = list_entry(pages->prev,
-                               struct page, lru);
+            struct page *page = lru_to_page(pages);

             prefetchw(&page->flags);
             list_del(&page->lru);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 8eade7a993c1..5d0c05e288cc 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -306,7 +306,7 @@ static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
     struct ceph_osd_client *osdc =
         &ceph_inode_to_client(inode)->client->osdc;
     struct ceph_inode_info *ci = ceph_inode(inode);
-    struct page *page = list_entry(page_list->prev, struct page, lru);
+    struct page *page = lru_to_page(page_list);
     struct ceph_vino vino;
     struct ceph_osd_request *req;
     u64 off;
@@ -333,8 +333,7 @@ static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
         if (got)
             ceph_put_cap_refs(ci, got);
         while (!list_empty(page_list)) {
-            page = list_entry(page_list->prev,
-                      struct page, lru);
+            page = lru_to_page(page_list);
             list_del(&page->lru);
             put_page(page);
         }
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 5e405164394a..e3e3a7550205 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include
 #include
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -3964,7 +3965,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,

     INIT_LIST_HEAD(tmplist);

-    page = list_entry(page_list->prev, struct page, lru);
+    page = lru_to_page(page_list);

     /*
      * Lock the page and put it in the cache. Since no one else
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index f461d75ac049..6aa282ee455a 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -128,7 +128,7 @@ int ext4_mpage_readpages(struct address_space *mapping,

         prefetchw(&page->flags);
         if (pages) {
-            page = list_entry(pages->prev, struct page, lru);
+            page = lru_to_page(pages);
             list_del(&page->lru);
             if (add_to_page_cache_lru(page, mapping, page->index,
                   readahead_gfp_mask(mapping)))
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index eb1ce30412dc..832c1759a09a 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include
 #include

 #include

@@ -397,7 +398,7 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
      * Check whether a remote node truncated this file - we just
      * drop out in that case as it's not worth handling here.
      */
-    last = list_entry(pages->prev, struct page, lru);
+    last = lru_to_page(pages);
     start = (loff_t)last->index << PAGE_SHIFT;
     if (start >= i_size_read(inode))
         goto out_unlock;
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index fe53381b26b1..f038235c64bd 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -77,7 +77,7 @@ static int orangefs_readpages(struct file *file,
     for (page_idx = 0; page_idx < nr_pages; page_idx++) {
         struct page *page;

-        page = list_entry(pages->prev, struct page, lru);
+        page = lru_to_page(pages);
         list_del(&page->lru);
         if (!add_to_page_cache(page,
                        mapping,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0d946f063cba..80bb6408fe73 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -171,6 +171,8 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
 #define PAGE_ALIGNED(addr)  IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

+#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+
 /*
  * Linux kernel virtual memory manager primitives.
  * The idea being to have a "virtual" mm in the same way
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 10191c28fc04..04ec454d44ce 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -124,7 +124,4 @@ static __always_inline enum lru_list page_lru(struct page *page)
     }
     return lru;
 }
-
-#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
-
 #endif
diff --git a/mm/swap.c b/mm/swap.c
index 4d8a1f1afaab..4929bc1be60e 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -126,7 +126,7 @@ void put_pages_list(struct list_head *pages)
     while (!list_empty(pages)) {
         struct page *victim;

-        victim = list_entry(pages->prev, struct page, lru);
+        victim = lru_to_page(pages);
         list_del(&victim->lru);
         put_page(victim);
     }
-- 
cgit v1.2.3-59-g8ed1b

From b685a7350ae76bc0f388e24b36d06a63776c68ee Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 3 Jan 2019 15:29:15 -0800
Subject: mm/page_io.c: fix polled swap page in swap_readpage()

swap_readpage() wants to do polling to bring in pages if asked to, but
it doesn't mark the bio as being polled. Additionally, the looping
around the blk_poll() check isn't correct: if we get a zero return, we
should call io_schedule(); we can't just assume that the bio has
completed. The regular bio->bi_private check should be used for that.

Link: http://lkml.kernel.org/r/e15243a8-2cdf-c32c-ecee-f289377c8ef9@kernel.dk
Signed-off-by: Jens Axboe
Reviewed-by: Andrew Morton
Cc: Christoph Hellwig
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_io.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
(limited to 'mm')

diff --git a/mm/page_io.c b/mm/page_io.c
index d975fa3f02aa..2e8019d0e048 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -401,6 +401,8 @@ int swap_readpage(struct page *page, bool synchronous)
     get_task_struct(current);
     bio->bi_private = current;
     bio_set_op_attrs(bio, REQ_OP_READ, 0);
+    if (synchronous)
+        bio->bi_opf |= REQ_HIPRI;
     count_vm_event(PSWPIN);
     bio_get(bio);
     qc = submit_bio(bio);
@@ -410,7 +412,7 @@ int swap_readpage(struct page *page, bool synchronous)
             break;

         if (!blk_poll(disk->queue, qc, true))
-            break;
+            io_schedule();
     }
     __set_current_state(TASK_RUNNING);
     bio_put(bio);
-- 
cgit v1.2.3-59-g8ed1b
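[Editor's note: the control-flow point of the swap_readpage() fix above can be
illustrated with a generic userspace sketch, not the kernel code: when a poll
attempt finds nothing, the right response is to yield and re-check the
completion condition, not to give up. All names below are made up for the
illustration.]

#include <stdbool.h>
#include <stdio.h>
#include <sched.h>

/* Stand-ins for the I/O completion state and a poll attempt. */
static bool io_done;
static int polls;

static bool try_poll(void)
{
        /* Pretend the device completes the request on the third poll. */
        if (++polls >= 3)
                io_done = true;
        return io_done;
}

int main(void)
{
        /*
         * Wait loop in the spirit of the fixed swap_readpage(): the loop
         * exits only when the work is actually complete; a poll that finds
         * nothing yields the CPU (analogous to io_schedule()) and retries.
         */
        while (!io_done) {
                if (!try_poll())
                        sched_yield();
        }
        printf("completed after %d polls\n", polls);
        return 0;
}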