From b5330628546616af14ff23075fbf8d4ad91f6e25 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 8 Sep 2015 14:58:28 -0700 Subject: mm: introduce vma_is_anonymous(vma) helper special_mapping_fault() is absolutely broken. It seems it was always wrong, but this didn't matter until vdso/vvar started to use more than one page. And after this change vma_is_anonymous() becomes really trivial, it simply checks vm_ops == NULL. However, I do think the helper makes sense. There are a lot of ->vm_ops != NULL checks, the helper makes the caller's code more understandable (self-documented) and this is more grep-friendly. This patch (of 3): Preparation. Add the new simple helper, vma_is_anonymous(vma), and change handle_pte_fault() to use it. It will have more users. The name is not accurate, say a hpet_mmap()'ed vma is not anonymous. Perhaps it should be named vma_has_fault() instead. But it matches the logic in mmap.c/memory.c (see next changes). "True" just means that a page fault will use do_anonymous_page(). Signed-off-by: Oleg Nesterov Acked-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Hugh Dickins Cc: Pavel Emelyanov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index bb04d8f2f86c..882c9d7ae2f5 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3267,12 +3267,12 @@ static int handle_pte_fault(struct mm_struct *mm, barrier(); if (!pte_present(entry)) { if (pte_none(entry)) { - if (vma->vm_ops) + if (vma_is_anonymous(vma)) + return do_anonymous_page(mm, vma, address, + pte, pmd, flags); + else return do_fault(mm, vma, address, pte, pmd, flags, entry); - - return do_anonymous_page(mm, vma, address, pte, pmd, - flags); } return do_swap_page(mm, vma, address, pte, pmd, flags, entry); -- cgit v1.2.3-59-g8ed1b From 8a9cc3b55e9d20289cc18a65257e62c2dd4932fb Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 8 Sep 2015 14:58:31 -0700 Subject: mmap: fix the usage of ->vm_pgoff in special_mapping paths Test-case: #include #include #include #include #include #include void *find_vdso_vaddr(void) { FILE *perl; char buf[32] = {}; perl = popen("perl -e 'open STDIN,qq|/proc/@{[getppid]}/maps|;" "/^(.*?)-.*vdso/ && print hex $1 while <>'", "r"); fread(buf, sizeof(buf), 1, perl); fclose(perl); return (void *)atol(buf); } #define PAGE_SIZE 4096 int main(void) { void *vdso = find_vdso_vaddr(); assert(vdso); // of course they should differ, and they do so far printf("vdso pages differ: %d\n", !!memcmp(vdso, vdso + PAGE_SIZE, PAGE_SIZE)); // split into 2 vma's assert(mprotect(vdso, PAGE_SIZE, PROT_READ) == 0); // force another fault on the next check assert(madvise(vdso, 2 * PAGE_SIZE, MADV_DONTNEED) == 0); // now they no longer differ, the 2nd vm_pgoff is wrong printf("vdso pages differ: %d\n", !!memcmp(vdso, vdso + PAGE_SIZE, PAGE_SIZE)); return 0; } Output: vdso pages differ: 1 vdso pages differ: 0 This is because split_vma() correctly updates ->vm_pgoff, but the logic in insert_vm_struct() and special_mapping_fault() is absolutely broken, so the fault at vdso + PAGE_SIZE return the 1st page. The same happens if you simply unmap the 1st page. special_mapping_fault() does: pgoff = vmf->pgoff - vma->vm_pgoff; and this is _only_ correct if vma->vm_start mmaps the first page from ->vm_private_data array. vdso or any other user of install_special_mapping() is not anonymous, it has the "backing storage" even if it is just the array of pages. 
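For reference, the vma_is_anonymous() helper introduced by the first patch above is added in include/linux/mm.h, which falls outside the "limited to 'mm'" filter of this log, so its definition never appears in these diffs. Going by that commit message ("it simply checks vm_ops == NULL"), it reduces to roughly this sketch:

	static inline bool vma_is_anonymous(struct vm_area_struct *vma)
	{
		/* "true" means a fault on this vma would be served by do_anonymous_page() */
		return !vma->vm_ops;
	}
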
So we actually need to make vm_pgoff work as an offset in this array. Note: this also allows to fix another problem: currently gdb can't access "[vvar]" memory because in this case special_mapping_fault() doesn't work. Now that we can use ->vm_pgoff we can implement ->access() and fix this. Signed-off-by: Oleg Nesterov Acked-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Hugh Dickins Cc: Pavel Emelyanov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 82db4fc0a9d3..52b2f6e16f6f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2884,7 +2884,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) * using the existing file pgoff checks and manipulations. * Similarly in do_mmap_pgoff and in do_brk. */ - if (!vma->vm_file) { + if (vma_is_anonymous(vma)) { BUG_ON(vma->anon_vma); vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } @@ -3027,21 +3027,13 @@ static int special_mapping_fault(struct vm_area_struct *vma, pgoff_t pgoff; struct page **pages; - /* - * special mappings have no vm_file, and in that case, the mm - * uses vm_pgoff internally. So we have to subtract it from here. - * We are allowed to do this because we are the mm; do not copy - * this code into drivers! - */ - pgoff = vmf->pgoff - vma->vm_pgoff; - if (vma->vm_ops == &legacy_special_mapping_vmops) pages = vma->vm_private_data; else pages = ((struct vm_special_mapping *)vma->vm_private_data)-> pages; - for (; pgoff && *pages; ++pages) + for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) pgoff--; if (*pages) { -- cgit v1.2.3-59-g8ed1b From ce75799b83aaf3fd592e21531a9532bed157c6b5 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 8 Sep 2015 14:58:34 -0700 Subject: mremap: fix the wrong !vma->vm_file check in copy_vma() Test-case: #define _GNU_SOURCE #include #include #include #include #include #include void *find_vdso_vaddr(void) { FILE *perl; char buf[32] = {}; perl = popen("perl -e 'open STDIN,qq|/proc/@{[getppid]}/maps|;" "/^(.*?)-.*vdso/ && print hex $1 while <>'", "r"); fread(buf, sizeof(buf), 1, perl); fclose(perl); return (void *)atol(buf); } #define PAGE_SIZE 4096 void *get_unmapped_area(void) { void *p = mmap(0, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1,0); assert(p != MAP_FAILED); munmap(p, PAGE_SIZE); return p; } char save[2][PAGE_SIZE]; int main(void) { void *vdso = find_vdso_vaddr(); void *page[2]; assert(vdso); memcpy(save, vdso, sizeof (save)); // force another fault on the next check assert(madvise(vdso, 2 * PAGE_SIZE, MADV_DONTNEED) == 0); page[0] = mremap(vdso, PAGE_SIZE, PAGE_SIZE, MREMAP_FIXED | MREMAP_MAYMOVE, get_unmapped_area()); page[1] = mremap(vdso + PAGE_SIZE, PAGE_SIZE, PAGE_SIZE, MREMAP_FIXED | MREMAP_MAYMOVE, get_unmapped_area()); assert(page[0] != MAP_FAILED && page[1] != MAP_FAILED); printf("match: %d %d\n", !memcmp(save[0], page[0], PAGE_SIZE), !memcmp(save[1], page[1], PAGE_SIZE)); return 0; } fails without this patch. Before the previous commit it gets the wrong page, now it segfaults (which is imho better). This is because copy_vma() wrongly assumes that if vma->vm_file == NULL is irrelevant until the first fault which will use do_anonymous_page(). This is obviously wrong for the special mapping. Signed-off-by: Oleg Nesterov Acked-by: Kirill A. 
Shutemov Cc: Andy Lutomirski Cc: Hugh Dickins Cc: Pavel Emelyanov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 52b2f6e16f6f..52a2373d0ed4 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2918,7 +2918,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. */ - if (unlikely(!vma->vm_file && !vma->anon_vma)) { + if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { pgoff = addr >> PAGE_SHIFT; faulted_in_anon_vma = false; } -- cgit v1.2.3-59-g8ed1b
From e1b9996b85ba3ff143ded04523cd015762d20f03 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 8 Sep 2015 14:58:37 -0700 Subject: thp: vma_adjust_trans_huge(): adjust file-backed VMA too This series of patches adds support for using PMD page table entries to map DAX files. We expect NV-DIMMs to start showing up that are many gigabytes in size and the memory consumption of 4kB PTEs will be astronomical. The patch series leverages much of the Transparent Huge Pages infrastructure, going so far as to borrow one of Kirill's patches from his THP page cache series. This patch (of 10): Since we're going to have huge pages in the page cache, we need to call vma_adjust_trans_huge() for file-backed VMAs as well, since they can potentially contain huge pages. For now we call it for all VMAs. Probably later we will need to introduce a flag to indicate that the VMA has huge pages. Signed-off-by: Kirill A. Shutemov Signed-off-by: Matthew Wilcox Acked-by: Hillf Danton Cc: Theodore Ts'o Cc: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 11 +---------- mm/huge_memory.c | 2 +- 2 files changed, 2 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f10b20f05159..1c53c7d7ef7e 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -122,7 +122,7 @@ extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, #endif extern int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice); -extern void __vma_adjust_trans_huge(struct vm_area_struct *vma, +extern void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next); @@ -138,15 +138,6 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, else return 0; } -static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, - unsigned long start, - unsigned long end, - long adjust_next) -{ - if (!vma->anon_vma || vma->vm_ops) - return; - __vma_adjust_trans_huge(vma, start, end, adjust_next); -} static inline int hpage_nr_pages(struct page *page) { if (unlikely(PageTransHuge(page))) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 279a818a39b1..4d5fcb630d32 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2991,7 +2991,7 @@ static void split_huge_page_address(struct mm_struct *mm, split_huge_page_pmd_mm(mm, address, pmd); } -void __vma_adjust_trans_huge(struct vm_area_struct *vma, +void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next) -- cgit v1.2.3-59-g8ed1b
From 7c414164593514f76b422faae0824bdd3754209b Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 8 Sep 2015 14:58:43 -0700 Subject: dax: revert userfaultfd change Undo the change which "userfaultfd: call
handle_userfault() for userfaultfd_missing() faults" made to set_huge_zero_page(). DAX will need that return value. Cc: Andrea Arcangeli Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4d5fcb630d32..ca475dfdb28f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -794,16 +794,19 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp) } /* Caller must hold page table lock. */ -static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, +static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *zero_page) { pmd_t entry; + if (!pmd_none(*pmd)) + return false; entry = mk_pmd(zero_page, vma->vm_page_prot); entry = pmd_mkhuge(entry); pgtable_trans_huge_deposit(mm, pmd, pgtable); set_pmd_at(mm, haddr, pmd, entry); atomic_long_inc(&mm->nr_ptes); + return true; } int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, -- cgit v1.2.3-59-g8ed1b From 4897c7655d9419ba7e62bac145ec6a1847134d93 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 8 Sep 2015 14:58:45 -0700 Subject: thp: prepare for DAX huge pages Add a vma_is_dax() helper macro to test whether the VMA is DAX, and use it in zap_huge_pmd() and __split_huge_page_pmd(). [akpm@linux-foundation.org: fix build] Signed-off-by: Matthew Wilcox Cc: Hillf Danton Cc: "Kirill A. Shutemov" Cc: Theodore Ts'o Cc: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/dax.h | 4 ++++ mm/huge_memory.c | 48 +++++++++++++++++++++++++++++------------------- 2 files changed, 33 insertions(+), 19 deletions(-) (limited to 'mm') diff --git a/include/linux/dax.h b/include/linux/dax.h index 4f27d3dbf6e8..9b51f9d40ad9 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -18,4 +18,8 @@ int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); #define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod) #define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod) +static inline bool vma_is_dax(struct vm_area_struct *vma) +{ + return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); +} #endif diff --git a/mm/huge_memory.c b/mm/huge_memory.c index ca475dfdb28f..9057241d5722 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -794,7 +795,7 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp) } /* Caller must hold page table lock. 
*/ -static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, +bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *zero_page) { @@ -1421,7 +1422,6 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, int ret = 0; if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { - struct page *page; pgtable_t pgtable; pmd_t orig_pmd; /* @@ -1433,13 +1433,22 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, tlb->fullmm); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); - pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); + if (vma_is_dax(vma)) { + if (is_huge_zero_pmd(orig_pmd)) { + pgtable = NULL; + } else { + spin_unlock(ptl); + return 1; + } + } else { + pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); + } if (is_huge_zero_pmd(orig_pmd)) { atomic_long_dec(&tlb->mm->nr_ptes); spin_unlock(ptl); put_huge_zero_page(); } else { - page = pmd_page(orig_pmd); + struct page *page = pmd_page(orig_pmd); page_remove_rmap(page); VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); @@ -1448,7 +1457,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, spin_unlock(ptl); tlb_remove_page(tlb, page); } - pte_free(tlb->mm, pgtable); + if (pgtable) + pte_free(tlb->mm, pgtable); ret = 1; } return ret; @@ -2914,7 +2924,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) { spinlock_t *ptl; - struct page *page; + struct page *page = NULL; struct mm_struct *mm = vma->vm_mm; unsigned long haddr = address & HPAGE_PMD_MASK; unsigned long mmun_start; /* For mmu_notifiers */ @@ -2927,25 +2937,25 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, again: mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); ptl = pmd_lock(mm, pmd); - if (unlikely(!pmd_trans_huge(*pmd))) { - spin_unlock(ptl); - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); - return; - } - if (is_huge_zero_pmd(*pmd)) { + if (unlikely(!pmd_trans_huge(*pmd))) + goto unlock; + if (vma_is_dax(vma)) { + pmdp_huge_clear_flush(vma, haddr, pmd); + } else if (is_huge_zero_pmd(*pmd)) { __split_huge_zero_page_pmd(vma, haddr, pmd); - spin_unlock(ptl); - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); - return; + } else { + page = pmd_page(*pmd); + VM_BUG_ON_PAGE(!page_count(page), page); + get_page(page); } - page = pmd_page(*pmd); - VM_BUG_ON_PAGE(!page_count(page), page); - get_page(page); + unlock: spin_unlock(ptl); mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); - split_huge_page(page); + if (!page) + return; + split_huge_page(page); put_page(page); /* -- cgit v1.2.3-59-g8ed1b From b96375f74a6d4f39fc6cbdc0bce5175115c7f96f Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 8 Sep 2015 14:58:48 -0700 Subject: mm: add a pmd_fault handler Allow non-anonymous VMAs to provide huge pages in response to a page fault. Signed-off-by: Matthew Wilcox Cc: Hillf Danton Cc: "Kirill A. 
Shutemov" Cc: Theodore Ts'o Cc: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 ++ mm/memory.c | 30 ++++++++++++++++++++++++------ 2 files changed, 26 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index dfb7ce05f1e3..e1fbd18b8c4b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -249,6 +249,8 @@ struct vm_operations_struct { void (*close)(struct vm_area_struct * area); int (*mremap)(struct vm_area_struct * area); int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); + int (*pmd_fault)(struct vm_area_struct *, unsigned long address, + pmd_t *, unsigned int flags); void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); /* notification that a previously read-only page is about to become diff --git a/mm/memory.c b/mm/memory.c index 882c9d7ae2f5..a3f9a8ccec0f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3232,6 +3232,27 @@ out: return 0; } +static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, unsigned int flags) +{ + if (!vma->vm_ops) + return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags); + if (vma->vm_ops->pmd_fault) + return vma->vm_ops->pmd_fault(vma, address, pmd, flags); + return VM_FAULT_FALLBACK; +} + +static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, pmd_t orig_pmd, + unsigned int flags) +{ + if (!vma->vm_ops) + return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd); + if (vma->vm_ops->pmd_fault) + return vma->vm_ops->pmd_fault(vma, address, pmd, flags); + return VM_FAULT_FALLBACK; +} + /* * These routines also need to handle stuff like marking pages dirty * and/or accessed for architectures that don't do it in hardware (most @@ -3334,10 +3355,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (!pmd) return VM_FAULT_OOM; if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { - int ret = VM_FAULT_FALLBACK; - if (!vma->vm_ops) - ret = do_huge_pmd_anonymous_page(mm, vma, address, - pmd, flags); + int ret = create_huge_pmd(mm, vma, address, pmd, flags); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { @@ -3361,8 +3379,8 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, orig_pmd, pmd); if (dirty && !pmd_write(orig_pmd)) { - ret = do_huge_pmd_wp_page(mm, vma, address, pmd, - orig_pmd); + ret = wp_huge_pmd(mm, vma, address, pmd, + orig_pmd, flags); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { -- cgit v1.2.3-59-g8ed1b From fc43704437ebe40f642ac53f7ee73661fe74e6b8 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 8 Sep 2015 14:58:51 -0700 Subject: mm: export various functions for the benefit of DAX To use the huge zero page in DAX, we need these functions exported. Signed-off-by: Matthew Wilcox Cc: Hillf Danton Cc: "Kirill A. 
Shutemov" Cc: Theodore Ts'o Cc: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 10 ++++++++++ mm/huge_memory.c | 7 +------ 2 files changed, 11 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 1c53c7d7ef7e..70587ea079c3 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -155,6 +155,16 @@ static inline bool is_huge_zero_page(struct page *page) return ACCESS_ONCE(huge_zero_page) == page; } +static inline bool is_huge_zero_pmd(pmd_t pmd) +{ + return is_huge_zero_page(pmd_page(pmd)); +} + +struct page *get_huge_zero_page(void); +bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long haddr, + pmd_t *pmd, struct page *zero_page); + #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9057241d5722..c426a89e025c 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -173,12 +173,7 @@ fail: static atomic_t huge_zero_refcount; struct page *huge_zero_page __read_mostly; -static inline bool is_huge_zero_pmd(pmd_t pmd) -{ - return is_huge_zero_page(pmd_page(pmd)); -} - -static struct page *get_huge_zero_page(void) +struct page *get_huge_zero_page(void) { struct page *zero_page; retry: -- cgit v1.2.3-59-g8ed1b From 5cad465d7fa646bad3d677df276bfc8e2ad709e3 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 8 Sep 2015 14:58:54 -0700 Subject: mm: add vmf_insert_pfn_pmd() Similar to vm_insert_pfn(), but for PMDs rather than PTEs. The 'vmf_' prefix instead of 'vm_' prefix is intended to indicate that it returns a VMF_ value rather than an errno (which would only have to be converted into a VMF_ value anyway). Signed-off-by: Matthew Wilcox Cc: Hillf Danton Cc: "Kirill A. 
Shutemov" Cc: Theodore Ts'o Cc: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 2 ++ mm/huge_memory.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) (limited to 'mm') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 70587ea079c3..f9b612fec4dd 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -33,6 +33,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma, extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); +int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *, + unsigned long pfn, bool write); enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_FLAG, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c426a89e025c..3ea6f908a5e0 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -869,6 +869,49 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, flags); } +static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write) +{ + struct mm_struct *mm = vma->vm_mm; + pmd_t entry; + spinlock_t *ptl; + + ptl = pmd_lock(mm, pmd); + if (pmd_none(*pmd)) { + entry = pmd_mkhuge(pfn_pmd(pfn, prot)); + if (write) { + entry = pmd_mkyoung(pmd_mkdirty(entry)); + entry = maybe_pmd_mkwrite(entry, vma); + } + set_pmd_at(mm, addr, pmd, entry); + update_mmu_cache_pmd(vma, addr, pmd); + } + spin_unlock(ptl); + return VM_FAULT_NOPAGE; +} + +int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, unsigned long pfn, bool write) +{ + pgprot_t pgprot = vma->vm_page_prot; + /* + * If we had pmd_special, we could avoid all these restrictions, + * but we need to be consistent with PTEs and architectures that + * can't support a 'special' bit. + */ + BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); + BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == + (VM_PFNMAP|VM_MIXEDMAP)); + BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); + BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); + + if (addr < vma->vm_start || addr >= vma->vm_end) + return VM_FAULT_SIGBUS; + if (track_pfn_insert(vma, &pgprot, pfn)) + return VM_FAULT_SIGBUS; + return insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write); +} + int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma) -- cgit v1.2.3-59-g8ed1b From ae18d6dcf57b56b984ff27fd55b4e2caf5bfbd44 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 8 Sep 2015 14:59:14 -0700 Subject: thp: change insert_pfn's return type to void It would make more sense to have all the return values from vmf_insert_pfn_pmd() encoded in one place instead of having to follow the convention into insert_pfn(). Suggested by Jeff Moyer. 
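Taken together with the two preceding patches (the ->pmd_fault hook and vmf_insert_pfn_pmd()), the calling convention looks roughly like the sketch below. This is a hedged illustration only: my_pmd_fault(), my_lookup_pfn(), my_fault() and my_vm_ops are invented names, not code from this series; only the pmd_fault signature, vmf_insert_pfn_pmd() and the VM_FAULT_* values come from the patches above.

	/*
	 * Sketch of a huge-page-capable fault handler. my_lookup_pfn() stands in
	 * for whatever resolves the faulting address to a PMD-aligned, PMD-sized
	 * pfn range (or fails).
	 */
	static int my_pmd_fault(struct vm_area_struct *vma, unsigned long address,
				pmd_t *pmd, unsigned int flags)
	{
		bool write = flags & FAULT_FLAG_WRITE;
		unsigned long pfn;

		if (my_lookup_pfn(vma, address, write, &pfn))
			return VM_FAULT_FALLBACK;	/* let the core fall back to 4k PTEs */

		/* Returns VM_FAULT_NOPAGE or VM_FAULT_SIGBUS directly, hence the vmf_ prefix. */
		return vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

	static const struct vm_operations_struct my_vm_ops = {
		.fault		= my_fault,		/* regular 4k fault path, not shown */
		.pmd_fault	= my_pmd_fault,
	};

The core calls ->pmd_fault only where a pmd-level fault can be served with a huge page (see create_huge_pmd() and wp_huge_pmd() above); a VM_FAULT_FALLBACK return sends the fault down the ordinary PTE path.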
Signed-off-by: Matthew Wilcox Cc: Jeff Moyer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 3ea6f908a5e0..5df0d1597c15 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -869,7 +869,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, flags); } -static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, +static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write) { struct mm_struct *mm = vma->vm_mm; @@ -887,7 +887,6 @@ static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, update_mmu_cache_pmd(vma, addr, pmd); } spin_unlock(ptl); - return VM_FAULT_NOPAGE; } int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, @@ -909,7 +908,8 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, return VM_FAULT_SIGBUS; if (track_pfn_insert(vma, &pgprot, pfn)) return VM_FAULT_SIGBUS; - return insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write); + insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write); + return VM_FAULT_NOPAGE; } int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, -- cgit v1.2.3-59-g8ed1b From 843172978bb92997310d2f7fbc172ece423cfc02 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 8 Sep 2015 14:59:25 -0700 Subject: dax: fix race between simultaneous faults If two threads write-fault on the same hole at the same time, the winner of the race will return to userspace and complete their store, only to have the loser overwrite their store with zeroes. Fix this for now by taking the i_mmap_sem for write instead of read, and do so outside the call to get_block(). Now the loser of the race will see the block has already been zeroed, and will not zero it again. This severely limits our scalability. I have ideas for improving it, but those can wait for a later patch. Signed-off-by: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 33 +++++++++++++++++---------------- mm/memory.c | 11 ++++++++--- 2 files changed, 25 insertions(+), 19 deletions(-) (limited to 'mm') diff --git a/fs/dax.c b/fs/dax.c index c694117a7062..9593f4bee327 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -272,7 +272,6 @@ static int copy_user_bh(struct page *to, struct buffer_head *bh, static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, struct vm_area_struct *vma, struct vm_fault *vmf) { - struct address_space *mapping = inode->i_mapping; sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9); unsigned long vaddr = (unsigned long)vmf->virtual_address; void *addr; @@ -280,8 +279,6 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, pgoff_t size; int error; - i_mmap_lock_read(mapping); - /* * Check truncate didn't happen while we were allocating a block. 
* If it did, this block may or may not be still allocated to the @@ -309,8 +306,6 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, error = vm_insert_mixed(vma, vaddr, pfn); out: - i_mmap_unlock_read(mapping); - return error; } @@ -372,15 +367,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, * from a read fault and we've raced with a truncate */ error = -EIO; - goto unlock_page; + goto unlock; } + } else { + i_mmap_lock_write(mapping); } error = get_block(inode, block, &bh, 0); if (!error && (bh.b_size < PAGE_SIZE)) error = -EIO; /* fs corruption? */ if (error) - goto unlock_page; + goto unlock; if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) { if (vmf->flags & FAULT_FLAG_WRITE) { @@ -391,8 +388,9 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, if (!error && (bh.b_size < PAGE_SIZE)) error = -EIO; if (error) - goto unlock_page; + goto unlock; } else { + i_mmap_unlock_write(mapping); return dax_load_hole(mapping, page, vmf); } } @@ -404,17 +402,15 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, else clear_user_highpage(new_page, vaddr); if (error) - goto unlock_page; + goto unlock; vmf->page = page; if (!page) { - i_mmap_lock_read(mapping); /* Check we didn't race with truncate */ size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; if (vmf->pgoff >= size) { - i_mmap_unlock_read(mapping); error = -EIO; - goto out; + goto unlock; } } return VM_FAULT_LOCKED; @@ -450,6 +446,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE)); } + if (!page) + i_mmap_unlock_write(mapping); out: if (error == -ENOMEM) return VM_FAULT_OOM | major; @@ -458,11 +456,14 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, return VM_FAULT_SIGBUS | major; return VM_FAULT_NOPAGE | major; - unlock_page: + unlock: if (page) { unlock_page(page); page_cache_release(page); + } else { + i_mmap_unlock_write(mapping); } + goto out; } EXPORT_SYMBOL(__dax_fault); @@ -540,10 +541,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, block = (sector_t)pgoff << (PAGE_SHIFT - blkbits); bh.b_size = PMD_SIZE; + i_mmap_lock_write(mapping); length = get_block(inode, block, &bh, write); if (length) return VM_FAULT_SIGBUS; - i_mmap_lock_read(mapping); /* * If the filesystem isn't willing to tell us the length of a hole, @@ -607,11 +608,11 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, } out: - i_mmap_unlock_read(mapping); - if (buffer_unwritten(&bh)) complete_unwritten(&bh, !(result & VM_FAULT_ERROR)); + i_mmap_unlock_write(mapping); + return result; fallback: diff --git a/mm/memory.c b/mm/memory.c index a3f9a8ccec0f..320c42e95e69 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2427,11 +2427,16 @@ void unmap_mapping_range(struct address_space *mapping, details.last_index = ULONG_MAX; - /* DAX uses i_mmap_lock to serialise file truncate vs page fault */ - i_mmap_lock_write(mapping); + /* + * DAX already holds i_mmap_lock to serialise file truncate vs + * page fault and page fault vs page fault. 
+ */ + if (!IS_DAX(mapping->host)) + i_mmap_lock_write(mapping); if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) unmap_mapping_range_tree(&mapping->i_mmap, &details); - i_mmap_unlock_write(mapping); + if (!IS_DAX(mapping->host)) + i_mmap_unlock_write(mapping); } EXPORT_SYMBOL(unmap_mapping_range); -- cgit v1.2.3-59-g8ed1b From 5b701b846aad7909d20693bcced2522d0ce8d1bc Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 8 Sep 2015 14:59:28 -0700 Subject: thp: decrement refcount on huge zero page if it is split The DAX code neglected to put the refcount on the huge zero page. Also we must notify on splits. Signed-off-by: Kirill A. Shutemov Signed-off-by: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 5df0d1597c15..7510b6f683e9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2978,7 +2978,9 @@ again: if (unlikely(!pmd_trans_huge(*pmd))) goto unlock; if (vma_is_dax(vma)) { - pmdp_huge_clear_flush(vma, haddr, pmd); + pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); + if (is_huge_zero_pmd(_pmd)) + put_huge_zero_page(); } else if (is_huge_zero_pmd(*pmd)) { __split_huge_zero_page_pmd(vma, haddr, pmd); } else { -- cgit v1.2.3-59-g8ed1b From da146769004e1dd5ed06853e6d009be8ca675d5f Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 8 Sep 2015 14:59:31 -0700 Subject: thp: fix zap_huge_pmd() for DAX The original DAX code assumed that pgtable_t was a pointer, which isn't true on all architectures. Restructure the code to not rely on that assumption. [willy@linux.intel.com: further fixes integrated into this patch] Signed-off-by: Kirill A. Shutemov Signed-off-by: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 71 +++++++++++++++++++++++++------------------------------- 1 file changed, 31 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 7510b6f683e9..96dfd9d81fcb 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1456,50 +1456,41 @@ out: int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr) { + pmd_t orig_pmd; spinlock_t *ptl; - int ret = 0; - if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { - pgtable_t pgtable; - pmd_t orig_pmd; - /* - * For architectures like ppc64 we look at deposited pgtable - * when calling pmdp_huge_get_and_clear. So do the - * pgtable_trans_huge_withdraw after finishing pmdp related - * operations. - */ - orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, - tlb->fullmm); - tlb_remove_pmd_tlb_entry(tlb, pmd, addr); - if (vma_is_dax(vma)) { - if (is_huge_zero_pmd(orig_pmd)) { - pgtable = NULL; - } else { - spin_unlock(ptl); - return 1; - } - } else { - pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); - } - if (is_huge_zero_pmd(orig_pmd)) { - atomic_long_dec(&tlb->mm->nr_ptes); - spin_unlock(ptl); + if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1) + return 0; + /* + * For architectures like ppc64 we look at deposited pgtable + * when calling pmdp_huge_get_and_clear. So do the + * pgtable_trans_huge_withdraw after finishing pmdp related + * operations. 
+ */ + orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, + tlb->fullmm); + tlb_remove_pmd_tlb_entry(tlb, pmd, addr); + if (vma_is_dax(vma)) { + spin_unlock(ptl); + if (is_huge_zero_pmd(orig_pmd)) put_huge_zero_page(); - } else { - struct page *page = pmd_page(orig_pmd); - page_remove_rmap(page); - VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); - add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); - VM_BUG_ON_PAGE(!PageHead(page), page); - atomic_long_dec(&tlb->mm->nr_ptes); - spin_unlock(ptl); - tlb_remove_page(tlb, page); - } - if (pgtable) - pte_free(tlb->mm, pgtable); - ret = 1; + } else if (is_huge_zero_pmd(orig_pmd)) { + pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); + atomic_long_dec(&tlb->mm->nr_ptes); + spin_unlock(ptl); + put_huge_zero_page(); + } else { + struct page *page = pmd_page(orig_pmd); + page_remove_rmap(page); + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); + add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); + VM_BUG_ON_PAGE(!PageHead(page), page); + pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); + atomic_long_dec(&tlb->mm->nr_ptes); + spin_unlock(ptl); + tlb_remove_page(tlb, page); } - return ret; + return 1; } int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, -- cgit v1.2.3-59-g8ed1b From d295e3415a88ae63a37a22652808b20c7fcb970e Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 8 Sep 2015 14:59:34 -0700 Subject: dax: don't use set_huge_zero_page() This is another place where DAX assumed that pgtable_t was a pointer. Open code the important parts of set_huge_zero_page() in DAX and make set_huge_zero_page() static again. Signed-off-by: Kirill A. Shutemov Signed-off-by: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 18 ++++++++++++------ include/linux/huge_mm.h | 3 --- mm/huge_memory.c | 2 +- 3 files changed, 13 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/fs/dax.c b/fs/dax.c index 9593f4bee327..d778e5f1a01c 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -572,18 +572,24 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0); if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) { - bool set; spinlock_t *ptl; - struct mm_struct *mm = vma->vm_mm; + pmd_t entry; struct page *zero_page = get_huge_zero_page(); + if (unlikely(!zero_page)) goto fallback; - ptl = pmd_lock(mm, pmd); - set = set_huge_zero_page(NULL, mm, vma, pmd_addr, pmd, - zero_page); - spin_unlock(ptl); + ptl = pmd_lock(vma->vm_mm, pmd); + if (!pmd_none(*pmd)) { + spin_unlock(ptl); + goto fallback; + } + + entry = mk_pmd(zero_page, vma->vm_page_prot); + entry = pmd_mkhuge(entry); + set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry); result = VM_FAULT_NOPAGE; + spin_unlock(ptl); } else { sector = bh.b_blocknr << (blkbits - 9); length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn, diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f9b612fec4dd..ecb080d6ff42 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -163,9 +163,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) } struct page *get_huge_zero_page(void); -bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long haddr, - pmd_t *pmd, struct page *zero_page); #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 96dfd9d81fcb..3e574efad8f8 100644 --- 
a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -790,7 +790,7 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp) } /* Caller must hold page table lock. */ -bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, +static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *zero_page) { -- cgit v1.2.3-59-g8ed1b From 46c043ede4711e8d598b9d63c5616c1fedb0605e Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 8 Sep 2015 14:59:42 -0700 Subject: mm: take i_mmap_lock in unmap_mapping_range() for DAX DAX is not so special: we need i_mmap_lock to protect mapping->i_mmap. __dax_pmd_fault() uses unmap_mapping_range() shoot out zero page from all mappings. We need to drop i_mmap_lock there to avoid lock deadlock. Re-aquiring the lock should be fine since we check i_size after the point. Signed-off-by: Kirill A. Shutemov Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 35 +++++++++++++++++++---------------- mm/memory.c | 11 ++--------- 2 files changed, 21 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/fs/dax.c b/fs/dax.c index 9ef9b80cc132..ed54efedade6 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -554,6 +554,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) goto fallback; + if (buffer_unwritten(&bh) || buffer_new(&bh)) { + int i; + for (i = 0; i < PTRS_PER_PMD; i++) + clear_page(kaddr + i * PAGE_SIZE); + count_vm_event(PGMAJFAULT); + mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); + result |= VM_FAULT_MAJOR; + } + + /* + * If we allocated new storage, make sure no process has any + * zero pages covering this hole + */ + if (buffer_new(&bh)) { + i_mmap_unlock_write(mapping); + unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0); + i_mmap_lock_write(mapping); + } + /* * If a truncate happened while we were allocating blocks, we may * leave blocks allocated to the file that are beyond EOF. We can't @@ -568,13 +587,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, if ((pgoff | PG_PMD_COLOUR) >= size) goto fallback; - /* - * If we allocated new storage, make sure no process has any - * zero pages covering this hole - */ - if (buffer_new(&bh)) - unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0); - if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) { spinlock_t *ptl; pmd_t entry; @@ -605,15 +617,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR)) goto fallback; - if (buffer_unwritten(&bh) || buffer_new(&bh)) { - int i; - for (i = 0; i < PTRS_PER_PMD; i++) - clear_page(kaddr + i * PAGE_SIZE); - count_vm_event(PGMAJFAULT); - mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); - result |= VM_FAULT_MAJOR; - } - result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write); } diff --git a/mm/memory.c b/mm/memory.c index 320c42e95e69..ce8e983f3c4d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2426,17 +2426,10 @@ void unmap_mapping_range(struct address_space *mapping, if (details.last_index < details.first_index) details.last_index = ULONG_MAX; - - /* - * DAX already holds i_mmap_lock to serialise file truncate vs - * page fault and page fault vs page fault. 
- */ - if (!IS_DAX(mapping->host)) - i_mmap_lock_write(mapping); + i_mmap_lock_write(mapping); if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) unmap_mapping_range_tree(&mapping->i_mmap, &details); - if (!IS_DAX(mapping->host)) - i_mmap_unlock_write(mapping); + i_mmap_unlock_write(mapping); } EXPORT_SYMBOL(unmap_mapping_range); -- cgit v1.2.3-59-g8ed1b
From 52a2b53ffde6d6018dfc454fbde34383351fb896 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 8 Sep 2015 14:59:45 -0700 Subject: mm, dax: use i_mmap_unlock_write() in do_cow_fault() __dax_fault() takes i_mmap_lock for write. Let's pair it with a write unlock on the do_cow_fault() side. Signed-off-by: Kirill A. Shutemov Acked-by: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index ce8e983f3c4d..6cd0b2160401 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3013,9 +3013,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, } else { /* * The fault handler has no page to lock, so it holds - * i_mmap_lock for read to protect against truncate. + * i_mmap_lock for write to protect against truncate. */ - i_mmap_unlock_read(vma->vm_file->f_mapping); + i_mmap_unlock_write(vma->vm_file->f_mapping); } goto uncharge_out; } @@ -3029,9 +3029,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, } else { /* * The fault handler has no page to lock, so it holds - * i_mmap_lock for read to protect against truncate. + * i_mmap_lock for write to protect against truncate. */ - i_mmap_unlock_read(vma->vm_file->f_mapping); + i_mmap_unlock_write(vma->vm_file->f_mapping); } return ret; uncharge_out: -- cgit v1.2.3-59-g8ed1b
From 904a9553d4fcdc0c7d5621f6178f0e07598701dc Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Tue, 8 Sep 2015 14:59:48 -0700 Subject: mm/page_alloc.c: refine the calculation of highest possible node id nr_node_ids records the highest possible node id plus one; it is calculated by scanning the bitmap node_states[N_POSSIBLE]. The current implementation scans the bitmap from the beginning, which means the whole bitmap is always walked. This patch reverses the order by scanning from the end with find_last_bit(). Signed-off-by: Wei Yang Cc: Tejun Heo Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5b5240b7f642..809e27e77b3a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5478,11 +5478,9 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, */ void __init setup_nr_node_ids(void) { - unsigned int node; - unsigned int highest = 0; + unsigned int highest; - for_each_node_mask(node, node_possible_map) - highest = node; + highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); nr_node_ids = highest + 1; } #endif -- cgit v1.2.3-59-g8ed1b
From 7f3eb55bfad8a6dfd880559210f5b21737d69815 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Tue, 8 Sep 2015 14:59:50 -0700 Subject: mm/page_alloc.c: remove unused variable in free_area_init_core() Commit febd5949e134 ("mm/memory hotplug: init the zone's size when calculating node totalpages") refines the function free_area_init_core(). After that change, its node_start_pfn and node_end_pfn parameters are no longer used. This patch removes them.
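As a quick worked illustration of the setup_nr_node_ids() change in the previous patch (illustration only, not part of the series):

	/*
	 * With nodes 0 and 2 possible, node_possible_map.bits encodes 0b0101, so
	 *
	 *	find_last_bit(node_possible_map.bits, MAX_NUMNODES) == 2
	 *	nr_node_ids == 2 + 1 == 3
	 *
	 * the same answer the old for_each_node_mask() loop produced by visiting
	 * every possible node just to remember the last one it saw.
	 */
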
Signed-off-by: Wei Yang Cc: Gu Zheng Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 809e27e77b3a..badc7d3bde43 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5303,8 +5303,7 @@ static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages, * * NOTE: pgdat should get zeroed by caller. */ -static void __paginginit free_area_init_core(struct pglist_data *pgdat, - unsigned long node_start_pfn, unsigned long node_end_pfn) +static void __paginginit free_area_init_core(struct pglist_data *pgdat) { enum zone_type j; int nid = pgdat->node_id; @@ -5467,7 +5466,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, (unsigned long)pgdat->node_mem_map); #endif - free_area_init_core(pgdat, start_pfn, end_pfn); + free_area_init_core(pgdat); } #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP -- cgit v1.2.3-59-g8ed1b From 4fcab5f437c481e51c270a2d12ef56a3f2367371 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Tue, 8 Sep 2015 14:59:53 -0700 Subject: mm/memblock.c: WARN_ON when flags differs from overlap region Each memblock_region has flags to indicates the type of this range. For the overlap case, memblock_add_range() inserts the lower part and leave the upper part as indicated in the overlapped region. If the flags of the new range differs from the overlapped region, the information recorded is not correct. This patch adds a WARN_ON when the flags of the new range differs from the overlapped region. Signed-off-by: Wei Yang Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 1 + 1 file changed, 1 insertion(+) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 95ce68c6da8a..bde61e8c28c5 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -569,6 +569,7 @@ repeat: #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP WARN_ON(nid != memblock_get_region_node(rgn)); #endif + WARN_ON(flags != rgn->flags); nr_new++; if (insert) memblock_insert_region(type, i++, base, -- cgit v1.2.3-59-g8ed1b From 06f805965fc205e27681eee99fd2376fafd8da65 Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Tue, 8 Sep 2015 15:00:16 -0700 Subject: memtest: use kstrtouint instead of simple_strtoul Since simple_strtoul is obsolete and memtest_pattern is type of int, use kstrtouint instead. Signed-off-by: Vladimir Murzin Cc: Leon Romanovsky Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memtest.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memtest.c b/mm/memtest.c index 0a1cc133f6d7..20e836138e00 100644 --- a/mm/memtest.c +++ b/mm/memtest.c @@ -89,16 +89,18 @@ static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end) } /* default is disabled */ -static int memtest_pattern __initdata; +static unsigned int memtest_pattern __initdata; static int __init parse_memtest(char *arg) { + int ret = 0; + if (arg) - memtest_pattern = simple_strtoul(arg, NULL, 0); + ret = kstrtouint(arg, 0, &memtest_pattern); else memtest_pattern = ARRAY_SIZE(patterns); - return 0; + return ret; } early_param("memtest", parse_memtest); -- cgit v1.2.3-59-g8ed1b From f373bafcad68834761b40da9cecda842f43d4797 Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Tue, 8 Sep 2015 15:00:19 -0700 Subject: memtest: cleanup log messages - prefer pr_info(... to printk(KERN_INFO ... 
- use %pa for phys_addr_t - use cpu_to_be64 while printing pattern in reserve_bad_mem() Signed-off-by: Vladimir Murzin Cc: Leon Romanovsky Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memtest.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/memtest.c b/mm/memtest.c index 20e836138e00..6bcf7d89adfc 100644 --- a/mm/memtest.c +++ b/mm/memtest.c @@ -31,10 +31,8 @@ static u64 patterns[] __initdata = { static void __init reserve_bad_mem(u64 pattern, phys_addr_t start_bad, phys_addr_t end_bad) { - printk(KERN_INFO " %016llx bad mem addr %010llx - %010llx reserved\n", - (unsigned long long) pattern, - (unsigned long long) start_bad, - (unsigned long long) end_bad); + pr_info(" %016llx bad mem addr %pa - %pa reserved\n", + cpu_to_be64(pattern), &start_bad, &end_bad); memblock_reserve(start_bad, end_bad - start_bad); } @@ -79,10 +77,8 @@ static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end) this_start = clamp(this_start, start, end); this_end = clamp(this_end, start, end); if (this_start < this_end) { - printk(KERN_INFO " %010llx - %010llx pattern %016llx\n", - (unsigned long long)this_start, - (unsigned long long)this_end, - (unsigned long long)cpu_to_be64(pattern)); + pr_info(" %pa - %pa pattern %016llx\n", + &this_start, &this_end, cpu_to_be64(pattern)); memtest(pattern, this_start, this_end - this_start); } } @@ -113,7 +109,7 @@ void __init early_memtest(phys_addr_t start, phys_addr_t end) if (!memtest_pattern) return; - printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern); + pr_info("early_memtest: # of tests: %u\n", memtest_pattern); for (i = memtest_pattern-1; i < UINT_MAX; --i) { idx = i % ARRAY_SIZE(patterns); do_one_pass(patterns[idx], start, end); -- cgit v1.2.3-59-g8ed1b From 3115aec4513e5bcb399235cac98a5637fe641c13 Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Tue, 8 Sep 2015 15:00:22 -0700 Subject: memtest: remove unused header files memtest does not require these headers to be included. Signed-off-by: Vladimir Murzin Cc: Leon Romanovsky Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memtest.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'mm') diff --git a/mm/memtest.c b/mm/memtest.c index 6bcf7d89adfc..8eaa4c3a5f65 100644 --- a/mm/memtest.c +++ b/mm/memtest.c @@ -1,11 +1,6 @@ #include -#include -#include #include -#include -#include #include -#include #include static u64 patterns[] __initdata = { -- cgit v1.2.3-59-g8ed1b From 8334b96221ff0dcbde4873d31eb4d84774ed8ed4 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 8 Sep 2015 15:00:24 -0700 Subject: mm: /proc/pid/smaps:: show proportional swap share of the mapping We want to know per-process workingset size for smart memory management on userland and we use swap(ex, zram) heavily to maximize memory efficiency so workingset includes swap as well as RSS. On such system, if there are lots of shared anonymous pages, it's really hard to figure out exactly how many each process consumes memory(ie, rss + wap) if the system has lots of shared anonymous memory(e.g, android). This patch introduces SwapPss field on /proc//smaps so we can get more exact workingset size per process. Bongkyu tested it. Result is below. 1. 
50M used swap SwapTotal: 461976 kB SwapFree: 411192 kB $ adb shell cat /proc/*/smaps | grep "SwapPss:" | awk '{sum += $2} END {print sum}'; 48236 $ adb shell cat /proc/*/smaps | grep "Swap:" | awk '{sum += $2} END {print sum}'; 141184 2. 240M used swap SwapTotal: 461976 kB SwapFree: 216808 kB $ adb shell cat /proc/*/smaps | grep "SwapPss:" | awk '{sum += $2} END {print sum}'; 230315 $ adb shell cat /proc/*/smaps | grep "Swap:" | awk '{sum += $2} END {print sum}'; 1387744 [akpm@linux-foundation.org: simplify kunmap_atomic() call] Signed-off-by: Minchan Kim Reported-by: Bongkyu Kim Tested-by: Bongkyu Kim Cc: Hugh Dickins Cc: Sergey Senozhatsky Cc: Jonathan Corbet Cc: Jerome Marchand Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/proc.txt | 18 +++++++++++----- fs/proc/task_mmu.c | 18 ++++++++++++++-- include/linux/swap.h | 6 ++++++ mm/swapfile.c | 42 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 77 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 6f7fafde0884..d411ca63c8b6 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -424,6 +424,7 @@ Private_Dirty: 0 kB Referenced: 892 kB Anonymous: 0 kB Swap: 0 kB +SwapPss: 0 kB KernelPageSize: 4 kB MMUPageSize: 4 kB Locked: 374 kB @@ -433,16 +434,23 @@ the first of these lines shows the same information as is displayed for the mapping in /proc/PID/maps. The remaining lines show the size of the mapping (size), the amount of the mapping that is currently resident in RAM (RSS), the process' proportional share of this mapping (PSS), the number of clean and -dirty private pages in the mapping. Note that even a page which is part of a -MAP_SHARED mapping, but has only a single pte mapped, i.e. is currently used -by only one process, is accounted as private and not as shared. "Referenced" -indicates the amount of memory currently marked as referenced or accessed. +dirty private pages in the mapping. + +The "proportional set size" (PSS) of a process is the count of pages it has +in memory, where each page is divided by the number of processes sharing it. +So if a process has 1000 pages all to itself, and 1000 shared with one other +process, its PSS will be 1500. +Note that even a page which is part of a MAP_SHARED mapping, but has only +a single pte mapped, i.e. is currently used by only one process, is accounted +as private and not as shared. +"Referenced" indicates the amount of memory currently marked as referenced or +accessed. "Anonymous" shows the amount of memory that does not belong to any file. Even a mapping associated with a file may contain anonymous pages: when MAP_PRIVATE and a page is modified, the file page is replaced by a private anonymous copy. "Swap" shows how much would-be-anonymous memory is also used, but out on swap. - +"SwapPss" shows proportional swap share of this mapping. "VmFlags" field deserves a separate description. This member represents the kernel flags associated with the particular virtual memory area in two letter encoded manner. 
The codes are the following: diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 67c76468a7be..41f1a50c10c9 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -446,6 +446,7 @@ struct mem_size_stats { unsigned long anonymous_thp; unsigned long swap; u64 pss; + u64 swap_pss; }; static void smaps_account(struct mem_size_stats *mss, struct page *page, @@ -492,9 +493,20 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, } else if (is_swap_pte(*pte)) { swp_entry_t swpent = pte_to_swp_entry(*pte); - if (!non_swap_entry(swpent)) + if (!non_swap_entry(swpent)) { + int mapcount; + mss->swap += PAGE_SIZE; - else if (is_migration_entry(swpent)) + mapcount = swp_swapcount(swpent); + if (mapcount >= 2) { + u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT; + + do_div(pss_delta, mapcount); + mss->swap_pss += pss_delta; + } else { + mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT; + } + } else if (is_migration_entry(swpent)) page = migration_entry_to_page(swpent); } @@ -640,6 +652,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) "Anonymous: %8lu kB\n" "AnonHugePages: %8lu kB\n" "Swap: %8lu kB\n" + "SwapPss: %8lu kB\n" "KernelPageSize: %8lu kB\n" "MMUPageSize: %8lu kB\n" "Locked: %8lu kB\n", @@ -654,6 +667,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) mss.anonymous >> 10, mss.anonymous_thp >> 10, mss.swap >> 10, + (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)), vma_kernel_pagesize(vma) >> 10, vma_mmu_pagesize(vma) >> 10, (vma->vm_flags & VM_LOCKED) ? diff --git a/include/linux/swap.h b/include/linux/swap.h index 31496d201fdc..6282f1eb3d6a 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -431,6 +431,7 @@ extern unsigned int count_swap_pages(int, int); extern sector_t map_swap_page(struct page *, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); extern int page_swapcount(struct page *); +extern int swp_swapcount(swp_entry_t entry); extern struct swap_info_struct *page_swap_info(struct page *); extern int reuse_swap_page(struct page *); extern int try_to_free_swap(struct page *); @@ -522,6 +523,11 @@ static inline int page_swapcount(struct page *page) return 0; } +static inline int swp_swapcount(swp_entry_t entry) +{ + return 0; +} + #define reuse_swap_page(page) (page_mapcount(page) == 1) static inline int try_to_free_swap(struct page *page) diff --git a/mm/swapfile.c b/mm/swapfile.c index aebc2dd6e649..58877312cf6b 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -874,6 +874,48 @@ int page_swapcount(struct page *page) return count; } +/* + * How many references to @entry are currently swapped out? + * This considers COUNT_CONTINUED so it returns exact answer. 
+ */ +int swp_swapcount(swp_entry_t entry) +{ + int count, tmp_count, n; + struct swap_info_struct *p; + struct page *page; + pgoff_t offset; + unsigned char *map; + + p = swap_info_get(entry); + if (!p) + return 0; + + count = swap_count(p->swap_map[swp_offset(entry)]); + if (!(count & COUNT_CONTINUED)) + goto out; + + count &= ~COUNT_CONTINUED; + n = SWAP_MAP_MAX + 1; + + offset = swp_offset(entry); + page = vmalloc_to_page(p->swap_map + offset); + offset &= ~PAGE_MASK; + VM_BUG_ON(page_private(page) != SWP_CONTINUED); + + do { + page = list_entry(page->lru.next, struct page, lru); + map = kmap_atomic(page); + tmp_count = map[offset]; + kunmap_atomic(map); + + count += (tmp_count & ~COUNT_CONTINUED) * n; + n *= (SWAP_CONT_MAX + 1); + } while (tmp_count & COUNT_CONTINUED); +out: + spin_unlock(&p->lock); + return count; +} + /* * We can write to an anon page without COW if there are no other references * to it. And as a side-effect, free up its swap: because the old content -- cgit v1.2.3-59-g8ed1b From 998ef75ddb5709bbea0bf1506cd2717348a3c647 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Tue, 8 Sep 2015 15:00:28 -0700 Subject: fs: do not prefault sys_write() user buffer pages === Short summary ==== iov_iter_fault_in_readable() works around a really rare case and we can avoid the deadlock it addresses in another way: disable page faults and work around copy failures by faulting after the copy in a slow path instead of before in a hot one. I have a little microbenchmark that does repeated, small writes to tmpfs. This patch speeds that micro up by 6.2%. === Long version === When doing a sys_write() we have a source buffer in userspace and then a target file page. If both of those are the same physical page, there is a potential deadlock that we avoid. It would happen something like this: 1. We start the write to the file 2. Allocate page cache page and set it !Uptodate 3. Touch the userspace buffer to copy in the user data 4. Page fault (since source of the write not yet mapped) 5. Page fault code tries to lock the page and deadlocks (more details on this below) To avoid this, we prefault the page to guarantee that this fault does not occur. But, this prefault comes at a cost. It is one of the most expensive things that we do in a hot write() path (especially if we compare it to the read path). It is working around a pretty rare case. To fix this, it's pretty simple. We move the "prefault" code to run after we attempt the copy. We explicitly disable page faults _during_ the copy, detect the copy failure, then execute the "prefault" ouside of where the page lock needs to be held. iov_iter_copy_from_user_atomic() actually already has an implicit pagefault_disable() inside of it (at least on x86), but we add an explicit one. I don't think we can depend on every kmap_atomic() implementation to pagefault_disable() for eternity. =================================================== The stack trace when this happens looks like this: wait_on_page_bit_killable+0xc0/0xd0 __lock_page_or_retry+0x84/0xa0 filemap_fault+0x1ed/0x3d0 __do_fault+0x41/0xc0 handle_mm_fault+0x9bb/0x1210 __do_page_fault+0x17f/0x3d0 do_page_fault+0xc/0x10 page_fault+0x22/0x30 generic_perform_write+0xca/0x1a0 __generic_file_write_iter+0x190/0x1f0 ext4_file_write_iter+0xe9/0x460 __vfs_write+0xaa/0xe0 vfs_write+0xa6/0x1a0 SyS_write+0x46/0xa0 entry_SYSCALL_64_fastpath+0x12/0x6a 0xffffffffffffffff (Note, this does *NOT* happen in practice today because the kmap_atomic() does a pagefault_disable(). 
The trace above was obtained by taking out the pagefault_disable().) You can trigger the deadlock with this little code snippet: fd = open("foo", O_RDWR); fdmap = mmap(NULL, len, PROT_WRITE|PROT_READ, MAP_SHARED, fd, 0); write(fd, &fdmap[0], 1); Signed-off-by: Dave Hansen Cc: Al Viro Cc: Michal Hocko Cc: Jens Axboe Cc: Tejun Heo Cc: NeilBrown Cc: Matthew Wilcox Cc: Paul Cassella Cc: Greg Thelen Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index 1283fc825458..30d69c0c5a38 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2473,21 +2473,6 @@ ssize_t generic_perform_write(struct file *file, iov_iter_count(i)); again: - /* - * Bring in the user page that we will copy from _first_. - * Otherwise there's a nasty deadlock on copying from the - * same page as we're writing to, without it being marked - * up-to-date. - * - * Not only is this an optimisation, but it is also required - * to check that the address is actually valid, when atomic - * usercopies are used, below. - */ - if (unlikely(iov_iter_fault_in_readable(i, bytes))) { - status = -EFAULT; - break; - } - status = a_ops->write_begin(file, mapping, pos, bytes, flags, &page, &fsdata); if (unlikely(status < 0)) @@ -2495,8 +2480,17 @@ again: if (mapping_writably_mapped(mapping)) flush_dcache_page(page); - + /* + * 'page' is now locked. If we are trying to copy from a + * mapping of 'page' in userspace, the copy might fault and + * would need PageUptodate() to complete. But, page can not be + * made Uptodate without acquiring the page lock, which we hold. + * Deadlock. Avoid with pagefault_disable(). Fix up below with + * iov_iter_fault_in_readable(). + */ + pagefault_disable(); copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); + pagefault_enable(); flush_dcache_page(page); status = a_ops->write_end(file, mapping, pos, bytes, copied, @@ -2519,6 +2513,14 @@ again: */ bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, iov_iter_single_seg_count(i)); + /* + * This is the fallback to recover if the copy from + * userspace above faults. + */ + if (unlikely(iov_iter_fault_in_readable(i, bytes))) { + status = -EFAULT; + break; + } goto again; } pos += copied; -- cgit v1.2.3-59-g8ed1b From 2c0b80d463c6ade539d51ad03bc7c41849fb37e8 Mon Sep 17 00:00:00 2001 From: Nicholas Krause Date: Tue, 8 Sep 2015 15:00:33 -0700 Subject: mm: make set_recommended_min_free_kbytes() return void This makes set_recommended_min_free_kbytes() have a return type of void as it cannot fail. 
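Stepping back to the "fs: do not prefault sys_write() user buffer pages" change above: the three-line reproducer quoted in that changelog can be fleshed out into a standalone test program. The sketch below is illustrative only: the file name, the O_CREAT/ftruncate() setup and the asserts are assumptions added so it runs on its own, and on a stock kernel the final write() completes normally because kmap_atomic() already disables page faults; the deadlock only shows up with that pagefault_disable() removed, as the changelog notes.

#include <assert.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 4096;
	char *fdmap;
	int fd;

	/* hypothetical scratch file; any writable file works */
	fd = open("foo", O_RDWR | O_CREAT, 0600);
	assert(fd >= 0);
	assert(ftruncate(fd, len) == 0);

	/* map the file, then write() into it from its own mapping, so the
	 * source buffer and the target page-cache page alias each other */
	fdmap = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, fd, 0);
	assert(fdmap != MAP_FAILED);

	write(fd, &fdmap[0], 1);
	return 0;
}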
Signed-off-by: Nicholas Krause Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 3e574efad8f8..71a4822c832b 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -106,7 +106,7 @@ static struct khugepaged_scan khugepaged_scan = { }; -static int set_recommended_min_free_kbytes(void) +static void set_recommended_min_free_kbytes(void) { struct zone *zone; int nr_zones = 0; @@ -141,7 +141,6 @@ static int set_recommended_min_free_kbytes(void) min_free_kbytes = recommended_min; } setup_per_zone_wmarks(); - return 0; } static int start_stop_khugepaged(void) -- cgit v1.2.3-59-g8ed1b From 6e0fc46dc2152d3e2d25a5d5b640ae3586c247c6 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 8 Sep 2015 15:00:36 -0700 Subject: mm, oom: organize oom context into struct There are essential elements to an oom context that are passed around to multiple functions. Organize these elements into a new struct, struct oom_control, that specifies the context for an oom condition. This patch introduces no functional change. Signed-off-by: David Rientjes Acked-by: Michal Hocko Cc: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/tty/sysrq.c | 12 +++++- include/linux/oom.h | 25 +++++++----- mm/memcontrol.c | 16 +++++--- mm/oom_kill.c | 115 ++++++++++++++++++++++++---------------------------- mm/page_alloc.c | 10 ++++- 5 files changed, 98 insertions(+), 80 deletions(-) (limited to 'mm') diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index b5b427888b24..ed3e258f4ee9 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -353,9 +353,17 @@ static struct sysrq_key_op sysrq_term_op = { static void moom_callback(struct work_struct *ignored) { + const gfp_t gfp_mask = GFP_KERNEL; + struct oom_control oc = { + .zonelist = node_zonelist(first_memory_node, gfp_mask), + .nodemask = NULL, + .gfp_mask = gfp_mask, + .order = 0, + .force_kill = true, + }; + mutex_lock(&oom_lock); - if (!out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL), - GFP_KERNEL, 0, NULL, true)) + if (!out_of_memory(&oc)) pr_info("OOM request ignored because killer is disabled\n"); mutex_unlock(&oom_lock); } diff --git a/include/linux/oom.h b/include/linux/oom.h index 7deecb7bca5e..cb29085ded37 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -12,6 +12,14 @@ struct notifier_block; struct mem_cgroup; struct task_struct; +struct oom_control { + struct zonelist *zonelist; + nodemask_t *nodemask; + gfp_t gfp_mask; + int order; + bool force_kill; +}; + /* * Types of limitations to the nodes from which allocations may occur */ @@ -57,21 +65,18 @@ extern unsigned long oom_badness(struct task_struct *p, extern int oom_kills_count(void); extern void note_oom_kill(void); -extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, +extern void oom_kill_process(struct oom_control *oc, struct task_struct *p, unsigned int points, unsigned long totalpages, - struct mem_cgroup *memcg, nodemask_t *nodemask, - const char *message); + struct mem_cgroup *memcg, const char *message); -extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, - int order, const nodemask_t *nodemask, +extern void check_panic_on_oom(struct oom_control *oc, + enum oom_constraint constraint, struct mem_cgroup *memcg); -extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task, - unsigned long 
totalpages, const nodemask_t *nodemask, - bool force_kill); +extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, + struct task_struct *task, unsigned long totalpages); -extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, - int order, nodemask_t *mask, bool force_kill); +extern bool out_of_memory(struct oom_control *oc); extern void exit_oom_victim(void); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 1af057575ce9..573d90347aa2 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1545,6 +1545,13 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, int order) { + struct oom_control oc = { + .zonelist = NULL, + .nodemask = NULL, + .gfp_mask = gfp_mask, + .order = order, + .force_kill = false, + }; struct mem_cgroup *iter; unsigned long chosen_points = 0; unsigned long totalpages; @@ -1563,7 +1570,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, goto unlock; } - check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg); + check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg); totalpages = mem_cgroup_get_limit(memcg) ? : 1; for_each_mem_cgroup_tree(iter, memcg) { struct css_task_iter it; @@ -1571,8 +1578,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, css_task_iter_start(&iter->css, &it); while ((task = css_task_iter_next(&it))) { - switch (oom_scan_process_thread(task, totalpages, NULL, - false)) { + switch (oom_scan_process_thread(&oc, task, totalpages)) { case OOM_SCAN_SELECT: if (chosen) put_task_struct(chosen); @@ -1610,8 +1616,8 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, if (chosen) { points = chosen_points * 1000 / totalpages; - oom_kill_process(chosen, gfp_mask, order, points, totalpages, - memcg, NULL, "Memory cgroup out of memory"); + oom_kill_process(&oc, chosen, points, totalpages, memcg, + "Memory cgroup out of memory"); } unlock: mutex_unlock(&oom_lock); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index dff991e0681e..80a7cbd89d66 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -196,27 +196,26 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, * Determine the type of allocation constraint. */ #ifdef CONFIG_NUMA -static enum oom_constraint constrained_alloc(struct zonelist *zonelist, - gfp_t gfp_mask, nodemask_t *nodemask, - unsigned long *totalpages) +static enum oom_constraint constrained_alloc(struct oom_control *oc, + unsigned long *totalpages) { struct zone *zone; struct zoneref *z; - enum zone_type high_zoneidx = gfp_zone(gfp_mask); + enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask); bool cpuset_limited = false; int nid; /* Default to all available memory */ *totalpages = totalram_pages + total_swap_pages; - if (!zonelist) + if (!oc->zonelist) return CONSTRAINT_NONE; /* * Reach here only when __GFP_NOFAIL is used. So, we should avoid * to kill current.We have to random task kill in this case. * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now. */ - if (gfp_mask & __GFP_THISNODE) + if (oc->gfp_mask & __GFP_THISNODE) return CONSTRAINT_NONE; /* @@ -224,17 +223,18 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist, * the page allocator means a mempolicy is in effect. Cpuset policy * is enforced in get_page_from_freelist(). 
*/ - if (nodemask && !nodes_subset(node_states[N_MEMORY], *nodemask)) { + if (oc->nodemask && + !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { *totalpages = total_swap_pages; - for_each_node_mask(nid, *nodemask) + for_each_node_mask(nid, *oc->nodemask) *totalpages += node_spanned_pages(nid); return CONSTRAINT_MEMORY_POLICY; } /* Check this allocation failure is caused by cpuset's wall function */ - for_each_zone_zonelist_nodemask(zone, z, zonelist, - high_zoneidx, nodemask) - if (!cpuset_zone_allowed(zone, gfp_mask)) + for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, + high_zoneidx, oc->nodemask) + if (!cpuset_zone_allowed(zone, oc->gfp_mask)) cpuset_limited = true; if (cpuset_limited) { @@ -246,20 +246,18 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist, return CONSTRAINT_NONE; } #else -static enum oom_constraint constrained_alloc(struct zonelist *zonelist, - gfp_t gfp_mask, nodemask_t *nodemask, - unsigned long *totalpages) +static enum oom_constraint constrained_alloc(struct oom_control *oc, + unsigned long *totalpages) { *totalpages = totalram_pages + total_swap_pages; return CONSTRAINT_NONE; } #endif -enum oom_scan_t oom_scan_process_thread(struct task_struct *task, - unsigned long totalpages, const nodemask_t *nodemask, - bool force_kill) +enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, + struct task_struct *task, unsigned long totalpages) { - if (oom_unkillable_task(task, NULL, nodemask)) + if (oom_unkillable_task(task, NULL, oc->nodemask)) return OOM_SCAN_CONTINUE; /* @@ -267,7 +265,7 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task, * Don't allow any other task to have access to the reserves. */ if (test_tsk_thread_flag(task, TIF_MEMDIE)) { - if (!force_kill) + if (!oc->force_kill) return OOM_SCAN_ABORT; } if (!task->mm) @@ -280,7 +278,7 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task, if (oom_task_origin(task)) return OOM_SCAN_SELECT; - if (task_will_free_mem(task) && !force_kill) + if (task_will_free_mem(task) && !oc->force_kill) return OOM_SCAN_ABORT; return OOM_SCAN_OK; @@ -289,12 +287,9 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task, /* * Simple selection loop. We chose the process with the highest * number of 'points'. Returns -1 on scan abort. 
- * - * (not docbooked, we don't want this one cluttering up the manual) */ -static struct task_struct *select_bad_process(unsigned int *ppoints, - unsigned long totalpages, const nodemask_t *nodemask, - bool force_kill) +static struct task_struct *select_bad_process(struct oom_control *oc, + unsigned int *ppoints, unsigned long totalpages) { struct task_struct *g, *p; struct task_struct *chosen = NULL; @@ -304,8 +299,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, for_each_process_thread(g, p) { unsigned int points; - switch (oom_scan_process_thread(p, totalpages, nodemask, - force_kill)) { + switch (oom_scan_process_thread(oc, p, totalpages)) { case OOM_SCAN_SELECT: chosen = p; chosen_points = ULONG_MAX; @@ -318,7 +312,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, case OOM_SCAN_OK: break; }; - points = oom_badness(p, NULL, nodemask, totalpages); + points = oom_badness(p, NULL, oc->nodemask, totalpages); if (!points || points < chosen_points) continue; /* Prefer thread group leaders for display purposes */ @@ -380,13 +374,13 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) rcu_read_unlock(); } -static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, - struct mem_cgroup *memcg, const nodemask_t *nodemask) +static void dump_header(struct oom_control *oc, struct task_struct *p, + struct mem_cgroup *memcg) { task_lock(current); pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, " "oom_score_adj=%hd\n", - current->comm, gfp_mask, order, + current->comm, oc->gfp_mask, oc->order, current->signal->oom_score_adj); cpuset_print_task_mems_allowed(current); task_unlock(current); @@ -396,7 +390,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, else show_mem(SHOW_MEM_FILTER_NODES); if (sysctl_oom_dump_tasks) - dump_tasks(memcg, nodemask); + dump_tasks(memcg, oc->nodemask); } /* @@ -487,10 +481,9 @@ void oom_killer_enable(void) * Must be called while holding a reference to p, which will be released upon * returning. */ -void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, +void oom_kill_process(struct oom_control *oc, struct task_struct *p, unsigned int points, unsigned long totalpages, - struct mem_cgroup *memcg, nodemask_t *nodemask, - const char *message) + struct mem_cgroup *memcg, const char *message) { struct task_struct *victim = p; struct task_struct *child; @@ -514,7 +507,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, task_unlock(p); if (__ratelimit(&oom_rs)) - dump_header(p, gfp_mask, order, memcg, nodemask); + dump_header(oc, p, memcg); task_lock(p); pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n", @@ -537,7 +530,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, /* * oom_badness() returns 0 if the thread is unkillable */ - child_points = oom_badness(child, memcg, nodemask, + child_points = oom_badness(child, memcg, oc->nodemask, totalpages); if (child_points > victim_points) { put_task_struct(victim); @@ -600,8 +593,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, /* * Determines whether the kernel must panic because of the panic_on_oom sysctl. 
*/ -void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, - int order, const nodemask_t *nodemask, +void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint, struct mem_cgroup *memcg) { if (likely(!sysctl_panic_on_oom)) @@ -615,7 +607,7 @@ void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, if (constraint != CONSTRAINT_NONE) return; } - dump_header(NULL, gfp_mask, order, memcg, nodemask); + dump_header(oc, NULL, memcg); panic("Out of memory: %s panic_on_oom is enabled\n", sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); } @@ -635,22 +627,16 @@ int unregister_oom_notifier(struct notifier_block *nb) EXPORT_SYMBOL_GPL(unregister_oom_notifier); /** - * __out_of_memory - kill the "best" process when we run out of memory - * @zonelist: zonelist pointer - * @gfp_mask: memory allocation flags - * @order: amount of memory being requested as a power of 2 - * @nodemask: nodemask passed to page allocator - * @force_kill: true if a task must be killed, even if others are exiting + * out_of_memory - kill the "best" process when we run out of memory + * @oc: pointer to struct oom_control * * If we run out of memory, we have the choice between either * killing a random task (bad), letting the system crash (worse) * OR try to be smart about which process to kill. Note that we * don't have to be perfect here, we just have to be good. */ -bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, - int order, nodemask_t *nodemask, bool force_kill) +bool out_of_memory(struct oom_control *oc) { - const nodemask_t *mpol_mask; struct task_struct *p; unsigned long totalpages; unsigned long freed = 0; @@ -684,30 +670,29 @@ bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, * Check if there were limitations on the allocation (only relevant for * NUMA) that may require different handling. */ - constraint = constrained_alloc(zonelist, gfp_mask, nodemask, - &totalpages); - mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL; - check_panic_on_oom(constraint, gfp_mask, order, mpol_mask, NULL); + constraint = constrained_alloc(oc, &totalpages); + if (constraint != CONSTRAINT_MEMORY_POLICY) + oc->nodemask = NULL; + check_panic_on_oom(oc, constraint, NULL); if (sysctl_oom_kill_allocating_task && current->mm && - !oom_unkillable_task(current, NULL, nodemask) && + !oom_unkillable_task(current, NULL, oc->nodemask) && current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) { get_task_struct(current); - oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL, - nodemask, + oom_kill_process(oc, current, 0, totalpages, NULL, "Out of memory (oom_kill_allocating_task)"); goto out; } - p = select_bad_process(&points, totalpages, mpol_mask, force_kill); + p = select_bad_process(oc, &points, totalpages); /* Found nothing?!?! Either we hang forever, or we panic. 
*/ if (!p) { - dump_header(NULL, gfp_mask, order, NULL, mpol_mask); + dump_header(oc, NULL, NULL); panic("Out of memory and no killable processes...\n"); } if (p != (void *)-1UL) { - oom_kill_process(p, gfp_mask, order, points, totalpages, NULL, - nodemask, "Out of memory"); + oom_kill_process(oc, p, points, totalpages, NULL, + "Out of memory"); killed = 1; } out: @@ -728,13 +713,21 @@ out: */ void pagefault_out_of_memory(void) { + struct oom_control oc = { + .zonelist = NULL, + .nodemask = NULL, + .gfp_mask = 0, + .order = 0, + .force_kill = false, + }; + if (mem_cgroup_oom_synchronize(true)) return; if (!mutex_trylock(&oom_lock)) return; - if (!out_of_memory(NULL, 0, 0, NULL, false)) { + if (!out_of_memory(&oc)) { /* * There shouldn't be any user tasks runnable while the * OOM killer is disabled, so the current task has to diff --git a/mm/page_alloc.c b/mm/page_alloc.c index badc7d3bde43..96536144185c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2693,6 +2693,13 @@ static inline struct page * __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac, unsigned long *did_some_progress) { + struct oom_control oc = { + .zonelist = ac->zonelist, + .nodemask = ac->nodemask, + .gfp_mask = gfp_mask, + .order = order, + .force_kill = false, + }; struct page *page; *did_some_progress = 0; @@ -2744,8 +2751,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, goto out; } /* Exhausted what can be done so it's blamo time */ - if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false) - || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) + if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) *did_some_progress = 1; out: mutex_unlock(&oom_lock); -- cgit v1.2.3-59-g8ed1b From 54e9e29132d7caefcad470281cae06ac34a982c8 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 8 Sep 2015 15:00:39 -0700 Subject: mm, oom: pass an oom order of -1 when triggered by sysrq The force_kill member of struct oom_control isn't needed if an order of -1 is used instead. This is the same as order == -1 in struct compact_control which requires full memory compaction. This patch introduces no functional change. 
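With an order of -1 now standing in for force_kill, a sysrq-triggered OOM kill is recognizable everywhere in oom_kill.c from that one field (see the moom_callback() hunk below). For testing, the path can be exercised from userspace by writing 'f' to /proc/sysrq-trigger; a minimal sketch follows, assuming a root shell and a kernel with the sysrq interface enabled:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* same effect as "echo f > /proc/sysrq-trigger" */
	int fd = open("/proc/sysrq-trigger", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "f", 1);	/* ends up in moom_callback(), i.e. oc.order == -1 */
	close(fd);
	return 0;
}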
Signed-off-by: David Rientjes Cc: Sergey Senozhatsky Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/tty/sysrq.c | 3 +-- include/linux/oom.h | 1 - mm/memcontrol.c | 1 - mm/oom_kill.c | 5 ++--- mm/page_alloc.c | 1 - 5 files changed, 3 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index ed3e258f4ee9..95b330a9ea98 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -358,8 +358,7 @@ static void moom_callback(struct work_struct *ignored) .zonelist = node_zonelist(first_memory_node, gfp_mask), .nodemask = NULL, .gfp_mask = gfp_mask, - .order = 0, - .force_kill = true, + .order = -1, }; mutex_lock(&oom_lock); diff --git a/include/linux/oom.h b/include/linux/oom.h index cb29085ded37..8fb67b9e6110 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -17,7 +17,6 @@ struct oom_control { nodemask_t *nodemask; gfp_t gfp_mask; int order; - bool force_kill; }; /* diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 573d90347aa2..9871f13fc35b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1550,7 +1550,6 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, .nodemask = NULL, .gfp_mask = gfp_mask, .order = order, - .force_kill = false, }; struct mem_cgroup *iter; unsigned long chosen_points = 0; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 80a7cbd89d66..77adc8e876aa 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -265,7 +265,7 @@ enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, * Don't allow any other task to have access to the reserves. */ if (test_tsk_thread_flag(task, TIF_MEMDIE)) { - if (!oc->force_kill) + if (oc->order != -1) return OOM_SCAN_ABORT; } if (!task->mm) @@ -278,7 +278,7 @@ enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, if (oom_task_origin(task)) return OOM_SCAN_SELECT; - if (task_will_free_mem(task) && !oc->force_kill) + if (task_will_free_mem(task) && oc->order != -1) return OOM_SCAN_ABORT; return OOM_SCAN_OK; @@ -718,7 +718,6 @@ void pagefault_out_of_memory(void) .nodemask = NULL, .gfp_mask = 0, .order = 0, - .force_kill = false, }; if (mem_cgroup_oom_synchronize(true)) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 96536144185c..5f9394df19bf 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2698,7 +2698,6 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, .nodemask = ac->nodemask, .gfp_mask = gfp_mask, .order = order, - .force_kill = false, }; struct page *page; -- cgit v1.2.3-59-g8ed1b From 071a4befebb655d6b31bf5c6bacd5a6df035224d Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 8 Sep 2015 15:00:42 -0700 Subject: mm, oom: do not panic for oom kills triggered from sysrq Sysrq+f is used to kill a process either for debug or when the VM is otherwise unresponsive. It is not intended to trigger a panic when no process may be killed. Avoid panicking the system for sysrq+f when no processes are killed. Signed-off-by: David Rientjes Suggested-by: Michal Hocko Cc: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/sysrq.txt | 3 ++- mm/oom_kill.c | 7 +++++-- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt index 267f39386f99..13f5619b2203 100644 --- a/Documentation/sysrq.txt +++ b/Documentation/sysrq.txt @@ -75,7 +75,8 @@ On all - write a character to /proc/sysrq-trigger. e.g.: 'e' - Send a SIGTERM to all processes, except for init. 
-'f' - Will call oom_kill to kill a memory hog process. +'f' - Will call the oom killer to kill a memory hog process, but do not + panic if nothing can be killed. 'g' - Used by kgdb (kernel debugger) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 77adc8e876aa..91dd59f63910 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -607,6 +607,9 @@ void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint, if (constraint != CONSTRAINT_NONE) return; } + /* Do not panic for oom kills triggered by sysrq */ + if (oc->order == -1) + return; dump_header(oc, NULL, memcg); panic("Out of memory: %s panic_on_oom is enabled\n", sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); @@ -686,11 +689,11 @@ bool out_of_memory(struct oom_control *oc) p = select_bad_process(oc, &points, totalpages); /* Found nothing?!?! Either we hang forever, or we panic. */ - if (!p) { + if (!p && oc->order != -1) { dump_header(oc, NULL, NULL); panic("Out of memory and no killable processes...\n"); } - if (p != (void *)-1UL) { + if (p && p != (void *)-1UL) { oom_kill_process(oc, p, points, totalpages, NULL, "Out of memory"); killed = 1; -- cgit v1.2.3-59-g8ed1b From 75e8f8b24cb0dc4951267d31f0a49e5ce2f345c4 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 8 Sep 2015 15:00:47 -0700 Subject: mm, oom: remove unnecessary variable The "killed" variable in out_of_memory() can be removed since the call to oom_kill_process() where we should block to allow the process time to exit is obvious. Signed-off-by: David Rientjes Acked-by: Michal Hocko Cc: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 91dd59f63910..1ecc0bcaecc5 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -645,7 +645,6 @@ bool out_of_memory(struct oom_control *oc) unsigned long freed = 0; unsigned int uninitialized_var(points); enum oom_constraint constraint = CONSTRAINT_NONE; - int killed = 0; if (oom_killer_disabled) return false; @@ -653,7 +652,7 @@ bool out_of_memory(struct oom_control *oc) blocking_notifier_call_chain(&oom_notify_list, 0, &freed); if (freed > 0) /* Got some memory back in the last second. */ - goto out; + return true; /* * If current has a pending SIGKILL or is exiting, then automatically @@ -666,7 +665,7 @@ bool out_of_memory(struct oom_control *oc) if (current->mm && (fatal_signal_pending(current) || task_will_free_mem(current))) { mark_oom_victim(current); - goto out; + return true; } /* @@ -684,7 +683,7 @@ bool out_of_memory(struct oom_control *oc) get_task_struct(current); oom_kill_process(oc, current, 0, totalpages, NULL, "Out of memory (oom_kill_allocating_task)"); - goto out; + return true; } p = select_bad_process(oc, &points, totalpages); @@ -696,16 +695,12 @@ bool out_of_memory(struct oom_control *oc) if (p && p != (void *)-1UL) { oom_kill_process(oc, p, points, totalpages, NULL, "Out of memory"); - killed = 1; - } -out: - /* - * Give the killed threads a good chance of exiting before trying to - * allocate memory again. - */ - if (killed) + /* + * Give the killed process a good chance to exit before trying + * to allocate memory again. 
+ */ schedule_timeout_killable(1); - + } return true; } -- cgit v1.2.3-59-g8ed1b From 3942d29918522ba6a393c19388301ec04df429cd Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:00:50 -0700 Subject: mm/slab_common: allow NULL cache pointer in kmem_cache_destroy() kmem_cache_destroy() does not tolerate a NULL kmem_cache pointer argument and performs a NULL-pointer dereference. This requires additional attention and effort from developers/reviewers and forces all kmem_cache_destroy() callers (200+ as of 4.1) to do a NULL check if (cache) kmem_cache_destroy(cache); Or, otherwise, be invalid kmem_cache_destroy() users. Tweak kmem_cache_destroy() and NULL-check the pointer there. Proposed by Andrew Morton. Link: https://lkml.org/lkml/2015/6/8/583 Signed-off-by: Sergey Senozhatsky Acked-by: David Rientjes Cc: Julia Lawall Cc: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab_common.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/slab_common.c b/mm/slab_common.c index c26829fe4e37..bde04a699ab6 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -640,6 +640,9 @@ void kmem_cache_destroy(struct kmem_cache *s) bool need_rcu_barrier = false; bool busy = false; + if (unlikely(!s)) + return; + BUG_ON(!is_root_cache(s)); get_online_cpus(); -- cgit v1.2.3-59-g8ed1b From 4e3ca3e033d1eea62fa16c3fdbef4f20427bd0de Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:00:53 -0700 Subject: mm/mempool: allow NULL `pool' pointer in mempool_destroy() mempool_destroy() does not tolerate a NULL mempool_t pointer argument and performs a NULL-pointer dereference. This requires additional attention and effort from developers/reviewers and forces all mempool_destroy() callers to do a NULL check if (pool) mempool_destroy(pool); Or, otherwise, be invalid mempool_destroy() users. Tweak mempool_destroy() and NULL-check the pointer there. Proposed by Andrew Morton. Link: https://lkml.org/lkml/2015/6/8/583 Signed-off-by: Sergey Senozhatsky Acked-by: David Rientjes Cc: Julia Lawall Cc: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempool.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/mempool.c b/mm/mempool.c index 2cc08de8b1db..4c533bc51d73 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -150,6 +150,9 @@ static void *remove_element(mempool_t *pool) */ void mempool_destroy(mempool_t *pool) { + if (unlikely(!pool)) + return; + while (pool->curr_nr) { void *element = remove_element(pool); pool->free(element, pool->pool_data); -- cgit v1.2.3-59-g8ed1b From 44d7175da6ea10e353e69b586bb68bbfef89e403 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:00:56 -0700 Subject: mm/dmapool: allow NULL `pool' pointer in dma_pool_destroy() dma_pool_destroy() does not tolerate a NULL dma_pool pointer argument and performs a NULL-pointer dereference. This requires additional attention and effort from developers/reviewers and forces all dma_pool_destroy() callers to do a NULL check if (pool) dma_pool_destroy(pool); Or, otherwise, be invalid dma_pool_destroy() users. Tweak dma_pool_destroy() and NULL-check the pointer there. Proposed by Andrew Morton. 
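All three of these patches converge on the free(3) convention: a destructor accepts NULL and silently does nothing, so error-unwinding code can tear everything down unconditionally. A minimal userspace analogue of the pattern is sketched below; the my_pool type and functions are purely illustrative stand-ins, not kernel API.

#include <stdlib.h>

struct my_pool {
	void *mem;
};

/* Tolerate NULL, mirroring free(3), so callers need no check. */
static void my_pool_destroy(struct my_pool *pool)
{
	if (!pool)
		return;
	free(pool->mem);
	free(pool);
}

int main(void)
{
	struct my_pool *pool = NULL;	/* e.g. an earlier allocation failed */

	my_pool_destroy(pool);		/* safe: no "if (pool)" needed at the call site */
	return 0;
}

This is exactly the call-site simplification the changelogs describe for kmem_cache_destroy(), mempool_destroy() and dma_pool_destroy().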
Link: https://lkml.org/lkml/2015/6/8/583 Signed-off-by: Sergey Senozhatsky Acked-by: David Rientjes Cc: Julia Lawall Cc: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/dmapool.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/dmapool.c b/mm/dmapool.c index 59d10d16f0a5..4b657099111f 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -271,6 +271,9 @@ void dma_pool_destroy(struct dma_pool *pool) { bool empty = false; + if (unlikely(!pool)) + return; + mutex_lock(&pools_reg_lock); mutex_lock(&pools_lock); list_del(&pool->pools); -- cgit v1.2.3-59-g8ed1b From 33398cf2f360c5ce24c8a22436d52a06ad4e5eb5 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 8 Sep 2015 15:01:02 -0700 Subject: memcg: export struct mem_cgroup mem_cgroup structure is defined in mm/memcontrol.c currently which means that the code outside of this file has to use external API even for trivial access stuff. This patch exports mm_struct with its dependencies and makes some of the exported functions inlines. This even helps to reduce the code size a bit (make defconfig + CONFIG_MEMCG=y) text data bss dec hex filename 12355346 1823792 1089536 15268674 e8fb42 vmlinux.before 12354970 1823792 1089536 15268298 e8f9ca vmlinux.after This is not much (370B) but better than nothing. We also save a function call in some hot paths like callers of mem_cgroup_count_vm_event which is used for accounting. The patch doesn't introduce any functional changes. [vdavykov@parallels.com: inline memcg_kmem_is_active] [vdavykov@parallels.com: do not expose type outside of CONFIG_MEMCG] [akpm@linux-foundation.org: memcontrol.h needs eventfd.h for eventfd_ctx] [akpm@linux-foundation.org: export mem_cgroup_from_task() to modules] Signed-off-by: Michal Hocko Reviewed-by: Vladimir Davydov Suggested-by: Johannes Weiner Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 370 +++++++++++++++++++++++++++++++++++++++++---- include/linux/swap.h | 10 +- include/net/sock.h | 28 ---- mm/memcontrol.c | 315 +------------------------------------- mm/memory-failure.c | 2 +- mm/slab_common.c | 2 +- mm/vmscan.c | 2 +- 7 files changed, 351 insertions(+), 378 deletions(-) (limited to 'mm') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 73b02b0a8f60..ab2f6880e27b 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -23,6 +23,11 @@ #include #include #include +#include +#include +#include +#include +#include struct mem_cgroup; struct page; @@ -67,12 +72,221 @@ enum mem_cgroup_events_index { MEMCG_NR_EVENTS, }; +/* + * Per memcg event counter is incremented at every pagein/pageout. With THP, + * it will be incremated by the number of pages. This counter is used for + * for trigger some periodic events. This is straightforward and better + * than using jiffies etc. to handle periodic memcg event. + */ +enum mem_cgroup_events_target { + MEM_CGROUP_TARGET_THRESH, + MEM_CGROUP_TARGET_SOFTLIMIT, + MEM_CGROUP_TARGET_NUMAINFO, + MEM_CGROUP_NTARGETS, +}; + +/* + * Bits in struct cg_proto.flags + */ +enum cg_proto_flags { + /* Currently active and new sockets should be assigned to cgroups */ + MEMCG_SOCK_ACTIVE, + /* It was ever activated; we must disarm static keys on destruction */ + MEMCG_SOCK_ACTIVATED, +}; + +struct cg_proto { + struct page_counter memory_allocated; /* Current allocated memory. */ + struct percpu_counter sockets_allocated; /* Current number of sockets. 
*/ + int memory_pressure; + long sysctl_mem[3]; + unsigned long flags; + /* + * memcg field is used to find which memcg we belong directly + * Each memcg struct can hold more than one cg_proto, so container_of + * won't really cut. + * + * The elegant solution would be having an inverse function to + * proto_cgroup in struct proto, but that means polluting the structure + * for everybody, instead of just for memcg users. + */ + struct mem_cgroup *memcg; +}; + #ifdef CONFIG_MEMCG +struct mem_cgroup_stat_cpu { + long count[MEM_CGROUP_STAT_NSTATS]; + unsigned long events[MEMCG_NR_EVENTS]; + unsigned long nr_page_events; + unsigned long targets[MEM_CGROUP_NTARGETS]; +}; + +struct mem_cgroup_reclaim_iter { + struct mem_cgroup *position; + /* scan generation, increased every round-trip */ + unsigned int generation; +}; + +/* + * per-zone information in memory controller. + */ +struct mem_cgroup_per_zone { + struct lruvec lruvec; + unsigned long lru_size[NR_LRU_LISTS]; + + struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; + + struct rb_node tree_node; /* RB tree node */ + unsigned long usage_in_excess;/* Set to the value by which */ + /* the soft limit is exceeded*/ + bool on_tree; + struct mem_cgroup *memcg; /* Back pointer, we cannot */ + /* use container_of */ +}; + +struct mem_cgroup_per_node { + struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; +}; + +struct mem_cgroup_threshold { + struct eventfd_ctx *eventfd; + unsigned long threshold; +}; + +/* For threshold */ +struct mem_cgroup_threshold_ary { + /* An array index points to threshold just below or equal to usage. */ + int current_threshold; + /* Size of entries[] */ + unsigned int size; + /* Array of thresholds */ + struct mem_cgroup_threshold entries[0]; +}; + +struct mem_cgroup_thresholds { + /* Primary thresholds array */ + struct mem_cgroup_threshold_ary *primary; + /* + * Spare threshold array. + * This is needed to make mem_cgroup_unregister_event() "never fail". + * It must be able to store at least primary->size - 1 entries. + */ + struct mem_cgroup_threshold_ary *spare; +}; + +/* + * The memory controller data structure. The memory controller controls both + * page cache and RSS per cgroup. We would eventually like to provide + * statistics based on the statistics developed by Rik Van Riel for clock-pro, + * to help the administrator determine what knobs to tune. + */ +struct mem_cgroup { + struct cgroup_subsys_state css; + + /* Accounted resources */ + struct page_counter memory; + struct page_counter memsw; + struct page_counter kmem; + + /* Normal memory consumption range */ + unsigned long low; + unsigned long high; + + unsigned long soft_limit; + + /* vmpressure notifications */ + struct vmpressure vmpressure; + + /* css_online() has been completed */ + int initialized; + + /* + * Should the accounting and control be hierarchical, per subtree? + */ + bool use_hierarchy; + + /* protected by memcg_oom_lock */ + bool oom_lock; + int under_oom; + + int swappiness; + /* OOM-Killer disable */ + int oom_kill_disable; + + /* protect arrays of thresholds */ + struct mutex thresholds_lock; + + /* thresholds for memory usage. RCU-protected */ + struct mem_cgroup_thresholds thresholds; + + /* thresholds for mem+swap usage. RCU-protected */ + struct mem_cgroup_thresholds memsw_thresholds; + + /* For oom notifier event fd */ + struct list_head oom_notify; + + /* + * Should we move charges of a task when a task is moved into this + * mem_cgroup ? And what type of charges should we move ? 
+ */ + unsigned long move_charge_at_immigrate; + /* + * set > 0 if pages under this cgroup are moving to other cgroup. + */ + atomic_t moving_account; + /* taken only while moving_account > 0 */ + spinlock_t move_lock; + struct task_struct *move_lock_task; + unsigned long move_lock_flags; + /* + * percpu counter. + */ + struct mem_cgroup_stat_cpu __percpu *stat; + spinlock_t pcp_counter_lock; + +#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) + struct cg_proto tcp_mem; +#endif +#if defined(CONFIG_MEMCG_KMEM) + /* Index in the kmem_cache->memcg_params.memcg_caches array */ + int kmemcg_id; + bool kmem_acct_activated; + bool kmem_acct_active; +#endif + + int last_scanned_node; +#if MAX_NUMNODES > 1 + nodemask_t scan_nodes; + atomic_t numainfo_events; + atomic_t numainfo_updating; +#endif + +#ifdef CONFIG_CGROUP_WRITEBACK + struct list_head cgwb_list; + struct wb_domain cgwb_domain; +#endif + + /* List of events which userspace want to receive */ + struct list_head event_list; + spinlock_t event_list_lock; + + struct mem_cgroup_per_node *nodeinfo[0]; + /* WARNING: nodeinfo must be the last member here */ +}; extern struct cgroup_subsys_state *mem_cgroup_root_css; -void mem_cgroup_events(struct mem_cgroup *memcg, +/** + * mem_cgroup_events - count memory events against a cgroup + * @memcg: the memory cgroup + * @idx: the event index + * @nr: the number of events to account for + */ +static inline void mem_cgroup_events(struct mem_cgroup *memcg, enum mem_cgroup_events_index idx, - unsigned int nr); + unsigned int nr) +{ + this_cpu_add(memcg->stat->events[idx], nr); +} bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); @@ -90,15 +304,31 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); -bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, - struct mem_cgroup *root); bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); -extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); +static inline +struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ + return css ? 
container_of(css, struct mem_cgroup, css) : NULL; +} + +struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, + struct mem_cgroup *, + struct mem_cgroup_reclaim_cookie *); +void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); + +static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, + struct mem_cgroup *root) +{ + if (root == memcg) + return true; + if (!root->use_hierarchy) + return false; + return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); +} static inline bool mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *memcg) @@ -114,22 +344,65 @@ static inline bool mm_match_cgroup(struct mm_struct *mm, return match; } -extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page); -struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, - struct mem_cgroup *, - struct mem_cgroup_reclaim_cookie *); -void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); +static inline bool mem_cgroup_disabled(void) +{ + if (memory_cgrp_subsys.disabled) + return true; + return false; +} /* * For memory reclaim. */ -int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec); -bool mem_cgroup_lruvec_online(struct lruvec *lruvec); int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); -unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list); -void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int); + +void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, + int nr_pages); + +static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec) +{ + struct mem_cgroup_per_zone *mz; + struct mem_cgroup *memcg; + + if (mem_cgroup_disabled()) + return true; + + mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); + memcg = mz->memcg; + + return !!(memcg->css.flags & CSS_ONLINE); +} + +static inline +unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) +{ + struct mem_cgroup_per_zone *mz; + + mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); + return mz->lru_size[lru]; +} + +static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) +{ + unsigned long inactive_ratio; + unsigned long inactive; + unsigned long active; + unsigned long gb; + + inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON); + active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON); + + gb = (inactive + active) >> (30 - PAGE_SHIFT); + if (gb) + inactive_ratio = int_sqrt(10 * gb); + else + inactive_ratio = 1; + + return inactive * inactive_ratio < active; +} + extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p); @@ -156,18 +429,26 @@ bool mem_cgroup_oom_synchronize(bool wait); extern int do_swap_account; #endif -static inline bool mem_cgroup_disabled(void) -{ - if (memory_cgrp_subsys.disabled) - return true; - return false; -} - struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page); -void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, - enum mem_cgroup_stat_index idx, int val); void mem_cgroup_end_page_stat(struct mem_cgroup *memcg); +/** + * mem_cgroup_update_page_stat - update page state statistics + * @memcg: memcg to account against + * @idx: page state item to account + * @val: number of pages (positive or negative) + * + * See mem_cgroup_begin_page_stat() for locking requirements. 
+ */ +static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, + enum mem_cgroup_stat_index idx, int val) +{ + VM_BUG_ON(!rcu_read_lock_held()); + + if (memcg) + this_cpu_add(memcg->stat->count[idx], val); +} + static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { @@ -184,13 +465,31 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, gfp_t gfp_mask, unsigned long *total_scanned); -void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) { + struct mem_cgroup *memcg; + if (mem_cgroup_disabled()) return; - __mem_cgroup_count_vm_event(mm, idx); + + rcu_read_lock(); + memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (unlikely(!memcg)) + goto out; + + switch (idx) { + case PGFAULT: + this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]); + break; + case PGMAJFAULT: + this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]); + break; + default: + BUG(); + } +out: + rcu_read_unlock(); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE void mem_cgroup_split_huge_fixup(struct page *head); @@ -275,12 +574,6 @@ static inline bool task_in_mem_cgroup(struct task_struct *task, return true; } -static inline struct cgroup_subsys_state - *mem_cgroup_css(struct mem_cgroup *memcg) -{ - return NULL; -} - static inline struct mem_cgroup * mem_cgroup_iter(struct mem_cgroup *root, struct mem_cgroup *prev, @@ -444,7 +737,10 @@ static inline bool memcg_kmem_enabled(void) return static_key_false(&memcg_kmem_enabled_key); } -bool memcg_kmem_is_active(struct mem_cgroup *memcg); +static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) +{ + return memcg->kmem_acct_active; +} /* * In general, we'll do everything in our power to not incur in any overhead @@ -463,7 +759,15 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order); void __memcg_kmem_uncharge_pages(struct page *page, int order); -int memcg_cache_id(struct mem_cgroup *memcg); +/* + * helper for acessing a memcg's index. It will be used as an index in the + * child cache array in kmem_cache, and also to derive its name. This function + * will return -1 when this is not a kmem-limited memcg. + */ +static inline int memcg_cache_id(struct mem_cgroup *memcg) +{ + return memcg ? memcg->kmemcg_id : -1; +} struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); void __memcg_kmem_put_cache(struct kmem_cache *cachep); diff --git a/include/linux/swap.h b/include/linux/swap.h index 6282f1eb3d6a..2ce190709280 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -351,7 +351,15 @@ extern void check_move_unevictable_pages(struct page **, int nr_pages); extern int kswapd_run(int nid); extern void kswapd_stop(int nid); #ifdef CONFIG_MEMCG -extern int mem_cgroup_swappiness(struct mem_cgroup *mem); +static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) +{ + /* root ? 
*/ + if (mem_cgroup_disabled() || !memcg->css.parent) + return vm_swappiness; + + return memcg->swappiness; +} + #else static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) { diff --git a/include/net/sock.h b/include/net/sock.h index 43c6abcf06ab..a98c71ea40c5 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1042,34 +1042,6 @@ struct proto { #endif }; -/* - * Bits in struct cg_proto.flags - */ -enum cg_proto_flags { - /* Currently active and new sockets should be assigned to cgroups */ - MEMCG_SOCK_ACTIVE, - /* It was ever activated; we must disarm static keys on destruction */ - MEMCG_SOCK_ACTIVATED, -}; - -struct cg_proto { - struct page_counter memory_allocated; /* Current allocated memory. */ - struct percpu_counter sockets_allocated; /* Current number of sockets. */ - int memory_pressure; - long sysctl_mem[3]; - unsigned long flags; - /* - * memcg field is used to find which memcg we belong directly - * Each memcg struct can hold more than one cg_proto, so container_of - * won't really cut. - * - * The elegant solution would be having an inverse function to - * proto_cgroup in struct proto, but that means polluting the structure - * for everybody, instead of just for memcg users. - */ - struct mem_cgroup *memcg; -}; - int proto_register(struct proto *prot, int alloc_slab); void proto_unregister(struct proto *prot); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9871f13fc35b..6935f77589e7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -111,56 +111,10 @@ static const char * const mem_cgroup_lru_names[] = { "unevictable", }; -/* - * Per memcg event counter is incremented at every pagein/pageout. With THP, - * it will be incremated by the number of pages. This counter is used for - * for trigger some periodic events. This is straightforward and better - * than using jiffies etc. to handle periodic memcg event. - */ -enum mem_cgroup_events_target { - MEM_CGROUP_TARGET_THRESH, - MEM_CGROUP_TARGET_SOFTLIMIT, - MEM_CGROUP_TARGET_NUMAINFO, - MEM_CGROUP_NTARGETS, -}; #define THRESHOLDS_EVENTS_TARGET 128 #define SOFTLIMIT_EVENTS_TARGET 1024 #define NUMAINFO_EVENTS_TARGET 1024 -struct mem_cgroup_stat_cpu { - long count[MEM_CGROUP_STAT_NSTATS]; - unsigned long events[MEMCG_NR_EVENTS]; - unsigned long nr_page_events; - unsigned long targets[MEM_CGROUP_NTARGETS]; -}; - -struct reclaim_iter { - struct mem_cgroup *position; - /* scan generation, increased every round-trip */ - unsigned int generation; -}; - -/* - * per-zone information in memory controller. - */ -struct mem_cgroup_per_zone { - struct lruvec lruvec; - unsigned long lru_size[NR_LRU_LISTS]; - - struct reclaim_iter iter[DEF_PRIORITY + 1]; - - struct rb_node tree_node; /* RB tree node */ - unsigned long usage_in_excess;/* Set to the value by which */ - /* the soft limit is exceeded*/ - bool on_tree; - struct mem_cgroup *memcg; /* Back pointer, we cannot */ - /* use container_of */ -}; - -struct mem_cgroup_per_node { - struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; -}; - /* * Cgroups above their limits are maintained in a RB-Tree, independent of * their hierarchy representation @@ -181,32 +135,6 @@ struct mem_cgroup_tree { static struct mem_cgroup_tree soft_limit_tree __read_mostly; -struct mem_cgroup_threshold { - struct eventfd_ctx *eventfd; - unsigned long threshold; -}; - -/* For threshold */ -struct mem_cgroup_threshold_ary { - /* An array index points to threshold just below or equal to usage. 
*/ - int current_threshold; - /* Size of entries[] */ - unsigned int size; - /* Array of thresholds */ - struct mem_cgroup_threshold entries[0]; -}; - -struct mem_cgroup_thresholds { - /* Primary thresholds array */ - struct mem_cgroup_threshold_ary *primary; - /* - * Spare threshold array. - * This is needed to make mem_cgroup_unregister_event() "never fail". - * It must be able to store at least primary->size - 1 entries. - */ - struct mem_cgroup_threshold_ary *spare; -}; - /* for OOM */ struct mem_cgroup_eventfd_list { struct list_head list; @@ -256,113 +184,6 @@ struct mem_cgroup_event { static void mem_cgroup_threshold(struct mem_cgroup *memcg); static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); -/* - * The memory controller data structure. The memory controller controls both - * page cache and RSS per cgroup. We would eventually like to provide - * statistics based on the statistics developed by Rik Van Riel for clock-pro, - * to help the administrator determine what knobs to tune. - */ -struct mem_cgroup { - struct cgroup_subsys_state css; - - /* Accounted resources */ - struct page_counter memory; - struct page_counter memsw; - struct page_counter kmem; - - /* Normal memory consumption range */ - unsigned long low; - unsigned long high; - - unsigned long soft_limit; - - /* vmpressure notifications */ - struct vmpressure vmpressure; - - /* css_online() has been completed */ - int initialized; - - /* - * Should the accounting and control be hierarchical, per subtree? - */ - bool use_hierarchy; - - /* protected by memcg_oom_lock */ - bool oom_lock; - int under_oom; - - int swappiness; - /* OOM-Killer disable */ - int oom_kill_disable; - - /* protect arrays of thresholds */ - struct mutex thresholds_lock; - - /* thresholds for memory usage. RCU-protected */ - struct mem_cgroup_thresholds thresholds; - - /* thresholds for mem+swap usage. RCU-protected */ - struct mem_cgroup_thresholds memsw_thresholds; - - /* For oom notifier event fd */ - struct list_head oom_notify; - - /* - * Should we move charges of a task when a task is moved into this - * mem_cgroup ? And what type of charges should we move ? - */ - unsigned long move_charge_at_immigrate; - /* - * set > 0 if pages under this cgroup are moving to other cgroup. - */ - atomic_t moving_account; - /* taken only while moving_account > 0 */ - spinlock_t move_lock; - struct task_struct *move_lock_task; - unsigned long move_lock_flags; - /* - * percpu counter. - */ - struct mem_cgroup_stat_cpu __percpu *stat; - spinlock_t pcp_counter_lock; - -#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) - struct cg_proto tcp_mem; -#endif -#if defined(CONFIG_MEMCG_KMEM) - /* Index in the kmem_cache->memcg_params.memcg_caches array */ - int kmemcg_id; - bool kmem_acct_activated; - bool kmem_acct_active; -#endif - - int last_scanned_node; -#if MAX_NUMNODES > 1 - nodemask_t scan_nodes; - atomic_t numainfo_events; - atomic_t numainfo_updating; -#endif - -#ifdef CONFIG_CGROUP_WRITEBACK - struct list_head cgwb_list; - struct wb_domain cgwb_domain; -#endif - - /* List of events which userspace want to receive */ - struct list_head event_list; - spinlock_t event_list_lock; - - struct mem_cgroup_per_node *nodeinfo[0]; - /* WARNING: nodeinfo must be the last member here */ -}; - -#ifdef CONFIG_MEMCG_KMEM -bool memcg_kmem_is_active(struct mem_cgroup *memcg) -{ - return memcg->kmem_acct_active; -} -#endif - /* Stuffs for move charges at task migration. */ /* * Types of charges to be moved. 
@@ -423,11 +244,6 @@ enum res_type { */ static DEFINE_MUTEX(memcg_create_mutex); -struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s) -{ - return s ? container_of(s, struct mem_cgroup, css) : NULL; -} - /* Some nice accessors for the vmpressure. */ struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) { @@ -593,11 +409,6 @@ mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) return &memcg->nodeinfo[nid]->zoneinfo[zid]; } -struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg) -{ - return &memcg->css; -} - /** * mem_cgroup_css_from_page - css of the memcg associated with a page * @page: page of interest @@ -876,14 +687,6 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, __this_cpu_add(memcg->stat->nr_page_events, nr_pages); } -unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) -{ - struct mem_cgroup_per_zone *mz; - - mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); - return mz->lru_size[lru]; -} - static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, int nid, unsigned int lru_mask) @@ -986,6 +789,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); } +EXPORT_SYMBOL(mem_cgroup_from_task); static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) { @@ -1031,7 +835,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, struct mem_cgroup *prev, struct mem_cgroup_reclaim_cookie *reclaim) { - struct reclaim_iter *uninitialized_var(iter); + struct mem_cgroup_reclaim_iter *uninitialized_var(iter); struct cgroup_subsys_state *css = NULL; struct mem_cgroup *memcg = NULL; struct mem_cgroup *pos = NULL; @@ -1173,30 +977,6 @@ void mem_cgroup_iter_break(struct mem_cgroup *root, iter != NULL; \ iter = mem_cgroup_iter(NULL, iter, NULL)) -void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) -{ - struct mem_cgroup *memcg; - - rcu_read_lock(); - memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); - if (unlikely(!memcg)) - goto out; - - switch (idx) { - case PGFAULT: - this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]); - break; - case PGMAJFAULT: - this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]); - break; - default: - BUG(); - } -out: - rcu_read_unlock(); -} -EXPORT_SYMBOL(__mem_cgroup_count_vm_event); - /** * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg * @zone: zone of the wanted lruvec @@ -1295,15 +1075,6 @@ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, VM_BUG_ON((long)(*lru_size) < 0); } -bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root) -{ - if (root == memcg) - return true; - if (!root->use_hierarchy) - return false; - return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); -} - bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) { struct mem_cgroup *task_memcg; @@ -1330,39 +1101,6 @@ bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) return ret; } -int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) -{ - unsigned long inactive_ratio; - unsigned long inactive; - unsigned long active; - unsigned long gb; - - inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON); - active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON); - - gb = (inactive + active) >> (30 - PAGE_SHIFT); - if (gb) - inactive_ratio = int_sqrt(10 * gb); - else - inactive_ratio = 1; - - 
return inactive * inactive_ratio < active; -} - -bool mem_cgroup_lruvec_online(struct lruvec *lruvec) -{ - struct mem_cgroup_per_zone *mz; - struct mem_cgroup *memcg; - - if (mem_cgroup_disabled()) - return true; - - mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); - memcg = mz->memcg; - - return !!(memcg->css.flags & CSS_ONLINE); -} - #define mem_cgroup_from_counter(counter, member) \ container_of(counter, struct mem_cgroup, member) @@ -1394,15 +1132,6 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) return margin; } -int mem_cgroup_swappiness(struct mem_cgroup *memcg) -{ - /* root ? */ - if (mem_cgroup_disabled() || !memcg->css.parent) - return vm_swappiness; - - return memcg->swappiness; -} - /* * A routine for checking "mem" is under move_account() or not. * @@ -2067,23 +1796,6 @@ void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) } EXPORT_SYMBOL(mem_cgroup_end_page_stat); -/** - * mem_cgroup_update_page_stat - update page state statistics - * @memcg: memcg to account against - * @idx: page state item to account - * @val: number of pages (positive or negative) - * - * See mem_cgroup_begin_page_stat() for locking requirements. - */ -void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, - enum mem_cgroup_stat_index idx, int val) -{ - VM_BUG_ON(!rcu_read_lock_held()); - - if (memcg) - this_cpu_add(memcg->stat->count[idx], val); -} - /* * size of first charge trial. "32" comes from vmscan.c's magic value. * TODO: maybe necessary to use big numbers in big irons. @@ -2509,16 +2221,6 @@ void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages) css_put_many(&memcg->css, nr_pages); } -/* - * helper for acessing a memcg's index. It will be used as an index in the - * child cache array in kmem_cache, and also to derive its name. This function - * will return -1 when this is not a kmem-limited memcg. - */ -int memcg_cache_id(struct mem_cgroup *memcg) -{ - return memcg ? 
memcg->kmemcg_id : -1; -} - static int memcg_alloc_cache_id(void) { int id, size; @@ -5525,19 +5227,6 @@ struct cgroup_subsys memory_cgrp_subsys = { .early_init = 0, }; -/** - * mem_cgroup_events - count memory events against a cgroup - * @memcg: the memory cgroup - * @idx: the event index - * @nr: the number of events to account for - */ -void mem_cgroup_events(struct mem_cgroup *memcg, - enum mem_cgroup_events_index idx, - unsigned int nr) -{ - this_cpu_add(memcg->stat->events[idx], nr); -} - /** * mem_cgroup_low - check if memory consumption is below the normal range * @root: the highest ancestor to consider diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 1f4446a90cef..016c814101ed 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -146,7 +146,7 @@ static int hwpoison_filter_task(struct page *p) if (!mem) return -EINVAL; - css = mem_cgroup_css(mem); + css = &mem->css; ino = cgroup_ino(css->cgroup); css_put(css); diff --git a/mm/slab_common.c b/mm/slab_common.c index bde04a699ab6..5ce4faeb16fb 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -500,7 +500,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, struct kmem_cache *root_cache) { static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */ - struct cgroup_subsys_state *css = mem_cgroup_css(memcg); + struct cgroup_subsys_state *css = &memcg->css; struct memcg_cache_array *arr; struct kmem_cache *s = NULL; char *cache_name; diff --git a/mm/vmscan.c b/mm/vmscan.c index b1139039122a..bf23c88621ce 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -175,7 +175,7 @@ static bool sane_reclaim(struct scan_control *sc) if (!memcg) return true; #ifdef CONFIG_CGROUP_WRITEBACK - if (cgroup_on_dfl(mem_cgroup_css(memcg)->cgroup)) + if (memcg->css.cgroup) return true; #endif return false; -- cgit v1.2.3-59-g8ed1b From 9f2115f93b88e5e8d48b87b153e36a537afb58cb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 8 Sep 2015 15:01:10 -0700 Subject: memcg: restructure mem_cgroup_can_attach() Restructure it to lower nesting level and help the planned threadgroup leader iteration changes. This is pure reorganization. Signed-off-by: Tejun Heo Signed-off-by: Michal Hocko Reviewed-by: Vladimir Davydov Cc: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 61 ++++++++++++++++++++++++++++++--------------------------- 1 file changed, 32 insertions(+), 29 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6935f77589e7..191d8aa5da71 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4834,10 +4834,12 @@ static void mem_cgroup_clear_mc(void) static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, struct cgroup_taskset *tset) { - struct task_struct *p = cgroup_taskset_first(tset); - int ret = 0; struct mem_cgroup *memcg = mem_cgroup_from_css(css); + struct mem_cgroup *from; + struct task_struct *p; + struct mm_struct *mm; unsigned long move_flags; + int ret = 0; /* * We are now commited to this value whatever it is. Changes in this @@ -4845,36 +4847,37 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, * So we need to save it, and keep it going. 
*/ move_flags = READ_ONCE(memcg->move_charge_at_immigrate); - if (move_flags) { - struct mm_struct *mm; - struct mem_cgroup *from = mem_cgroup_from_task(p); + if (!move_flags) + return 0; - VM_BUG_ON(from == memcg); + p = cgroup_taskset_first(tset); + from = mem_cgroup_from_task(p); - mm = get_task_mm(p); - if (!mm) - return 0; - /* We move charges only when we move a owner of the mm */ - if (mm->owner == p) { - VM_BUG_ON(mc.from); - VM_BUG_ON(mc.to); - VM_BUG_ON(mc.precharge); - VM_BUG_ON(mc.moved_charge); - VM_BUG_ON(mc.moved_swap); - - spin_lock(&mc.lock); - mc.from = from; - mc.to = memcg; - mc.flags = move_flags; - spin_unlock(&mc.lock); - /* We set mc.moving_task later */ - - ret = mem_cgroup_precharge_mc(mm); - if (ret) - mem_cgroup_clear_mc(); - } - mmput(mm); + VM_BUG_ON(from == memcg); + + mm = get_task_mm(p); + if (!mm) + return 0; + /* We move charges only when we move a owner of the mm */ + if (mm->owner == p) { + VM_BUG_ON(mc.from); + VM_BUG_ON(mc.to); + VM_BUG_ON(mc.precharge); + VM_BUG_ON(mc.moved_charge); + VM_BUG_ON(mc.moved_swap); + + spin_lock(&mc.lock); + mc.from = from; + mc.to = memcg; + mc.flags = move_flags; + spin_unlock(&mc.lock); + /* We set mc.moving_task later */ + + ret = mem_cgroup_precharge_mc(mm); + if (ret) + mem_cgroup_clear_mc(); } + mmput(mm); return ret; } -- cgit v1.2.3-59-g8ed1b From a03f1f058969ec350fb7451a6fbca23096ee5727 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 8 Sep 2015 15:01:13 -0700 Subject: memcg, tcp_kmem: check for cg_proto in sock_update_memcg sk_prot->proto_cgroup is allowed to return NULL but sock_update_memcg doesn't check for NULL. The function relies on the mem_cgroup_is_root check because we shouldn't get NULL otherwise because mem_cgroup_from_task will always return !NULL. All other callers are checking for NULL and we can safely replace mem_cgroup_is_root() check by cg_proto != NULL which will be more straightforward (proto_cgroup returns NULL for the root memcg already). Signed-off-by: Michal Hocko Reviewed-by: Vladimir Davydov Cc: Johannes Weiner Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 191d8aa5da71..3033e6c42229 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -315,8 +315,7 @@ void sock_update_memcg(struct sock *sk) rcu_read_lock(); memcg = mem_cgroup_from_task(current); cg_proto = sk->sk_prot->proto_cgroup(memcg); - if (!mem_cgroup_is_root(memcg) && - memcg_proto_active(cg_proto) && + if (cg_proto && memcg_proto_active(cg_proto) && css_tryget_online(&memcg->css)) { sk->sk_cgrp = cg_proto; } -- cgit v1.2.3-59-g8ed1b From e752eb68811aeece2220e183e23369a34122fb5e Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 8 Sep 2015 15:01:16 -0700 Subject: memcg: move memcg_proto_active from sock.h The only user is sock_update_memcg which is living in memcontrol.c so it doesn't make much sense to pollute sock.h by this inline helper. Move it to memcontrol.c and open code it into its only caller. 
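A minimal sketch of the resulting call site, assuming only what the diff below shows; the helper's test_bit() body simply moves inline into sock_update_memcg():

/*
 * Sketch (simplified from the diff that follows): the single-use helper
 *
 *	static inline bool memcg_proto_active(struct cg_proto *cg_proto)
 *	{
 *		return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
 *	}
 *
 * is removed from sock.h and its body is open coded at the one call site:
 */
	if (cg_proto && test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags) &&
	    css_tryget_online(&memcg->css)) {
		sk->sk_cgrp = cg_proto;
	}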
Signed-off-by: Michal Hocko Cc: Vladimir Davydov Cc: Johannes Weiner Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/net/sock.h | 5 ----- mm/memcontrol.c | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) (limited to 'mm') diff --git a/include/net/sock.h b/include/net/sock.h index a98c71ea40c5..7aa78440559a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1045,11 +1045,6 @@ struct proto { int proto_register(struct proto *prot, int alloc_slab); void proto_unregister(struct proto *prot); -static inline bool memcg_proto_active(struct cg_proto *cg_proto) -{ - return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags); -} - #ifdef SOCK_REFCNT_DEBUG static inline void sk_refcnt_debug_inc(struct sock *sk) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3033e6c42229..1742a2db89c7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -315,7 +315,7 @@ void sock_update_memcg(struct sock *sk) rcu_read_lock(); memcg = mem_cgroup_from_task(current); cg_proto = sk->sk_prot->proto_cgroup(memcg); - if (cg_proto && memcg_proto_active(cg_proto) && + if (cg_proto && test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags) && css_tryget_online(&memcg->css)) { sk->sk_cgrp = cg_proto; } -- cgit v1.2.3-59-g8ed1b From aa016d145d4c3b8a7273429528f19d5b423ddbc7 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 8 Sep 2015 15:01:22 -0700 Subject: mm, page_isolation: remove bogus tests for isolated pages The __test_page_isolated_in_pageblock() is used to verify whether all pages in pageblock were either successfully isolated, or are hwpoisoned. Two of the possible state of pages, that are tested, are however bogus and misleading. Both tests rely on get_freepage_migratetype(page), which however has no guarantees about pages on freelists. Specifically, it doesn't guarantee that the migratetype returned by the function actually matches the migratetype of the freelist that the page is on. Such guarantee is not its purpose and would have negative impact on allocator performance. The first test checks whether the freepage_migratetype equals MIGRATE_ISOLATE, supposedly to catch races between page isolation and allocator activity. These races should be fixed nowadays with 51bb1a4093 ("mm/page_alloc: add freepage on isolate pageblock to correct buddy list") and related patches. As explained above, the check wouldn't be able to catch them reliably anyway. For the same reason false positives can happen, although they are harmless, as the move_freepages() call would just move the page to the same freelist it's already on. So removing the test is not a bug fix, just cleanup. After this patch, we assume that all PageBuddy pages are on the correct freelist and that the races were really fixed. A truly reliable verification in the form of e.g. VM_BUG_ON() would be complicated and is arguably not needed. The second test (page_count(page) == 0 && get_freepage_migratetype(page) == MIGRATE_ISOLATE) is probably supposed (the code comes from a big memory isolation patch from 2007) to catch pages on MIGRATE_ISOLATE pcplists. However, pcplists don't contain MIGRATE_ISOLATE freepages nowadays, those are freed directly to free lists, so the check is obsolete. Remove it as well. Signed-off-by: Vlastimil Babka Acked-by: Joonsoo Kim Cc: Minchan Kim Acked-by: Michal Nazarewicz Cc: Laura Abbott Reviewed-by: Naoya Horiguchi Cc: Seungho Park Cc: Johannes Weiner Cc: "Kirill A. 
Shutemov" Acked-by: Mel Gorman Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 4 ++++ mm/page_isolation.c | 30 ++++++------------------------ 2 files changed, 10 insertions(+), 24 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5f9394df19bf..a329cfaf634d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -788,7 +788,11 @@ static void free_pcppages_bulk(struct zone *zone, int count, page = list_entry(list->prev, struct page, lru); /* must delete as __free_one_page list manipulates */ list_del(&page->lru); + mt = get_freepage_migratetype(page); + /* MIGRATE_ISOLATE page should not go to pcplists */ + VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); + /* Pageblock could have been isolated meanwhile */ if (unlikely(has_isolate_pageblock(zone))) mt = get_pageblock_migratetype(page); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 303c908790ef..32fdc1df05e5 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -223,34 +223,16 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, continue; } page = pfn_to_page(pfn); - if (PageBuddy(page)) { + if (PageBuddy(page)) /* - * If race between isolatation and allocation happens, - * some free pages could be in MIGRATE_MOVABLE list - * although pageblock's migratation type of the page - * is MIGRATE_ISOLATE. Catch it and move the page into - * MIGRATE_ISOLATE list. + * If the page is on a free list, it has to be on + * the correct MIGRATE_ISOLATE freelist. There is no + * simple way to verify that as VM_BUG_ON(), though. */ - if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) { - struct page *end_page; - - end_page = page + (1 << page_order(page)) - 1; - move_freepages(page_zone(page), page, end_page, - MIGRATE_ISOLATE); - } pfn += 1 << page_order(page); - } - else if (page_count(page) == 0 && - get_freepage_migratetype(page) == MIGRATE_ISOLATE) - pfn += 1; - else if (skip_hwpoisoned_pages && PageHWPoison(page)) { - /* - * The HWPoisoned page may be not in buddy - * system, and page_count() is not 0. - */ + else if (skip_hwpoisoned_pages && PageHWPoison(page)) + /* A HWPoisoned page cannot be also PageBuddy */ pfn++; - continue; - } else break; } -- cgit v1.2.3-59-g8ed1b From bb14c2c75db972a1bf65fd63c8d5a0b41a8f263a Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 8 Sep 2015 15:01:25 -0700 Subject: mm: rename and move get/set_freepage_migratetype The pair of get/set_freepage_migratetype() functions are used to cache pageblock migratetype for a page put on a pcplist, so that it does not have to be retrieved again when the page is put on a free list (e.g. when pcplists become full). Historically it was also assumed that the value is accurate for pages on freelists (as the functions' names unfortunately suggest), but that cannot be guaranteed without affecting various allocator fast paths. It is in fact not needed and all such uses have been removed. The last remaining (but pointless) usage related to pages of freelists is in move_freepages(), which this patch removes. To prevent further confusion, rename the functions to get/set_pcppage_migratetype() and expand their description. Since all the users are now in mm/page_alloc.c, move the functions there from the shared header. Signed-off-by: Vlastimil Babka Acked-by: David Rientjes Acked-by: Joonsoo Kim Cc: Minchan Kim Acked-by: Michal Nazarewicz Cc: Laura Abbott Reviewed-by: Naoya Horiguchi Cc: Seungho Park Cc: Johannes Weiner Cc: "Kirill A. 
Shutemov" Acked-by: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 12 ------------ mm/page_alloc.c | 41 ++++++++++++++++++++++++++++------------- 2 files changed, 28 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 4ec72ef1f04a..bab8ff89da50 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -309,18 +309,6 @@ struct inode; #define page_private(page) ((page)->private) #define set_page_private(page, v) ((page)->private = (v)) -/* It's valid only if the page is free path or free_list */ -static inline void set_freepage_migratetype(struct page *page, int migratetype) -{ - page->index = migratetype; -} - -/* It's valid only if the page is free path or free_list */ -static inline int get_freepage_migratetype(struct page *page) -{ - return page->index; -} - /* * FIXME: take this include out, include page-flags.h in * files which need it (119 of them) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a329cfaf634d..252665d553b4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -125,6 +125,24 @@ unsigned long dirty_balance_reserve __read_mostly; int percpu_pagelist_fraction; gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; +/* + * A cached value of the page's pageblock's migratetype, used when the page is + * put on a pcplist. Used to avoid the pageblock migratetype lookup when + * freeing from pcplists in most cases, at the cost of possibly becoming stale. + * Also the migratetype set in the page does not necessarily match the pcplist + * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any + * other index - this ensures that it will be put on the correct CMA freelist. + */ +static inline int get_pcppage_migratetype(struct page *page) +{ + return page->index; +} + +static inline void set_pcppage_migratetype(struct page *page, int migratetype) +{ + page->index = migratetype; +} + #ifdef CONFIG_PM_SLEEP /* * The following functions are used by the suspend/hibernate code to temporarily @@ -789,7 +807,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, /* must delete as __free_one_page list manipulates */ list_del(&page->lru); - mt = get_freepage_migratetype(page); + mt = get_pcppage_migratetype(page); /* MIGRATE_ISOLATE page should not go to pcplists */ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); /* Pageblock could have been isolated meanwhile */ @@ -956,7 +974,6 @@ static void __free_pages_ok(struct page *page, unsigned int order) migratetype = get_pfnblock_migratetype(page, pfn); local_irq_save(flags); __count_vm_events(PGFREE, 1 << order); - set_freepage_migratetype(page, migratetype); free_one_page(page_zone(page), page, pfn, order, migratetype); local_irq_restore(flags); } @@ -1384,7 +1401,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, rmv_page_order(page); area->nr_free--; expand(zone, page, order, current_order, area, migratetype); - set_freepage_migratetype(page, migratetype); + set_pcppage_migratetype(page, migratetype); return page; } @@ -1461,7 +1478,6 @@ int move_freepages(struct zone *zone, order = page_order(page); list_move(&page->lru, &zone->free_area[order].free_list[migratetype]); - set_freepage_migratetype(page, migratetype); page += 1 << order; pages_moved += 1 << order; } @@ -1631,14 +1647,13 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) expand(zone, page, order, current_order, area, start_migratetype); /* - * The freepage_migratetype may differ from 
pageblock's + * The pcppage_migratetype may differ from pageblock's * migratetype depending on the decisions in - * try_to_steal_freepages(). This is OK as long as it - * does not differ for MIGRATE_CMA pageblocks. For CMA - * we need to make sure unallocated pages flushed from - * pcp lists are returned to the correct freelist. + * find_suitable_fallback(). This is OK as long as it does not + * differ for MIGRATE_CMA pageblocks. Those can be used as + * fallback only via special __rmqueue_cma_fallback() function */ - set_freepage_migratetype(page, start_migratetype); + set_pcppage_migratetype(page, start_migratetype); trace_mm_page_alloc_extfrag(page, order, current_order, start_migratetype, fallback_mt); @@ -1714,7 +1729,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, else list_add_tail(&page->lru, list); list = &page->lru; - if (is_migrate_cma(get_freepage_migratetype(page))) + if (is_migrate_cma(get_pcppage_migratetype(page))) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, -(1 << order)); } @@ -1911,7 +1926,7 @@ void free_hot_cold_page(struct page *page, bool cold) return; migratetype = get_pfnblock_migratetype(page, pfn); - set_freepage_migratetype(page, migratetype); + set_pcppage_migratetype(page, migratetype); local_irq_save(flags); __count_vm_event(PGFREE); @@ -2116,7 +2131,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, if (!page) goto failed; __mod_zone_freepage_state(zone, -(1 << order), - get_freepage_migratetype(page)); + get_pcppage_migratetype(page)); } __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); -- cgit v1.2.3-59-g8ed1b From 5e9113731a3ce616e8b5aa128ffc1aeaa4942571 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 8 Sep 2015 15:01:28 -0700 Subject: mm/hugetlb: add cache of descriptors to resv_map for region_add hugetlbfs is used today by applications that want a high degree of control over huge page usage. Often, large hugetlbfs files are used to map a large number huge pages into the application processes. The applications know when page ranges within these large files will no longer be used, and ideally would like to release them back to the subpool or global pools for other uses. The fallocate() system call provides an interface for preallocation and hole punching within files. This patch set adds fallocate functionality to hugetlbfs. fallocate hole punch will want to remove a specific range of pages. When pages are removed, their associated entries in the region/reserve map will also be removed. This will break an assumption in the region_chg/region_add calling sequence. If a new region descriptor must be allocated, it is done as part of the region_chg processing. In this way, region_add can not fail because it does not need to attempt an allocation. To prepare for fallocate hole punch, create a "cache" of descriptors that can be used by region_add if necessary. region_chg will ensure there are sufficient entries in the cache. It will be necessary to track the number of in progress add operations to know a sufficient number of descriptors reside in the cache. A new routine region_abort is added to adjust this in progress count when add operations are aborted. vma_abort_reservation is also added for callers creating reservations with vma_needs_reservation/vma_commit_reservation. 
[akpm@linux-foundation.org: fix typo in comment, use more cols] Signed-off-by: Mike Kravetz Reviewed-by: Naoya Horiguchi Acked-by: Hillf Danton Cc: Dave Hansen Cc: David Rientjes Cc: Hugh Dickins Cc: Davidlohr Bueso Cc: Aneesh Kumar Cc: Christoph Hellwig Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 3 + mm/hugetlb.c | 174 ++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 155 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d891f949466a..e2d94960b38b 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -35,6 +35,9 @@ struct resv_map { struct kref refs; spinlock_t lock; struct list_head regions; + long adds_in_progress; + struct list_head region_cache; + long region_cache_count; }; extern struct resv_map *resv_map_alloc(void); void resv_map_release(struct kref *ref); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 51ae41d0fbc0..4e5815ed7a8e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -240,11 +240,14 @@ struct file_region { /* * Add the huge page range represented by [f, t) to the reserve - * map. Existing regions will be expanded to accommodate the - * specified range. We know only existing regions need to be - * expanded, because region_add is only called after region_chg - * with the same range. If a new file_region structure must - * be allocated, it is done in region_chg. + * map. In the normal case, existing regions will be expanded + * to accommodate the specified range. Sufficient regions should + * exist for expansion due to the previous call to region_chg + * with the same range. However, it is possible that region_del + * could have been called after region_chg and modifed the map + * in such a way that no region exists to be expanded. In this + * case, pull a region descriptor from the cache associated with + * the map and use that for the new range. * * Return the number of new huge pages added to the map. This * number is greater than or equal to zero. @@ -261,6 +264,28 @@ static long region_add(struct resv_map *resv, long f, long t) if (f <= rg->to) break; + /* + * If no region exists which can be expanded to include the + * specified range, the list must have been modified by an + * interleving call to region_del(). Pull a region descriptor + * from the cache and use it for this range. + */ + if (&rg->link == head || t < rg->from) { + VM_BUG_ON(resv->region_cache_count <= 0); + + resv->region_cache_count--; + nrg = list_first_entry(&resv->region_cache, struct file_region, + link); + list_del(&nrg->link); + + nrg->from = f; + nrg->to = t; + list_add(&nrg->link, rg->link.prev); + + add += t - f; + goto out_locked; + } + /* Round our left edge to the current segment if it encloses us. */ if (f > rg->from) f = rg->from; @@ -294,6 +319,8 @@ static long region_add(struct resv_map *resv, long f, long t) add += t - nrg->to; /* Added to end of region */ nrg->to = t; +out_locked: + resv->adds_in_progress--; spin_unlock(&resv->lock); VM_BUG_ON(add < 0); return add; @@ -312,11 +339,14 @@ static long region_add(struct resv_map *resv, long f, long t) * so that the subsequent region_add call will have all the * regions it needs and will not fail. * - * Returns the number of huge pages that need to be added - * to the existing reservation map for the range [f, t). - * This number is greater or equal to zero. -ENOMEM is - * returned if a new file_region structure is needed and can - * not be allocated. 
+ * Upon entry, region_chg will also examine the cache of region descriptors + * associated with the map. If there are not enough descriptors cached, one + * will be allocated for the in progress add operation. + * + * Returns the number of huge pages that need to be added to the existing + * reservation map for the range [f, t). This number is greater or equal to + * zero. -ENOMEM is returned if a new file_region structure or cache entry + * is needed and can not be allocated. */ static long region_chg(struct resv_map *resv, long f, long t) { @@ -326,6 +356,31 @@ static long region_chg(struct resv_map *resv, long f, long t) retry: spin_lock(&resv->lock); +retry_locked: + resv->adds_in_progress++; + + /* + * Check for sufficient descriptors in the cache to accommodate + * the number of in progress add operations. + */ + if (resv->adds_in_progress > resv->region_cache_count) { + struct file_region *trg; + + VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1); + /* Must drop lock to allocate a new descriptor. */ + resv->adds_in_progress--; + spin_unlock(&resv->lock); + + trg = kmalloc(sizeof(*trg), GFP_KERNEL); + if (!trg) + return -ENOMEM; + + spin_lock(&resv->lock); + list_add(&trg->link, &resv->region_cache); + resv->region_cache_count++; + goto retry_locked; + } + /* Locate the region we are before or in. */ list_for_each_entry(rg, head, link) if (f <= rg->to) @@ -336,6 +391,7 @@ retry: * size such that we can guarantee to record the reservation. */ if (&rg->link == head || t < rg->from) { if (!nrg) { + resv->adds_in_progress--; spin_unlock(&resv->lock); nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); if (!nrg) @@ -384,6 +440,25 @@ out_nrg: return chg; } +/* + * Abort the in progress add operation. The adds_in_progress field + * of the resv_map keeps track of the operations in progress between + * calls to region_chg and region_add. Operations are sometimes + * aborted after the call to region_chg. In such cases, region_abort + * is called to decrement the adds_in_progress counter. + * + * NOTE: The range arguments [f, t) are not needed or used in this + * routine. They are kept to make reading the calling code easier as + * arguments will match the associated region_chg call. + */ +static void region_abort(struct resv_map *resv, long f, long t) +{ + spin_lock(&resv->lock); + VM_BUG_ON(!resv->region_cache_count); + resv->adds_in_progress--; + spin_unlock(&resv->lock); +} + /* * Truncate the reserve map at index 'end'. Modify/truncate any * region which contains end. Delete any regions past end. @@ -544,22 +619,44 @@ static void set_vma_private_data(struct vm_area_struct *vma, struct resv_map *resv_map_alloc(void) { struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); - if (!resv_map) + struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL); + + if (!resv_map || !rg) { + kfree(resv_map); + kfree(rg); return NULL; + } kref_init(&resv_map->refs); spin_lock_init(&resv_map->lock); INIT_LIST_HEAD(&resv_map->regions); + resv_map->adds_in_progress = 0; + + INIT_LIST_HEAD(&resv_map->region_cache); + list_add(&rg->link, &resv_map->region_cache); + resv_map->region_cache_count = 1; + return resv_map; } void resv_map_release(struct kref *ref) { struct resv_map *resv_map = container_of(ref, struct resv_map, refs); + struct list_head *head = &resv_map->region_cache; + struct file_region *rg, *trg; /* Clear out any active regions before we release the map. */ region_truncate(resv_map, 0); + + /* ... 
and any entries left in the cache */ + list_for_each_entry_safe(rg, trg, head, link) { + list_del(&rg->link); + kfree(rg); + } + + VM_BUG_ON(resv_map->adds_in_progress); + kfree(resv_map); } @@ -1473,16 +1570,18 @@ static void return_unused_surplus_pages(struct hstate *h, } } + /* - * vma_needs_reservation and vma_commit_reservation are used by the huge - * page allocation routines to manage reservations. + * vma_needs_reservation, vma_commit_reservation and vma_abort_reservation + * are used by the huge page allocation routines to manage reservations. * * vma_needs_reservation is called to determine if the huge page at addr * within the vma has an associated reservation. If a reservation is * needed, the value 1 is returned. The caller is then responsible for * managing the global reservation and subpool usage counts. After * the huge page has been allocated, vma_commit_reservation is called - * to add the page to the reservation map. + * to add the page to the reservation map. If the reservation must be + * aborted instead of committed, vma_abort_reservation is called. * * In the normal case, vma_commit_reservation returns the same value * as the preceding vma_needs_reservation call. The only time this @@ -1490,9 +1589,14 @@ static void return_unused_surplus_pages(struct hstate *h, * is the responsibility of the caller to notice the difference and * take appropriate action. */ +enum vma_resv_mode { + VMA_NEEDS_RESV, + VMA_COMMIT_RESV, + VMA_ABORT_RESV, +}; static long __vma_reservation_common(struct hstate *h, struct vm_area_struct *vma, unsigned long addr, - bool commit) + enum vma_resv_mode mode) { struct resv_map *resv; pgoff_t idx; @@ -1503,10 +1607,20 @@ static long __vma_reservation_common(struct hstate *h, return 1; idx = vma_hugecache_offset(h, vma, addr); - if (commit) - ret = region_add(resv, idx, idx + 1); - else + switch (mode) { + case VMA_NEEDS_RESV: ret = region_chg(resv, idx, idx + 1); + break; + case VMA_COMMIT_RESV: + ret = region_add(resv, idx, idx + 1); + break; + case VMA_ABORT_RESV: + region_abort(resv, idx, idx + 1); + ret = 0; + break; + default: + BUG(); + } if (vma->vm_flags & VM_MAYSHARE) return ret; @@ -1517,13 +1631,19 @@ static long __vma_reservation_common(struct hstate *h, static long vma_needs_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { - return __vma_reservation_common(h, vma, addr, false); + return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); } static long vma_commit_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { - return __vma_reservation_common(h, vma, addr, true); + return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); +} + +static void vma_abort_reservation(struct hstate *h, + struct vm_area_struct *vma, unsigned long addr) +{ + (void)__vma_reservation_common(h, vma, addr, VMA_ABORT_RESV); } static struct page *alloc_huge_page(struct vm_area_struct *vma, @@ -1549,8 +1669,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, if (chg < 0) return ERR_PTR(-ENOMEM); if (chg || avoid_reserve) - if (hugepage_subpool_get_pages(spool, 1) < 0) + if (hugepage_subpool_get_pages(spool, 1) < 0) { + vma_abort_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); + } ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); if (ret) @@ -1596,6 +1718,7 @@ out_uncharge_cgroup: out_subpool_put: if (chg || avoid_reserve) hugepage_subpool_put_pages(spool, 1); + vma_abort_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); } @@ -3236,11 +3359,14 @@ retry: 
* any allocations necessary to record that reservation occur outside * the spinlock. */ - if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) + if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { if (vma_needs_reservation(h, vma, address) < 0) { ret = VM_FAULT_OOM; goto backout_unlocked; } + /* Just decrements count, does not deallocate */ + vma_abort_reservation(h, vma, address); + } ptl = huge_pte_lockptr(h, mm, ptep); spin_lock(ptl); @@ -3387,6 +3513,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, ret = VM_FAULT_OOM; goto out_mutex; } + /* Just decrements count, does not deallocate */ + vma_abort_reservation(h, vma, address); if (!(vma->vm_flags & VM_MAYSHARE)) pagecache_page = hugetlbfs_pagecache_page(h, @@ -3726,6 +3854,8 @@ int hugetlb_reserve_pages(struct inode *inode, } return 0; out_err: + if (!vma || vma->vm_flags & VM_MAYSHARE) + region_abort(resv_map, from, to); if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) kref_put(&resv_map->refs, resv_map_release); return ret; -- cgit v1.2.3-59-g8ed1b From feba16e25a578080af5aad5eb9e469b4e6c23eef Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 8 Sep 2015 15:01:31 -0700 Subject: mm/hugetlb: add region_del() to delete a specific range of entries fallocate hole punch will want to remove a specific range of pages. The existing region_truncate() routine deletes all region/reserve map entries after a specified offset. region_del() will provide this same functionality if the end of region is specified as LONG_MAX. Hence, region_del() can replace region_truncate(). Unlike region_truncate(), region_del() can return an error in the rare case where it can not allocate memory for a region descriptor. This ONLY happens in the case where an existing region must be split. Current callers passing LONG_MAX as end of range will never experience this error and do not need to deal with error handling. Future callers of region_del() (such as fallocate hole punch) will need to handle this error. Signed-off-by: Mike Kravetz Reviewed-by: Naoya Horiguchi Acked-by: Hillf Danton Cc: Dave Hansen Cc: David Rientjes Cc: Hugh Dickins Cc: Davidlohr Bueso Cc: Aneesh Kumar Cc: Christoph Hellwig Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 122 +++++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 85 insertions(+), 37 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 4e5815ed7a8e..78e7eded4063 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -460,43 +460,90 @@ static void region_abort(struct resv_map *resv, long f, long t) } /* - * Truncate the reserve map at index 'end'. Modify/truncate any - * region which contains end. Delete any regions past end. - * Return the number of huge pages removed from the map. + * Delete the specified range [f, t) from the reserve map. If the + * t parameter is LONG_MAX, this indicates that ALL regions after f + * should be deleted. Locate the regions which intersect [f, t) + * and either trim, delete or split the existing regions. + * + * Returns the number of huge pages deleted from the reserve map. + * In the normal case, the return value is zero or more. In the + * case where a region must be split, a new region descriptor must + * be allocated. If the allocation fails, -ENOMEM will be returned. + * NOTE: If the parameter t == LONG_MAX, then we will never split + * a region and possibly return -ENOMEM. Callers specifying + * t == LONG_MAX do not need to check for -ENOMEM error. 
*/ -static long region_truncate(struct resv_map *resv, long end) +static long region_del(struct resv_map *resv, long f, long t) { struct list_head *head = &resv->regions; struct file_region *rg, *trg; - long chg = 0; + struct file_region *nrg = NULL; + long del = 0; +retry: spin_lock(&resv->lock); - /* Locate the region we are either in or before. */ - list_for_each_entry(rg, head, link) - if (end <= rg->to) + list_for_each_entry_safe(rg, trg, head, link) { + if (rg->to <= f) + continue; + if (rg->from >= t) break; - if (&rg->link == head) - goto out; - /* If we are in the middle of a region then adjust it. */ - if (end > rg->from) { - chg = rg->to - end; - rg->to = end; - rg = list_entry(rg->link.next, typeof(*rg), link); - } + if (f > rg->from && t < rg->to) { /* Must split region */ + /* + * Check for an entry in the cache before dropping + * lock and attempting allocation. + */ + if (!nrg && + resv->region_cache_count > resv->adds_in_progress) { + nrg = list_first_entry(&resv->region_cache, + struct file_region, + link); + list_del(&nrg->link); + resv->region_cache_count--; + } - /* Drop any remaining regions. */ - list_for_each_entry_safe(rg, trg, rg->link.prev, link) { - if (&rg->link == head) + if (!nrg) { + spin_unlock(&resv->lock); + nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); + if (!nrg) + return -ENOMEM; + goto retry; + } + + del += t - f; + + /* New entry for end of split region */ + nrg->from = t; + nrg->to = rg->to; + INIT_LIST_HEAD(&nrg->link); + + /* Original entry is trimmed */ + rg->to = f; + + list_add(&nrg->link, &rg->link); + nrg = NULL; break; - chg += rg->to - rg->from; - list_del(&rg->link); - kfree(rg); + } + + if (f <= rg->from && t >= rg->to) { /* Remove entire region */ + del += rg->to - rg->from; + list_del(&rg->link); + kfree(rg); + continue; + } + + if (f <= rg->from) { /* Trim beginning of region */ + del += t - rg->from; + rg->from = t; + } else { /* Trim end of region */ + del += rg->to - f; + rg->to = f; + } } -out: spin_unlock(&resv->lock); - return chg; + kfree(nrg); + return del; } /* @@ -647,7 +694,7 @@ void resv_map_release(struct kref *ref) struct file_region *rg, *trg; /* Clear out any active regions before we release the map. */ - region_truncate(resv_map, 0); + region_del(resv_map, 0, LONG_MAX); /* ... and any entries left in the cache */ list_for_each_entry_safe(rg, trg, head, link) { @@ -1572,7 +1619,7 @@ static void return_unused_surplus_pages(struct hstate *h, /* - * vma_needs_reservation, vma_commit_reservation and vma_abort_reservation + * vma_needs_reservation, vma_commit_reservation and vma_end_reservation * are used by the huge page allocation routines to manage reservations. * * vma_needs_reservation is called to determine if the huge page at addr @@ -1580,8 +1627,9 @@ static void return_unused_surplus_pages(struct hstate *h, * needed, the value 1 is returned. The caller is then responsible for * managing the global reservation and subpool usage counts. After * the huge page has been allocated, vma_commit_reservation is called - * to add the page to the reservation map. If the reservation must be - * aborted instead of committed, vma_abort_reservation is called. + * to add the page to the reservation map. If the page allocation fails, + * the reservation must be ended instead of committed. vma_end_reservation + * is called in such cases. * * In the normal case, vma_commit_reservation returns the same value * as the preceding vma_needs_reservation call. 
The only time this @@ -1592,7 +1640,7 @@ static void return_unused_surplus_pages(struct hstate *h, enum vma_resv_mode { VMA_NEEDS_RESV, VMA_COMMIT_RESV, - VMA_ABORT_RESV, + VMA_END_RESV, }; static long __vma_reservation_common(struct hstate *h, struct vm_area_struct *vma, unsigned long addr, @@ -1614,7 +1662,7 @@ static long __vma_reservation_common(struct hstate *h, case VMA_COMMIT_RESV: ret = region_add(resv, idx, idx + 1); break; - case VMA_ABORT_RESV: + case VMA_END_RESV: region_abort(resv, idx, idx + 1); ret = 0; break; @@ -1640,10 +1688,10 @@ static long vma_commit_reservation(struct hstate *h, return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); } -static void vma_abort_reservation(struct hstate *h, +static void vma_end_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { - (void)__vma_reservation_common(h, vma, addr, VMA_ABORT_RESV); + (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); } static struct page *alloc_huge_page(struct vm_area_struct *vma, @@ -1670,7 +1718,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, return ERR_PTR(-ENOMEM); if (chg || avoid_reserve) if (hugepage_subpool_get_pages(spool, 1) < 0) { - vma_abort_reservation(h, vma, addr); + vma_end_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); } @@ -1718,7 +1766,7 @@ out_uncharge_cgroup: out_subpool_put: if (chg || avoid_reserve) hugepage_subpool_put_pages(spool, 1); - vma_abort_reservation(h, vma, addr); + vma_end_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); } @@ -3365,7 +3413,7 @@ retry: goto backout_unlocked; } /* Just decrements count, does not deallocate */ - vma_abort_reservation(h, vma, address); + vma_end_reservation(h, vma, address); } ptl = huge_pte_lockptr(h, mm, ptep); @@ -3514,7 +3562,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, goto out_mutex; } /* Just decrements count, does not deallocate */ - vma_abort_reservation(h, vma, address); + vma_end_reservation(h, vma, address); if (!(vma->vm_flags & VM_MAYSHARE)) pagecache_page = hugetlbfs_pagecache_page(h, @@ -3870,7 +3918,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) long gbl_reserve; if (resv_map) - chg = region_truncate(resv_map, offset); + chg = region_del(resv_map, offset, LONG_MAX); spin_lock(&inode->i_lock); inode->i_blocks -= (blocks_per_huge_page(h) * freed); spin_unlock(&inode->i_lock); -- cgit v1.2.3-59-g8ed1b From c672c7f29f2fdb73e1f72911bf499675c81fcdbb Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 8 Sep 2015 15:01:35 -0700 Subject: mm/hugetlb: expose hugetlb fault mutex for use by fallocate hugetlb page faults are currently synchronized by the table of mutexes (htlb_fault_mutex_table). fallocate code will need to synchronize with the page fault code when it allocates or deletes pages. Expose interfaces so that fallocate operations can be synchronized with page faults. Minor name changes to be more consistent with other global hugetlb symbols. 
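A hedged sketch of how a non-fault path is expected to use the newly exported symbols, assuming the usual h/mm/vma/mapping/idx/address variables of the surrounding code; it mirrors the locking pattern in hugetlb_fault() and is an illustration, not code from the patch:

/*
 * Sketch: exclude hugetlb page faults on (mapping, idx) while adding or
 * removing a page outside the fault path. The mm/vma arguments only
 * feed the hash in the CONFIG_SMP case.
 */
	u32 hash;

	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	/* ... operate on the page cache entry at idx with faults excluded ... */

	mutex_unlock(&hugetlb_fault_mutex_table[hash]);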
Signed-off-by: Mike Kravetz Reviewed-by: Naoya Horiguchi Acked-by: Hillf Danton Cc: Dave Hansen Cc: David Rientjes Cc: Hugh Dickins Cc: Davidlohr Bueso Cc: Aneesh Kumar Cc: Christoph Hellwig Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 5 +++++ mm/hugetlb.c | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index e2d94960b38b..530cf6fc24c7 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -88,6 +88,11 @@ int dequeue_hwpoisoned_huge_page(struct page *page); bool isolate_huge_page(struct page *page, struct list_head *list); void putback_active_hugepage(struct page *page); void free_huge_page(struct page *page); +extern struct mutex *hugetlb_fault_mutex_table; +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, + struct vm_area_struct *vma, + struct address_space *mapping, + pgoff_t idx, unsigned long address); #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 78e7eded4063..070880fe1ff7 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -64,7 +64,7 @@ DEFINE_SPINLOCK(hugetlb_lock); * prevent spurious OOMs when the hugepage pool is fully utilized. */ static int num_fault_mutexes; -static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp; +struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; /* Forward declaration */ static int hugetlb_acct_memory(struct hstate *h, long delta); @@ -2482,7 +2482,7 @@ static void __exit hugetlb_exit(void) } kobject_put(hugepages_kobj); - kfree(htlb_fault_mutex_table); + kfree(hugetlb_fault_mutex_table); } module_exit(hugetlb_exit); @@ -2515,12 +2515,12 @@ static int __init hugetlb_init(void) #else num_fault_mutexes = 1; #endif - htlb_fault_mutex_table = + hugetlb_fault_mutex_table = kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL); - BUG_ON(!htlb_fault_mutex_table); + BUG_ON(!hugetlb_fault_mutex_table); for (i = 0; i < num_fault_mutexes; i++) - mutex_init(&htlb_fault_mutex_table[i]); + mutex_init(&hugetlb_fault_mutex_table[i]); return 0; } module_init(hugetlb_init); @@ -3454,7 +3454,7 @@ backout_unlocked: } #ifdef CONFIG_SMP -static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) @@ -3479,7 +3479,7 @@ static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, * For uniprocesor systems we always use a single mutex, so just * return 0 and avoid the hashing overhead. */ -static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) @@ -3527,8 +3527,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, * get spurious allocation failures if two CPUs race to instantiate * the same page in the page cache. 
*/ - hash = fault_mutex_hash(h, mm, vma, mapping, idx, address); - mutex_lock(&htlb_fault_mutex_table[hash]); + hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address); + mutex_lock(&hugetlb_fault_mutex_table[hash]); entry = huge_ptep_get(ptep); if (huge_pte_none(entry)) { @@ -3613,7 +3613,7 @@ out_ptl: put_page(pagecache_page); } out_mutex: - mutex_unlock(&htlb_fault_mutex_table[hash]); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); /* * Generally it's safe to hold refcount during waiting page lock. But * here we just wait to defer the next page fault to avoid busy loop and -- cgit v1.2.3-59-g8ed1b From b5cec28d36f5ee6b4e6f68a0a40aa1e4045d6d99 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 8 Sep 2015 15:01:41 -0700 Subject: hugetlbfs: truncate_hugepages() takes a range of pages Modify truncate_hugepages() to take a range of pages (start, end) instead of simply start. If an end value of LLONG_MAX is passed, the current "truncate" functionality is maintained. Existing callers are modified to pass LLONG_MAX as end of range. By keying off end == LLONG_MAX, the routine behaves differently for truncate and hole punch. Page removal is now synchronized with page allocation via faults by using the fault mutex table. The hole punch case can experience the rare region_del error and must handle accordingly. Add the routine hugetlb_fix_reserve_counts to fix up reserve counts in the case where region_del returns an error. Since the routine handles more than just the truncate case, it is renamed to remove_inode_hugepages(). To be consistent, the routine truncate_huge_page() is renamed remove_huge_page(). Downstream of remove_inode_hugepages(), the routine hugetlb_unreserve_pages() is also modified to take a range of pages. hugetlb_unreserve_pages is modified to detect an error from region_del and pass it back to the caller. Signed-off-by: Mike Kravetz Reviewed-by: Naoya Horiguchi Acked-by: Hillf Danton Cc: Dave Hansen Cc: David Rientjes Cc: Hugh Dickins Cc: Davidlohr Bueso Cc: Aneesh Kumar Cc: Christoph Hellwig Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 98 ++++++++++++++++++++++++++++++++++++++++++++----- include/linux/hugetlb.h | 4 +- mm/hugetlb.c | 40 ++++++++++++++++++-- 3 files changed, 128 insertions(+), 14 deletions(-) (limited to 'mm') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index b1e197d38abb..1ef630f81c99 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -293,26 +293,61 @@ static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, return -EINVAL; } -static void truncate_huge_page(struct page *page) +static void remove_huge_page(struct page *page) { ClearPageDirty(page); ClearPageUptodate(page); delete_from_page_cache(page); } -static void truncate_hugepages(struct inode *inode, loff_t lstart) + +/* + * remove_inode_hugepages handles two distinct cases: truncation and hole + * punch. There are subtle differences in operation for each case. + + * truncation is indicated by end of range being LLONG_MAX + * In this case, we first scan the range and release found pages. + * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv + * maps and global counts. + * hole punch is indicated if end is not LLONG_MAX + * In the hole punch case we scan the range and release found pages. + * Only when releasing a page is the associated region/reserv map + * deleted. The region/reserv map for ranges without associated + * pages are not modified. 
+ * Note: If the passed end of range value is beyond the end of file, but + * not LLONG_MAX this routine still performs a hole punch operation. + */ +static void remove_inode_hugepages(struct inode *inode, loff_t lstart, + loff_t lend) { struct hstate *h = hstate_inode(inode); struct address_space *mapping = &inode->i_data; const pgoff_t start = lstart >> huge_page_shift(h); + const pgoff_t end = lend >> huge_page_shift(h); + struct vm_area_struct pseudo_vma; struct pagevec pvec; pgoff_t next; int i, freed = 0; + long lookup_nr = PAGEVEC_SIZE; + bool truncate_op = (lend == LLONG_MAX); + memset(&pseudo_vma, 0, sizeof(struct vm_area_struct)); + pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); pagevec_init(&pvec, 0); next = start; - while (1) { - if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { + while (next < end) { + /* + * Make sure to never grab more pages that we + * might possibly need. + */ + if (end - next < lookup_nr) + lookup_nr = end - next; + + /* + * This pagevec_lookup() may return pages past 'end', + * so we must check for page->index > end. + */ + if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) { if (next == start) break; next = start; @@ -321,26 +356,69 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart) for (i = 0; i < pagevec_count(&pvec); ++i) { struct page *page = pvec.pages[i]; + u32 hash; + + hash = hugetlb_fault_mutex_hash(h, current->mm, + &pseudo_vma, + mapping, next, 0); + mutex_lock(&hugetlb_fault_mutex_table[hash]); lock_page(page); + if (page->index >= end) { + unlock_page(page); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + next = end; /* we are done */ + break; + } + + /* + * If page is mapped, it was faulted in after being + * unmapped. Do nothing in this race case. In the + * normal case page is not mapped. + */ + if (!page_mapped(page)) { + bool rsv_on_error = !PagePrivate(page); + /* + * We must free the huge page and remove + * from page cache (remove_huge_page) BEFORE + * removing the region/reserve map + * (hugetlb_unreserve_pages). In rare out + * of memory conditions, removal of the + * region/reserve map could fail. Before + * free'ing the page, note PagePrivate which + * is used in case of error. 
+ */ + remove_huge_page(page); + freed++; + if (!truncate_op) { + if (unlikely(hugetlb_unreserve_pages( + inode, next, + next + 1, 1))) + hugetlb_fix_reserve_counts( + inode, rsv_on_error); + } + } + if (page->index > next) next = page->index; + ++next; - truncate_huge_page(page); unlock_page(page); - freed++; + + mutex_unlock(&hugetlb_fault_mutex_table[hash]); } huge_pagevec_release(&pvec); } - BUG_ON(!lstart && mapping->nrpages); - hugetlb_unreserve_pages(inode, start, freed); + + if (truncate_op) + (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed); } static void hugetlbfs_evict_inode(struct inode *inode) { struct resv_map *resv_map; - truncate_hugepages(inode, 0); + remove_inode_hugepages(inode, 0, LLONG_MAX); resv_map = (struct resv_map *)inode->i_mapping->private_data; /* root inode doesn't have the resv_map, so we should check it */ if (resv_map) @@ -397,7 +475,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) if (!RB_EMPTY_ROOT(&mapping->i_mmap)) hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0); i_mmap_unlock_write(mapping); - truncate_hugepages(inode, offset); + remove_inode_hugepages(inode, offset, LLONG_MAX); return 0; } diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 530cf6fc24c7..35afca1692fb 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -83,11 +83,13 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, int hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); -void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); +long hugetlb_unreserve_pages(struct inode *inode, long start, long end, + long freed); int dequeue_hwpoisoned_huge_page(struct page *page); bool isolate_huge_page(struct page *page, struct list_head *list); void putback_active_hugepage(struct page *page); void free_huge_page(struct page *page); +void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve); extern struct mutex *hugetlb_fault_mutex_table; u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 070880fe1ff7..61c52cd5f77b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -546,6 +546,28 @@ retry: return del; } +/* + * A rare out of memory error was encountered which prevented removal of + * the reserve map region for a page. The huge page itself was free'ed + * and removed from the page cache. This routine will adjust the subpool + * usage count, and the global reserve count if needed. By incrementing + * these counts, the reserve map entry which could not be deleted will + * appear as a "reserved" entry instead of simply dangling with incorrect + * counts. + */ +void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve) +{ + struct hugepage_subpool *spool = subpool_inode(inode); + long rsv_adjust; + + rsv_adjust = hugepage_subpool_get_pages(spool, 1); + if (restore_reserve && rsv_adjust) { + struct hstate *h = hstate_inode(inode); + + hugetlb_acct_memory(h, 1); + } +} + /* * Count and return the number of huge pages in the reserve map * that intersect with the range [f, t). 
@@ -3909,7 +3931,8 @@ out_err: return ret; } -void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) +long hugetlb_unreserve_pages(struct inode *inode, long start, long end, + long freed) { struct hstate *h = hstate_inode(inode); struct resv_map *resv_map = inode_resv_map(inode); @@ -3917,8 +3940,17 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) struct hugepage_subpool *spool = subpool_inode(inode); long gbl_reserve; - if (resv_map) - chg = region_del(resv_map, offset, LONG_MAX); + if (resv_map) { + chg = region_del(resv_map, start, end); + /* + * region_del() can fail in the rare case where a region + * must be split and another region descriptor can not be + * allocated. If end == LONG_MAX, it will not fail. + */ + if (chg < 0) + return chg; + } + spin_lock(&inode->i_lock); inode->i_blocks -= (blocks_per_huge_page(h) * freed); spin_unlock(&inode->i_lock); @@ -3929,6 +3961,8 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) */ gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); hugetlb_acct_memory(h, -gbl_reserve); + + return 0; } #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE -- cgit v1.2.3-59-g8ed1b From 1fb1b0e9ef2d661488f8053986c3b7641cae529d Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 8 Sep 2015 15:01:44 -0700 Subject: mm/hugetlb: vma_has_reserves() needs to handle fallocate hole punch In vma_has_reserves(), the current assumption is that reserves are always present for shared mappings. However, this will not be the case with fallocate hole punch. When punching a hole, the present page will be deleted as well as the region/reserve map entry (and hence any reservation). vma_has_reserves is passed "chg" which indicates whether or not a region/reserve map is present. Use this to determine if reserves are actually present or were removed via hole punch. Signed-off-by: Mike Kravetz Reviewed-by: Naoya Horiguchi Acked-by: Hillf Danton Cc: Dave Hansen Cc: David Rientjes Cc: Hugh Dickins Cc: Davidlohr Bueso Cc: Aneesh Kumar Cc: Christoph Hellwig Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 61c52cd5f77b..bd12e8c8bc7b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -801,8 +801,19 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg) } /* Shared mappings always use reserves */ - if (vma->vm_flags & VM_MAYSHARE) - return true; + if (vma->vm_flags & VM_MAYSHARE) { + /* + * We know VM_NORESERVE is not set. Therefore, there SHOULD + * be a region map for all pages. The only situation where + * there is no region map is if a hole was punched via + * fallocate. In this case, there really are no reverves to + * use. This situation is indicated if chg != 0. + */ + if (chg) + return false; + else + return true; + } /* * Only the process that called mmap() has reserves for -- cgit v1.2.3-59-g8ed1b From d85f69b0b533ec6d7ac8c21db958c44c6d957c90 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 8 Sep 2015 15:01:47 -0700 Subject: mm/hugetlb: alloc_huge_page handle areas hole punched by fallocate Areas hole punched by fallocate will not have entries in the region/reserve map. However, shared mappings with min_size subpool reservations may still have reserved pages. alloc_huge_page needs to handle this special case and do the proper accounting. 
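The core of the change can be condensed into a short sketch (simplified from the diff that follows; map_chg and gbl_chg are the patch's own names, the error unwinding paths are omitted):

/*
 * map_chg: 1 if the region/reserve map has no reservation for this page
 *          (e.g. the range was hole punched), 0 if a reservation exists.
 * gbl_chg: 1 if the page must be taken from the global free pool, 0 if a
 *          map or subpool reservation already covers it.
 */
	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
	if (map_chg < 0)
		return ERR_PTR(-ENOMEM);

	if (map_chg || avoid_reserve) {
		gbl_chg = hugepage_subpool_get_pages(spool, 1);
		if (gbl_chg < 0)
			return ERR_PTR(-ENOSPC);	/* after vma_end_reservation() */
		/*
		 * gbl_chg == 0 here means a min_size subpool reserve still
		 * covers the page; ignore that when avoid_reserve is set.
		 */
		if (avoid_reserve)
			gbl_chg = 1;
	}
	/* gbl_chg is then passed to dequeue_huge_page_vma() */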
Signed-off-by: Mike Kravetz Reviewed-by: Naoya Horiguchi Acked-by: Hillf Danton Cc: Dave Hansen Cc: David Rientjes Cc: Hugh Dickins Cc: Davidlohr Bueso Cc: Aneesh Kumar Cc: Christoph Hellwig Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 54 +++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 39 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index bd12e8c8bc7b..114ad6ce7030 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1733,34 +1733,58 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, struct hugepage_subpool *spool = subpool_vma(vma); struct hstate *h = hstate_vma(vma); struct page *page; - long chg, commit; + long map_chg, map_commit; + long gbl_chg; int ret, idx; struct hugetlb_cgroup *h_cg; idx = hstate_index(h); /* - * Processes that did not create the mapping will have no - * reserves and will not have accounted against subpool - * limit. Check that the subpool limit can be made before - * satisfying the allocation MAP_NORESERVE mappings may also - * need pages and subpool limit allocated allocated if no reserve - * mapping overlaps. + * Examine the region/reserve map to determine if the process + * has a reservation for the page to be allocated. A return + * code of zero indicates a reservation exists (no change). */ - chg = vma_needs_reservation(h, vma, addr); - if (chg < 0) + map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); + if (map_chg < 0) return ERR_PTR(-ENOMEM); - if (chg || avoid_reserve) - if (hugepage_subpool_get_pages(spool, 1) < 0) { + + /* + * Processes that did not create the mapping will have no + * reserves as indicated by the region/reserve map. Check + * that the allocation will not exceed the subpool limit. + * Allocations for MAP_NORESERVE mappings also need to be + * checked against any subpool limit. + */ + if (map_chg || avoid_reserve) { + gbl_chg = hugepage_subpool_get_pages(spool, 1); + if (gbl_chg < 0) { vma_end_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); } + /* + * Even though there was no reservation in the region/reserve + * map, there could be reservations associated with the + * subpool that can be used. This would be indicated if the + * return value of hugepage_subpool_get_pages() is zero. + * However, if avoid_reserve is specified we still avoid even + * the subpool reservations. + */ + if (avoid_reserve) + gbl_chg = 1; + } + ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); if (ret) goto out_subpool_put; spin_lock(&hugetlb_lock); - page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg); + /* + * glb_chg is passed to indicate whether or not a page must be taken + * from the global free pool (global change). gbl_chg == 0 indicates + * a reservation exists for the allocation. + */ + page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); if (!page) { spin_unlock(&hugetlb_lock); page = alloc_buddy_huge_page(h, NUMA_NO_NODE); @@ -1776,8 +1800,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, set_page_private(page, (unsigned long)spool); - commit = vma_commit_reservation(h, vma, addr); - if (unlikely(chg > commit)) { + map_commit = vma_commit_reservation(h, vma, addr); + if (unlikely(map_chg > map_commit)) { /* * The page was added to the reservation map between * vma_needs_reservation and vma_commit_reservation. 
@@ -1797,7 +1821,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, out_uncharge_cgroup: hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); out_subpool_put: - if (chg || avoid_reserve) + if (map_chg || avoid_reserve) hugepage_subpool_put_pages(spool, 1); vma_end_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); -- cgit v1.2.3-59-g8ed1b From ab76ad540a50191308e5bb6b5e2d9e26c78616d3 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 8 Sep 2015 15:01:50 -0700 Subject: hugetlbfs: New huge_add_to_page_cache helper routine Currently, there is only a single place where hugetlbfs pages are added to the page cache. The new fallocate code be adding a second one, so break the functionality out into its own helper. Signed-off-by: Dave Hansen Signed-off-by: Mike Kravetz Reviewed-by: Naoya Horiguchi Acked-by: Hillf Danton Cc: David Rientjes Cc: Hugh Dickins Cc: Davidlohr Bueso Cc: Aneesh Kumar Cc: Christoph Hellwig Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 2 ++ mm/hugetlb.c | 27 ++++++++++++++++++--------- 2 files changed, 20 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 35afca1692fb..1222fb07a746 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -333,6 +333,8 @@ struct huge_bootmem_page { struct page *alloc_huge_page_node(struct hstate *h, int nid); struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); +int huge_add_to_page_cache(struct page *page, struct address_space *mapping, + pgoff_t idx); /* arch callback */ int __init alloc_bootmem_huge_page(struct hstate *h); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 114ad6ce7030..d45eacc5653e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3375,6 +3375,23 @@ static bool hugetlbfs_pagecache_present(struct hstate *h, return page != NULL; } +int huge_add_to_page_cache(struct page *page, struct address_space *mapping, + pgoff_t idx) +{ + struct inode *inode = mapping->host; + struct hstate *h = hstate_inode(inode); + int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); + + if (err) + return err; + ClearPagePrivate(page); + + spin_lock(&inode->i_lock); + inode->i_blocks += blocks_per_huge_page(h); + spin_unlock(&inode->i_lock); + return 0; +} + static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address, pte_t *ptep, unsigned int flags) @@ -3422,21 +3439,13 @@ retry: set_page_huge_active(page); if (vma->vm_flags & VM_MAYSHARE) { - int err; - struct inode *inode = mapping->host; - - err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); + int err = huge_add_to_page_cache(page, mapping, idx); if (err) { put_page(page); if (err == -EEXIST) goto retry; goto out; } - ClearPagePrivate(page); - - spin_lock(&inode->i_lock); - inode->i_blocks += blocks_per_huge_page(h); - spin_unlock(&inode->i_lock); } else { lock_page(page); if (unlikely(anon_vma_prepare(vma))) { -- cgit v1.2.3-59-g8ed1b From 70c3547e36f5c9fbc4caecfeca98f0effa6932c5 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 8 Sep 2015 15:01:54 -0700 Subject: hugetlbfs: add hugetlbfs_fallocate() This is based on the shmem version, but it has diverged quite a bit. We have no swap to worry about, nor the new file sealing. Add synchronication via the fault mutex table to coordinate page faults, fallocate allocation and fallocate hole punch. 
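For a sense of the resulting interface, a hedged userspace sketch (it assumes a kernel with this series applied, a hugetlbfs mount at /dev/hugepages and 2MB huge pages; the path and page size are assumptions, not taken from the patch):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)	/* assumed huge page size */

int main(void)
{
	int fd = open("/dev/hugepages/demo", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Preallocate two huge pages without ever mapping the file. */
	if (fallocate(fd, 0, 0, 2 * HPAGE_SIZE))
		perror("fallocate preallocate");

	/* Punch the first huge page back out again. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, HPAGE_SIZE))
		perror("fallocate punch hole");

	close(fd);
	unlink("/dev/hugepages/demo");
	return 0;
}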
What this allows us to do is move physical memory in and out of a hugetlbfs file without having it mapped. This also gives us the ability to support MADV_REMOVE since it is currently implemented using fallocate(). MADV_REMOVE lets madvise() remove pages from the middle of a hugetlbfs file, which wasn't possible before. hugetlbfs fallocate only operates on whole huge pages. Based on code by Dave Hansen. Signed-off-by: Mike Kravetz Reviewed-by: Naoya Horiguchi Acked-by: Hillf Danton Cc: Dave Hansen Cc: David Rientjes Cc: Hugh Dickins Cc: Davidlohr Bueso Cc: Aneesh Kumar Cc: Christoph Hellwig Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 179 +++++++++++++++++++++++++++++++++++++++++++++++- include/linux/hugetlb.h | 3 + mm/hugetlb.c | 2 +- 3 files changed, 182 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 1ef630f81c99..316adb968b65 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -12,6 +12,7 @@ #include #include #include /* remove ASAP */ +#include #include #include #include @@ -84,6 +85,29 @@ static const match_table_t tokens = { {Opt_err, NULL}, }; +#ifdef CONFIG_NUMA +static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma, + struct inode *inode, pgoff_t index) +{ + vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy, + index); +} + +static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma) +{ + mpol_cond_put(vma->vm_policy); +} +#else +static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma, + struct inode *inode, pgoff_t index) +{ +} + +static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma) +{ +} +#endif + static void huge_pagevec_release(struct pagevec *pvec) { int i; @@ -479,6 +503,158 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) return 0; } +static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) +{ + struct hstate *h = hstate_inode(inode); + loff_t hpage_size = huge_page_size(h); + loff_t hole_start, hole_end; + + /* + * For hole punch round up the beginning offset of the hole and + * round down the end. + */ + hole_start = round_up(offset, hpage_size); + hole_end = round_down(offset + len, hpage_size); + + if (hole_end > hole_start) { + struct address_space *mapping = inode->i_mapping; + + mutex_lock(&inode->i_mutex); + i_mmap_lock_write(mapping); + if (!RB_EMPTY_ROOT(&mapping->i_mmap)) + hugetlb_vmdelete_list(&mapping->i_mmap, + hole_start >> PAGE_SHIFT, + hole_end >> PAGE_SHIFT); + i_mmap_unlock_write(mapping); + remove_inode_hugepages(inode, hole_start, hole_end); + mutex_unlock(&inode->i_mutex); + } + + return 0; +} + +static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, + loff_t len) +{ + struct inode *inode = file_inode(file); + struct address_space *mapping = inode->i_mapping; + struct hstate *h = hstate_inode(inode); + struct vm_area_struct pseudo_vma; + struct mm_struct *mm = current->mm; + loff_t hpage_size = huge_page_size(h); + unsigned long hpage_shift = huge_page_shift(h); + pgoff_t start, index, end; + int error; + u32 hash; + + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) + return -EOPNOTSUPP; + + if (mode & FALLOC_FL_PUNCH_HOLE) + return hugetlbfs_punch_hole(inode, offset, len); + + /* + * Default preallocate case. + * For this range, start is rounded down and end is rounded up + * as well as being converted to page offsets. 
+ */ + start = offset >> hpage_shift; + end = (offset + len + hpage_size - 1) >> hpage_shift; + + mutex_lock(&inode->i_mutex); + + /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ + error = inode_newsize_ok(inode, offset + len); + if (error) + goto out; + + /* + * Initialize a pseudo vma as this is required by the huge page + * allocation routines. If NUMA is configured, use page index + * as input to create an allocation policy. + */ + memset(&pseudo_vma, 0, sizeof(struct vm_area_struct)); + pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); + pseudo_vma.vm_file = file; + + for (index = start; index < end; index++) { + /* + * This is supposed to be the vaddr where the page is being + * faulted in, but we have no vaddr here. + */ + struct page *page; + unsigned long addr; + int avoid_reserve = 0; + + cond_resched(); + + /* + * fallocate(2) manpage permits EINTR; we may have been + * interrupted because we are using up too much memory. + */ + if (signal_pending(current)) { + error = -EINTR; + break; + } + + /* Set numa allocation policy based on index */ + hugetlb_set_vma_policy(&pseudo_vma, inode, index); + + /* addr is the offset within the file (zero based) */ + addr = index * hpage_size; + + /* mutex taken here, fault path and hole punch */ + hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping, + index, addr); + mutex_lock(&hugetlb_fault_mutex_table[hash]); + + /* See if already present in mapping to avoid alloc/free */ + page = find_get_page(mapping, index); + if (page) { + put_page(page); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + hugetlb_drop_vma_policy(&pseudo_vma); + continue; + } + + /* Allocate page and add to page cache */ + page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve); + hugetlb_drop_vma_policy(&pseudo_vma); + if (IS_ERR(page)) { + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + error = PTR_ERR(page); + goto out; + } + clear_huge_page(page, addr, pages_per_huge_page(h)); + __SetPageUptodate(page); + error = huge_add_to_page_cache(page, mapping, index); + if (unlikely(error)) { + put_page(page); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + goto out; + } + + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + + /* + * page_put due to reference from alloc_huge_page() + * unlock_page because locked by add_to_page_cache() + */ + put_page(page); + unlock_page(page); + } + + if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) + i_size_write(inode, offset + len); + inode->i_ctime = CURRENT_TIME; + spin_lock(&inode->i_lock); + inode->i_private = NULL; + spin_unlock(&inode->i_lock); +out: + mutex_unlock(&inode->i_mutex); + return error; +} + static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); @@ -790,7 +966,8 @@ const struct file_operations hugetlbfs_file_operations = { .mmap = hugetlbfs_file_mmap, .fsync = noop_fsync, .get_unmapped_area = hugetlb_get_unmapped_area, - .llseek = default_llseek, + .llseek = default_llseek, + .fallocate = hugetlbfs_fallocate, }; static const struct inode_operations hugetlbfs_dir_inode_operations = { diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 1222fb07a746..5e35379f58a5 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -330,6 +330,8 @@ struct huge_bootmem_page { #endif }; +struct page *alloc_huge_page(struct vm_area_struct *vma, + unsigned long addr, int avoid_reserve); struct page *alloc_huge_page_node(struct hstate *h, int nid); struct page *alloc_huge_page_noerr(struct 
vm_area_struct *vma, unsigned long addr, int avoid_reserve); @@ -483,6 +485,7 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; +#define alloc_huge_page(v, a, r) NULL #define alloc_huge_page_node(h, nid) NULL #define alloc_huge_page_noerr(v, a, r) NULL #define alloc_bootmem_huge_page(h) NULL diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d45eacc5653e..cd1280c487ff 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1727,7 +1727,7 @@ static void vma_end_reservation(struct hstate *h, (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); } -static struct page *alloc_huge_page(struct vm_area_struct *vma, +struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { struct hugepage_subpool *spool = subpool_vma(vma); -- cgit v1.2.3-59-g8ed1b From 72079ba0dfefc1444b4ef98a2fa3d040838a775f Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 8 Sep 2015 15:01:57 -0700 Subject: mm: madvise allow remove operation for hugetlbfs Now that we have hole punching support for hugetlbfs, we can also support the MADV_REMOVE interface to it. Signed-off-by: Dave Hansen Signed-off-by: Mike Kravetz Reviewed-by: Naoya Horiguchi Acked-by: Hillf Danton Cc: David Rientjes Cc: Hugh Dickins Cc: Davidlohr Bueso Cc: Aneesh Kumar Cc: Christoph Hellwig Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/madvise.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/madvise.c b/mm/madvise.c index ce3a4222c7e7..c889fcbb530e 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -301,7 +301,7 @@ static long madvise_remove(struct vm_area_struct *vma, *prev = NULL; /* tell sys_madvise we drop mmap_sem */ - if (vma->vm_flags & (VM_LOCKED | VM_HUGETLB)) + if (vma->vm_flags & VM_LOCKED) return -EINVAL; f = vma->vm_file; -- cgit v1.2.3-59-g8ed1b From c5c5c9d1008fb15945d0173b3ca75931ef53ae1f Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Tue, 8 Sep 2015 15:02:00 -0700 Subject: mm/memblock.c: make memblock_overlaps_region() return bool. memblock_overlaps_region() checks if the given memblock region intersects a region in memblock. If so, it returns the index of the intersected region. But its only caller is memblock_is_region_reserved(), and it returns 0 if false, non-zero if true. Both of these should return bool. 
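A self-contained toy model of the overlap predicate this patch converts to bool (it mirrors the range test in memblock_addrs_overlap() shown in the diff below; the toy_region type and overlaps_region() name are simplified stand-ins, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

struct toy_region { phys_addr_t base, size; };

/* Half-open intervals [base, base + size) intersect iff each one starts
 * below the other's end. */
static bool addrs_overlap(phys_addr_t b1, phys_addr_t s1,
			  phys_addr_t b2, phys_addr_t s2)
{
	return b1 < (b2 + s2) && b2 < (b1 + s1);
}

/* bool-returning walk over a region array, the shape the patch gives
 * memblock_overlaps_region(). */
static bool overlaps_region(const struct toy_region *r, unsigned long cnt,
			    phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < cnt; i++)
		if (addrs_overlap(base, size, r[i].base, r[i].size))
			return true;
	return false;
}

int main(void)
{
	struct toy_region reserved[] = { { 0x1000, 0x1000 }, { 0x8000, 0x2000 } };

	printf("%d\n", overlaps_region(reserved, 2, 0x1800, 0x1000)); /* 1 */
	printf("%d\n", overlaps_region(reserved, 2, 0x3000, 0x1000)); /* 0 */
	return 0;
}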
Signed-off-by: Tang Chen Cc: Thomas Gleixner Cc: Tejun Heo Cc: Yasuaki Ishimatsu Cc: Luiz Capitulino Cc: Xishi Qiu Cc: Will Deacon Cc: Vladimir Murzin Cc: Fabian Frederick Cc: Alexander Kuleshov Cc: Baoquan He Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 2 +- mm/memblock.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index cc4b01972060..d312ae3b51fc 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -323,7 +323,7 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit); int memblock_is_memory(phys_addr_t addr); int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); int memblock_is_reserved(phys_addr_t addr); -int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); +bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); extern void __memblock_dump_all(void); diff --git a/mm/memblock.c b/mm/memblock.c index bde61e8c28c5..08a5126338db 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -91,7 +91,7 @@ static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, p return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); } -static long __init_memblock memblock_overlaps_region(struct memblock_type *type, +static bool __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) { unsigned long i; @@ -103,7 +103,7 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type, break; } - return (i < type->cnt) ? i : -1; + return i < type->cnt; } /* @@ -1566,12 +1566,12 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size * Check if the region [@base, @base+@size) intersects a reserved memory block. * * RETURNS: - * 0 if false, non-zero if true + * True if they intersect, false if not. */ -int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) +bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) { memblock_cap_size(base, &size); - return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; + return memblock_overlaps_region(&memblock.reserved, base, size); } void __init_memblock memblock_trim_memory(phys_addr_t align) -- cgit v1.2.3-59-g8ed1b From 95cf82ecc1fcb44df1768162343cc8eb88083b86 Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Tue, 8 Sep 2015 15:02:03 -0700 Subject: mem-hotplug: handle node hole when initializing numa_meminfo. When parsing SRAT, all memory ranges are added into numa_meminfo. In numa_init(), before entering numa_cleanup_meminfo(), all possible memory ranges are in numa_meminfo. And numa_cleanup_meminfo() removes all ranges over max_pfn or empty. But, this only works if the nodes are continuous. Let's have a look at the following example: We have an SRAT like this: SRAT: Node 0 PXM 0 [mem 0x00000000-0x5fffffff] SRAT: Node 0 PXM 0 [mem 0x100000000-0x1ffffffffff] SRAT: Node 1 PXM 1 [mem 0x20000000000-0x3ffffffffff] SRAT: Node 4 PXM 2 [mem 0x40000000000-0x5ffffffffff] hotplug SRAT: Node 5 PXM 3 [mem 0x60000000000-0x7ffffffffff] hotplug SRAT: Node 2 PXM 4 [mem 0x80000000000-0x9ffffffffff] hotplug SRAT: Node 3 PXM 5 [mem 0xa0000000000-0xbffffffffff] hotplug SRAT: Node 6 PXM 6 [mem 0xc0000000000-0xdffffffffff] hotplug SRAT: Node 7 PXM 7 [mem 0xe0000000000-0xfffffffffff] hotplug On boot, only node 0,1,2,3 exist. And the numa_meminfo will look like this: numa_meminfo.nr_blks = 9 1. 
on node 0: [0, 60000000] 2. on node 0: [100000000, 20000000000] 3. on node 1: [20000000000, 40000000000] 4. on node 4: [40000000000, 60000000000] 5. on node 5: [60000000000, 80000000000] 6. on node 2: [80000000000, a0000000000] 7. on node 3: [a0000000000, a0800000000] 8. on node 6: [c0000000000, a0800000000] 9. on node 7: [e0000000000, a0800000000] And numa_cleanup_meminfo() will merge 1 and 2, and remove 8,9 because the end address is over max_pfn, which is a0800000000. But 4 and 5 are not removed because their end addresses are less then max_pfn. But in fact, node 4 and 5 don't exist. In a word, numa_cleanup_meminfo() is not able to handle holes between nodes. Since memory ranges in node 4 and 5 are in numa_meminfo, in numa_register_memblks(), node 4 and 5 will be mistakenly set to online. If you run lscpu, it will show: NUMA node0 CPU(s): 0-14,128-142 NUMA node1 CPU(s): 15-29,143-157 NUMA node2 CPU(s): NUMA node3 CPU(s): NUMA node4 CPU(s): 62-76,190-204 NUMA node5 CPU(s): 78-92,206-220 In this patch, we use memblock_overlaps_region() to check if ranges in numa_meminfo overlap with ranges in memory_block. Since memory_block contains all available memory at boot time, if they overlap, it means the ranges exist. If not, then remove them from numa_meminfo. After this patch, lscpu will show: NUMA node0 CPU(s): 0-14,128-142 NUMA node1 CPU(s): 15-29,143-157 NUMA node4 CPU(s): 62-76,190-204 NUMA node5 CPU(s): 78-92,206-220 Signed-off-by: Tang Chen Reviewed-by: Yasuaki Ishimatsu Cc: Thomas Gleixner Cc: Tejun Heo Cc: Luiz Capitulino Cc: Xishi Qiu Cc: Will Deacon Cc: Vladimir Murzin Cc: Fabian Frederick Cc: Alexander Kuleshov Cc: Baoquan He Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/numa.c | 6 ++++-- include/linux/memblock.h | 2 ++ mm/memblock.c | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 4053bb58bf92..c3b3f653ed0c 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -246,8 +246,10 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi) bi->start = max(bi->start, low); bi->end = min(bi->end, high); - /* and there's no empty block */ - if (bi->start >= bi->end) + /* and there's no empty or non-exist block */ + if (bi->start >= bi->end || + !memblock_overlaps_region(&memblock.memory, + bi->start, bi->end - bi->start)) numa_remove_memblk_from(i--, mi); } diff --git a/include/linux/memblock.h b/include/linux/memblock.h index d312ae3b51fc..c518eb589260 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -77,6 +77,8 @@ int memblock_remove(phys_addr_t base, phys_addr_t size); int memblock_free(phys_addr_t base, phys_addr_t size); int memblock_reserve(phys_addr_t base, phys_addr_t size); void memblock_trim_memory(phys_addr_t align); +bool memblock_overlaps_region(struct memblock_type *type, + phys_addr_t base, phys_addr_t size); int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); diff --git a/mm/memblock.c b/mm/memblock.c index 08a5126338db..509255223688 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -91,7 +91,7 @@ static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, p return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); } -static bool __init_memblock memblock_overlaps_region(struct memblock_type *type, +bool __init_memblock memblock_overlaps_region(struct memblock_type *type, 
phys_addr_t base, phys_addr_t size) { unsigned long i; -- cgit v1.2.3-59-g8ed1b From acda0c3340282bc7c36f4e9a5e2ccb7bb7e64676 Mon Sep 17 00:00:00 2001 From: Aristeu Rozanski Date: Tue, 8 Sep 2015 15:02:06 -0700 Subject: mm/mempolicy.c: get rid of duplicated check for vma(VM_PFNMAP) in queue_pages_range() This check was introduced as part of 6f4576e3687 ("mempolicy: apply page table walker on queue_pages_range()") which got duplicated by 48684a65b4e ("mm: pagewalk: fix misbehavior of walk_page_range for vma(VM_PFNMAP)") by reintroducing it earlier on queue_page_test_walk() Signed-off-by: Aristeu Rozanski Acked-by: Naoya Horiguchi Acked-by: Kirill A. Shutemov Cc: Andrea Arcangeli Acked-by: Cyrill Gorcunov Cc: Dave Hansen Cc: Kirill A. Shutemov Cc: Pavel Emelyanov Cc: Benjamin Herrenschmidt Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index a7f1e0d1d6b8..d6f2caee28c0 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -608,9 +608,6 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, qp->prev = vma; - if (vma->vm_flags & VM_PFNMAP) - return 1; - if (flags & MPOL_MF_LAZY) { /* Similar to task_numa_work, skip inaccessible VMAs */ if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) -- cgit v1.2.3-59-g8ed1b From c5b4e1b02f2a0c2309ecd58a235a2f5ee4eb0074 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Tue, 8 Sep 2015 15:02:09 -0700 Subject: mm, page_isolation: make set/unset_migratetype_isolate() file-local Nowaday, set/unset_migratetype_isolate() is defined and used only in mm/page_isolation, so let's limit the scope within the file. Signed-off-by: Naoya Horiguchi Acked-by: David Rientjes Acked-by: Vlastimil Babka Cc: Joonsoo Kim Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-isolation.h | 5 ----- mm/page_isolation.c | 5 +++-- 2 files changed, 3 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 2dc1e1697b45..047d64706f2a 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -65,11 +65,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, bool skip_hwpoisoned_pages); -/* - * Internal functions. Changes pageblock's migrate type. 
- */ -int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages); -void unset_migratetype_isolate(struct page *page, unsigned migratetype); struct page *alloc_migrate_target(struct page *page, unsigned long private, int **resultp); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 32fdc1df05e5..4568fd58f70a 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -9,7 +9,8 @@ #include #include "internal.h" -int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages) +static int set_migratetype_isolate(struct page *page, + bool skip_hwpoisoned_pages) { struct zone *zone; unsigned long flags, pfn; @@ -72,7 +73,7 @@ out: return ret; } -void unset_migratetype_isolate(struct page *page, unsigned migratetype) +static void unset_migratetype_isolate(struct page *page, unsigned migratetype) { struct zone *zone; unsigned long flags, nr_pages; -- cgit v1.2.3-59-g8ed1b From 1b4ace4141db1ddc46f6c9915086dd5e18d7154d Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Tue, 8 Sep 2015 15:02:12 -0700 Subject: bootmem: avoid freeing to bootmem after bootmem is done Bootmem isn't popular any more, but some architectures still use it, and freeing to bootmem after calling free_all_bootmem_core() can end up scribbling over random memory. Instead, make sure the kernel generates a warning in this case by ensuring the node_bootmem_map field is non-NULL when are freeing or marking bootmem. An instance of this bug was just fixed in the tile architecture ("tile: use free_bootmem_late() for initrd") and catching this case more widely seems like a good thing. Signed-off-by: Chris Metcalf Acked-by: Mel Gorman Cc: Yasuaki Ishimatsu Cc: Pekka Enberg Cc: Paul McQuade Cc: Tang Chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/bootmem.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'mm') diff --git a/mm/bootmem.c b/mm/bootmem.c index a23dd1934654..3b6380784c28 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -236,6 +236,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) count += pages; while (pages--) __free_pages_bootmem(page++, cur++, 0); + bdata->node_bootmem_map = NULL; bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); @@ -294,6 +295,9 @@ static void __init __free(bootmem_data_t *bdata, sidx + bdata->node_min_pfn, eidx + bdata->node_min_pfn); + if (WARN_ON(bdata->node_bootmem_map == NULL)) + return; + if (bdata->hint_idx > sidx) bdata->hint_idx = sidx; @@ -314,6 +318,9 @@ static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx, eidx + bdata->node_min_pfn, flags); + if (WARN_ON(bdata->node_bootmem_map == NULL)) + return 0; + for (idx = sidx; idx < eidx; idx++) if (test_and_set_bit(idx, bdata->node_bootmem_map)) { if (exclusive) { -- cgit v1.2.3-59-g8ed1b From 0b802f101d0c6caeeee89066dc2c8665082a83df Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 8 Sep 2015 15:02:18 -0700 Subject: mm: vmscan: never isolate more pages than necessary If transparent huge pages are enabled, we can isolate many more pages than we actually need to scan, because we count both single and huge pages equally in isolate_lru_pages(). Since commit 5bc7b8aca942d ("mm: thp: add split tail pages to shrink page list in page reclaim"), we scan all the tail pages immediately after a huge page split (see shrink_page_list()). As a result, we can reclaim up to SWAP_CLUSTER_MAX * HPAGE_PMD_NR (512 MB) in one run! This is easy to catch on memcg reclaim with zswap enabled. 
The latter makes swapout instant so that if we happen to scan an unreferenced huge page we will evict both its head and tail pages immediately, which is likely to result in excessive reclaim. Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Reviewed-by: Michal Hocko Cc: Vlastimil Babka Cc: Minchan Kim Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index bf23c88621ce..0b1aab411cb0 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1359,7 +1359,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, unsigned long nr_taken = 0; unsigned long scan; - for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { + for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan && + !list_empty(src); scan++) { struct page *page; int nr_pages; -- cgit v1.2.3-59-g8ed1b From c54839a722a02818677bcabe57e957f0ce4f841d Mon Sep 17 00:00:00 2001 From: Jaewon Kim Date: Tue, 8 Sep 2015 15:02:21 -0700 Subject: vmscan: fix increasing nr_isolated incurred by putback unevictable pages reclaim_clean_pages_from_list() assumes that shrink_page_list() returns number of pages removed from the candidate list. But shrink_page_list() puts back mlocked pages without passing it to caller and without counting as nr_reclaimed. This increases nr_isolated. To fix this, this patch changes shrink_page_list() to pass unevictable pages back to caller. Caller will take care those pages. Minchan said: It fixes two issues. 1. With unevictable page, cma_alloc will be successful. Exactly speaking, cma_alloc of current kernel will fail due to unevictable pages. 2. fix leaking of NR_ISOLATED counter of vmstat With it, too_many_isolated works. Otherwise, it could make hang until the process get SIGKILL. Signed-off-by: Jaewon Kim Acked-by: Minchan Kim Cc: Mel Gorman Acked-by: Vlastimil Babka Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 0b1aab411cb0..8276a3a615ca 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1196,7 +1196,7 @@ cull_mlocked: if (PageSwapCache(page)) try_to_free_swap(page); unlock_page(page); - putback_lru_page(page); + list_add(&page->lru, &ret_pages); continue; activate_locked: -- cgit v1.2.3-59-g8ed1b From fa23f56d90ed7bd760ae2aea6dfb2f501a099e90 Mon Sep 17 00:00:00 2001 From: "Sean O. Stalley" Date: Tue, 8 Sep 2015 15:02:24 -0700 Subject: mm: add support for __GFP_ZERO flag to dma_pool_alloc() Currently a call to dma_pool_alloc() with a ___GFP_ZERO flag returns a non-zeroed memory region. This patchset adds support for the __GFP_ZERO flag to dma_pool_alloc(), adds 2 wrapper functions for allocing zeroed memory from a pool, and provides a coccinelle script for finding & replacing instances of dma_pool_alloc() followed by memset(0) with a single dma_pool_zalloc() call. There was some concern that this always calls memset() to zero, instead of passing __GFP_ZERO into the page allocator. [https://lkml.org/lkml/2015/7/15/881] I ran a test on my system to get an idea of how often dma_pool_alloc() calls into pool_alloc_page(). 
After Boot: [ 30.119863] alloc_calls:541, page_allocs:7 After an hour: [ 3600.951031] alloc_calls:9566, page_allocs:12 After copying 1GB file onto a USB drive: [ 4260.657148] alloc_calls:17225, page_allocs:12 It doesn't look like dma_pool_alloc() calls down to the page allocator very often (at least on my system). This patch (of 4): Currently the __GFP_ZERO flag is ignored by dma_pool_alloc(). Make dma_pool_alloc() zero the memory if this flag is set. Signed-off-by: Sean O. Stalley Acked-by: David Rientjes Cc: Vinod Koul Cc: Bjorn Helgaas Cc: Gilles Muller Cc: Nicolas Palix Cc: Michal Marek Cc: Sebastian Andrzej Siewior Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/dmapool.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/dmapool.c b/mm/dmapool.c index 4b657099111f..71a8998cd03a 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -337,7 +337,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */ spin_unlock_irqrestore(&pool->lock, flags); - page = pool_alloc_page(pool, mem_flags); + page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO)); if (!page) return NULL; @@ -375,9 +375,14 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, break; } } - memset(retval, POOL_POISON_ALLOCATED, pool->size); + if (!(mem_flags & __GFP_ZERO)) + memset(retval, POOL_POISON_ALLOCATED, pool->size); #endif spin_unlock_irqrestore(&pool->lock, flags); + + if (mem_flags & __GFP_ZERO) + memset(retval, 0, pool->size); + return retval; } EXPORT_SYMBOL(dma_pool_alloc); -- cgit v1.2.3-59-g8ed1b From f2849aa09d4fbc4145ebb5dc96187c9ab967f5cf Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 8 Sep 2015 15:02:36 -0700 Subject: mm, compaction: more robust check for scanners meeting Assorted compaction cleanups and optimizations. The interesting patches are 4 and 5. In 4, skipping of compound pages in single iteration is improved for migration scanner, so it works also for !PageLRU compound pages such as hugetlbfs, slab etc. Patch 5 introduces this kind of skipping in the free scanner. The trick is that we can read compound_order() without any protection, if we are careful to filter out values larger than MAX_ORDER. The only danger is that we skip too much. The same trick was already used for reading the freepage order in the migrate scanner. To demonstrate improvements of Patches 4 and 5 I've run stress-highalloc from mmtests, set to simulate THP allocations (including __GFP_COMP) on a 4GB system where 1GB was occupied by hugetlbfs pages. I'll include just the relevant stats: Patch 3 Patch 4 Patch 5 Compaction stalls 7523 7529 7515 Compaction success 323 304 322 Compaction failures 7200 7224 7192 Page migrate success 247778 264395 240737 Page migrate failure 15358 33184 21621 Compaction pages isolated 906928 980192 909983 Compaction migrate scanned 2005277 1692805 1498800 Compaction free scanned 13255284 11539986 9011276 Compaction cost 288 305 277 With 5 iterations per patch, the results are still noisy, but we can see that Patch 4 does reduce migrate_scanned by 15% thanks to skipping the hugetlbfs pages at once. Interestingly, free_scanned is also reduced and I have no idea why. Patch 5 further reduces free_scanned as expected, by 15%. Other stats are unaffected modulo noise. [1] https://lkml.org/lkml/2015/1/19/158 This patch (of 5): Compaction should finish when the migration and free scanner meet, i.e. they reach the same pageblock. 
Currently however, the test in compact_finished() simply just compares the exact pfns, which may yield a false negative when the free scanner position is in the middle of a pageblock and the migration scanner reaches the begining of the same pageblock. This hasn't been a problem until commit e14c720efdd7 ("mm, compaction: remember position within pageblock in free pages scanner") allowed the free scanner position to be in the middle of a pageblock between invocations. The hot-fix 1d5bfe1ffb5b ("mm, compaction: prevent infinite loop in compact_zone") prevented the issue by adding a special check in the migration scanner to satisfy the current detection of scanners meeting. However, the proper fix is to make the detection more robust. This patch introduces the compact_scanners_met() function that returns true when the free scanner position is in the same or lower pageblock than the migration scanner. The special case in isolate_migratepages() introduced by 1d5bfe1ffb5b is removed. Suggested-by: Joonsoo Kim Signed-off-by: Vlastimil Babka Cc: Minchan Kim Acked-by: Mel Gorman Acked-by: Joonsoo Kim Acked-by: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Acked-by: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 018f08da99a2..7077b81a4893 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -897,6 +897,16 @@ static bool suitable_migration_target(struct page *page) return false; } +/* + * Test whether the free scanner has reached the same or lower pageblock than + * the migration scanner, and compaction should thus terminate. + */ +static inline bool compact_scanners_met(struct compact_control *cc) +{ + return (cc->free_pfn >> pageblock_order) + <= (cc->migrate_pfn >> pageblock_order); +} + /* * Based on information in the current compact_control, find blocks * suitable for isolating free pages from and then isolate them. @@ -1127,12 +1137,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, } acct_isolated(zone, cc); - /* - * Record where migration scanner will be restarted. If we end up in - * the same pageblock as the free scanner, make the scanners fully - * meet so that compact_finished() terminates compaction. - */ - cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn; + /* Record where migration scanner will be restarted. */ + cc->migrate_pfn = low_pfn; return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; } @@ -1147,7 +1153,7 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc, return COMPACT_PARTIAL; /* Compaction run completes if the migrate and free scanner meet */ - if (cc->free_pfn <= cc->migrate_pfn) { + if (compact_scanners_met(cc)) { /* Let the next compaction start anew. 
*/ zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; @@ -1376,7 +1382,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) * migrate_pages() may return -ENOMEM when scanners meet * and we want compact_finished() to detect it */ - if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) { + if (err == -ENOMEM && !compact_scanners_met(cc)) { ret = COMPACT_PARTIAL; goto out; } -- cgit v1.2.3-59-g8ed1b From f5f61a320bf6275f37fcabf6645b4ac8e683c007 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 8 Sep 2015 15:02:39 -0700 Subject: mm, compaction: simplify handling restart position in free pages scanner Handling the position where compaction free scanner should restart (stored in cc->free_pfn) got more complex with commit e14c720efdd7 ("mm, compaction: remember position within pageblock in free pages scanner"). Currently the position is updated in each loop iteration of isolate_freepages(), although it should be enough to update it only when breaking from the loop. There's also an extra check outside the loop updates the position in case we have met the migration scanner. This can be simplified if we move the test for having isolated enough from the for-loop header next to the test for contention, and determining the restart position only in these cases. We can reuse the isolate_start_pfn variable for this instead of setting cc->free_pfn directly. Outside the loop, we can simply set cc->free_pfn to current value of isolate_start_pfn without any extra check. Also add a VM_BUG_ON to catch possible mistake in the future, in case we later add a new condition that terminates isolate_freepages_block() prematurely without also considering the condition in isolate_freepages(). Signed-off-by: Vlastimil Babka Cc: Minchan Kim Acked-by: Mel Gorman Acked-by: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 7077b81a4893..2c1e1ff321bf 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -943,8 +943,7 @@ static void isolate_freepages(struct compact_control *cc) * pages on cc->migratepages. We stop searching if the migrate * and free page scanners meet or enough free pages are isolated. */ - for (; block_start_pfn >= low_pfn && - cc->nr_migratepages > cc->nr_freepages; + for (; block_start_pfn >= low_pfn; block_end_pfn = block_start_pfn, block_start_pfn -= pageblock_nr_pages, isolate_start_pfn = block_start_pfn) { @@ -976,6 +975,8 @@ static void isolate_freepages(struct compact_control *cc) block_end_pfn, freelist, false); /* + * If we isolated enough freepages, or aborted due to async + * compaction being contended, terminate the loop. * Remember where the free scanner should restart next time, * which is where isolate_freepages_block() left off. * But if it scanned the whole pageblock, isolate_start_pfn @@ -984,27 +985,31 @@ static void isolate_freepages(struct compact_control *cc) * In that case we will however want to restart at the start * of the previous pageblock. */ - cc->free_pfn = (isolate_start_pfn < block_end_pfn) ? 
- isolate_start_pfn : - block_start_pfn - pageblock_nr_pages; - - /* - * isolate_freepages_block() might have aborted due to async - * compaction being contended - */ - if (cc->contended) + if ((cc->nr_freepages >= cc->nr_migratepages) + || cc->contended) { + if (isolate_start_pfn >= block_end_pfn) + isolate_start_pfn = + block_start_pfn - pageblock_nr_pages; break; + } else { + /* + * isolate_freepages_block() should not terminate + * prematurely unless contended, or isolated enough + */ + VM_BUG_ON(isolate_start_pfn < block_end_pfn); + } } /* split_free_page does not map the pages */ map_pages(freelist); /* - * If we crossed the migrate scanner, we want to keep it that way - * so that compact_finished() may detect this + * Record where the free scanner will restart next time. Either we + * broke from the loop and set isolate_start_pfn based on the last + * call to isolate_freepages_block(), or we met the migration scanner + * and the loop terminated due to isolate_start_pfn < low_pfn */ - if (block_start_pfn < low_pfn) - cc->free_pfn = cc->migrate_pfn; + cc->free_pfn = isolate_start_pfn; } /* -- cgit v1.2.3-59-g8ed1b From 02333641e2cf4ac9f23eeeb01183ed8318d346ca Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 8 Sep 2015 15:02:42 -0700 Subject: mm, compaction: encapsulate resetting cached scanner positions Reseting the cached compaction scanner positions is now open-coded in __reset_isolation_suitable() and compact_finished(). Encapsulate the functionality in a new function reset_cached_positions(). Signed-off-by: Vlastimil Babka Cc: Minchan Kim Cc: Mel Gorman Cc: Joonsoo Kim Acked-by: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 2c1e1ff321bf..0dce7e87d771 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -207,6 +207,13 @@ static inline bool isolation_suitable(struct compact_control *cc, return !get_pageblock_skip(page); } +static void reset_cached_positions(struct zone *zone) +{ + zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; + zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; + zone->compact_cached_free_pfn = zone_end_pfn(zone); +} + /* * This function is called to clear all cached information on pageblocks that * should be skipped for page isolation when the migrate and free page scanner @@ -218,9 +225,6 @@ static void __reset_isolation_suitable(struct zone *zone) unsigned long end_pfn = zone_end_pfn(zone); unsigned long pfn; - zone->compact_cached_migrate_pfn[0] = start_pfn; - zone->compact_cached_migrate_pfn[1] = start_pfn; - zone->compact_cached_free_pfn = end_pfn; zone->compact_blockskip_flush = false; /* Walk the zone and mark every pageblock as suitable for isolation */ @@ -238,6 +242,8 @@ static void __reset_isolation_suitable(struct zone *zone) clear_pageblock_skip(page); } + + reset_cached_positions(zone); } void reset_isolation_suitable(pg_data_t *pgdat) @@ -1160,9 +1166,7 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc, /* Compaction run completes if the migrate and free scanner meet */ if (compact_scanners_met(cc)) { /* Let the next compaction start anew. 
*/ - zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; - zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; - zone->compact_cached_free_pfn = zone_end_pfn(zone); + reset_cached_positions(zone); /* * Mark that the PG_migrate_skip information should be cleared -- cgit v1.2.3-59-g8ed1b From 29c0dde830f8c08ceacf2d3edf6dc8ddd9a9c3c4 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 8 Sep 2015 15:02:46 -0700 Subject: mm, compaction: always skip all compound pages by order in migrate scanner The compaction migrate scanner tries to skip THP pages by their order, to reduce number of iterations for pages it cannot isolate. The check is only done if PageLRU() is true, which means it applies to THP pages, but not e.g. hugetlbfs pages or any other non-LRU compound pages, which we have to iterate by base pages. This limitation comes from the assumption that it's only safe to read compound_order() when we have the zone's lru_lock and THP cannot be split under us. But the only danger (after filtering out order values that are not below MAX_ORDER, to prevent overflows) is that we skip too much or too little after reading a bogus compound_order() due to a rare race. This is the same reasoning as patch 99c0fd5e51c4 ("mm, compaction: skip buddy pages by their order in the migrate scanner") introduced for unsafely reading PageBuddy() order. After this patch, all pages are tested for PageCompound() and we skip them by compound_order(). The test is done after the test for balloon_page_movable() as we don't want to assume if balloon pages (or other pages with own isolation and migration implementation if a generic API gets implemented) are compound or not. When tested with stress-highalloc from mmtests on 4GB system with 1GB hugetlbfs pages, the vmstat compact_migrate_scanned count decreased by 15%. [kirill.shutemov@linux.intel.com: change PageTransHuge checks to PageCompound for different series was squashed here] Signed-off-by: Vlastimil Babka Cc: Minchan Kim Acked-by: Mel Gorman Acked-by: Joonsoo Kim Acked-by: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 47 +++++++++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 0dce7e87d771..1ccb015ab1eb 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -680,6 +680,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* Time to isolate some pages for migration */ for (; low_pfn < end_pfn; low_pfn++) { + bool is_lru; + /* * Periodically drop the lock (if held) regardless of its * contention, to give chance to IRQs. Abort async compaction @@ -723,36 +725,35 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * It's possible to migrate LRU pages and balloon pages * Skip any other type of page */ - if (!PageLRU(page)) { + is_lru = PageLRU(page); + if (!is_lru) { if (unlikely(balloon_page_movable(page))) { if (balloon_page_isolate(page)) { /* Successfully isolated */ goto isolate_success; } } - continue; } /* - * PageLRU is set. lru_lock normally excludes isolation - * splitting and collapsing (collapsing has already happened - * if PageLRU is set) but the lock is not necessarily taken - * here and it is wasteful to take it just to check transhuge. 
- * Check TransHuge without lock and skip the whole pageblock if - * it's either a transhuge or hugetlbfs page, as calling - * compound_order() without preventing THP from splitting the - * page underneath us may return surprising results. + * Regardless of being on LRU, compound pages such as THP and + * hugetlbfs are not to be compacted. We can potentially save + * a lot of iterations if we skip them at once. The check is + * racy, but we can consider only valid values and the only + * danger is skipping too much. */ - if (PageTransHuge(page)) { - if (!locked) - low_pfn = ALIGN(low_pfn + 1, - pageblock_nr_pages) - 1; - else - low_pfn += (1 << compound_order(page)) - 1; + if (PageCompound(page)) { + unsigned int comp_order = compound_order(page); + + if (likely(comp_order < MAX_ORDER)) + low_pfn += (1UL << comp_order) - 1; continue; } + if (!is_lru) + continue; + /* * Migration will fail if an anonymous page is pinned in memory, * so avoid taking lru_lock and isolating it unnecessarily in an @@ -769,11 +770,17 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, if (!locked) break; - /* Recheck PageLRU and PageTransHuge under lock */ + /* Recheck PageLRU and PageCompound under lock */ if (!PageLRU(page)) continue; - if (PageTransHuge(page)) { - low_pfn += (1 << compound_order(page)) - 1; + + /* + * Page become compound since the non-locked check, + * and it's on LRU. It can only be a THP so the order + * is safe to read and it's 0 for tail pages. + */ + if (unlikely(PageCompound(page))) { + low_pfn += (1UL << compound_order(page)) - 1; continue; } } @@ -784,7 +791,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, if (__isolate_lru_page(page, isolate_mode) != 0) continue; - VM_BUG_ON_PAGE(PageTransCompound(page), page); + VM_BUG_ON_PAGE(PageCompound(page), page); /* Successfully isolated */ del_page_from_lru_list(page, lruvec, page_lru(page)); -- cgit v1.2.3-59-g8ed1b From 9fcd6d2e052eef525e94a9ae58dbe7ed4df4f5a7 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 8 Sep 2015 15:02:49 -0700 Subject: mm, compaction: skip compound pages by order in free scanner The compaction free scanner is looking for PageBuddy() pages and skipping all others. For large compound pages such as THP or hugetlbfs, we can save a lot of iterations if we skip them at once using their compound_order(). This is generally unsafe and we can read a bogus value of order due to a race, but if we are careful, the only danger is skipping too much. When tested with stress-highalloc from mmtests on 4GB system with 1GB hugetlbfs pages, the vmstat compact_free_scanned count decreased by at least 15%. Signed-off-by: Vlastimil Babka Cc: Minchan Kim Cc: Mel Gorman Acked-by: Joonsoo Kim Acked-by: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 1ccb015ab1eb..8f64d3533990 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -437,6 +437,24 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, if (!valid_page) valid_page = page; + + /* + * For compound pages such as THP and hugetlbfs, we can save + * potentially a lot of iterations if we skip them at once. + * The check is racy, but we can consider only valid values + * and the only danger is skipping too much. 
+ */ + if (PageCompound(page)) { + unsigned int comp_order = compound_order(page); + + if (likely(comp_order < MAX_ORDER)) { + blockpfn += (1UL << comp_order) - 1; + cursor += (1UL << comp_order) - 1; + } + + goto isolate_fail; + } + if (!PageBuddy(page)) goto isolate_fail; @@ -496,6 +514,13 @@ isolate_fail: } + /* + * There is a tiny chance that we have read bogus compound_order(), + * so be careful to not go outside of the pageblock. + */ + if (unlikely(blockpfn > end_pfn)) + blockpfn = end_pfn; + trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, nr_scanned, total_isolated); -- cgit v1.2.3-59-g8ed1b From 6b0f68e32ea8749ff7d4a66cd5761e915e48e59d Mon Sep 17 00:00:00 2001 From: Mark Salter Date: Tue, 8 Sep 2015 15:03:01 -0700 Subject: mm: add utility for early copy from unmapped ram When booting an arm64 kernel w/initrd using UEFI/grub, use of mem= will likely cut off part or all of the initrd. This leaves it outside the kernel linear map which leads to failure when unpacking. The x86 code has a similar need to relocate an initrd outside of mapped memory in some cases. The current x86 code uses early_memremap() to copy the original initrd from unmapped to mapped RAM. This patchset creates a generic copy_from_early_mem() utility based on that x86 code and has arm64 and x86 share it in their respective initrd relocation code. This patch (of 3): In some early boot circumstances, it may be necessary to copy from RAM outside the kernel linear mapping to mapped RAM. The need to relocate an initrd is one example in the x86 code. This patch creates a helper function based on current x86 code. Signed-off-by: Mark Salter Cc: Catalin Marinas Cc: Will Deacon Cc: Arnd Bergmann Cc: Ard Biesheuvel Cc: Mark Rutland Cc: Russell King Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/early_ioremap.h | 6 ++++++ mm/early_ioremap.c | 22 ++++++++++++++++++++++ 2 files changed, 28 insertions(+) (limited to 'mm') diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h index a5de55c04fb2..e539f27ec51b 100644 --- a/include/asm-generic/early_ioremap.h +++ b/include/asm-generic/early_ioremap.h @@ -33,6 +33,12 @@ extern void early_ioremap_setup(void); */ extern void early_ioremap_reset(void); +/* + * Early copy from unmapped memory to kernel mapped memory. 
+ */ +extern void copy_from_early_mem(void *dest, phys_addr_t src, + unsigned long size); + #else static inline void early_ioremap_init(void) { } static inline void early_ioremap_setup(void) { } diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index e10ccd299d66..a0baeb4be934 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -217,6 +217,28 @@ early_memremap(resource_size_t phys_addr, unsigned long size) return (__force void *)__early_ioremap(phys_addr, size, FIXMAP_PAGE_NORMAL); } + +#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) + +void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) +{ + unsigned long slop, clen; + char *p; + + while (size) { + slop = src & ~PAGE_MASK; + clen = size; + if (clen > MAX_MAP_CHUNK - slop) + clen = MAX_MAP_CHUNK - slop; + p = early_memremap(src & PAGE_MASK, clen + slop); + memcpy(dest, p + slop, clen); + early_memunmap(p, clen + slop); + dest += clen; + src += clen; + size -= clen; + } +} + #else /* CONFIG_MMU */ void __init __iomem * -- cgit v1.2.3-59-g8ed1b From 7d1900c744b2e4687b3e467edf58373c02bcf22d Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Tue, 8 Sep 2015 15:03:10 -0700 Subject: mm/hwpoison: fix failure to split thp w/ refcount held THP pages will get a refcount in madvise_hwpoison() w/ MF_COUNT_INCREASED flag, however, the refcount is still held when fail to split THP pages. Fix it by reducing the refcount of THP pages when fail to split THP. Signed-off-by: Wanpeng Li Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory-failure.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'mm') diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 016c814101ed..8ad923a93539 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1725,6 +1725,8 @@ int soft_offline_page(struct page *page, int flags) if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) { pr_info("soft offline: %#lx: failed to split THP\n", pfn); + if (flags & MF_COUNT_INCREASED) + put_page(page); return -EBUSY; } } -- cgit v1.2.3-59-g8ed1b From 1e0e635be82132167a134b5a9c884e70e61f8373 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Tue, 8 Sep 2015 15:03:13 -0700 Subject: mm/hwpoison: fix PageHWPoison test/set race There is a race between madvise_hwpoison path and memory_failure: CPU0 CPU1 madvise_hwpoison get_user_pages_fast PageHWPoison check (false) memory_failure TestSetPageHWPoison soft_offline_page PageHWPoison check (true) return -EBUSY (without put_page) Signed-off-by: Wanpeng Li Suggested-by: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory-failure.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'mm') diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 8ad923a93539..863544d84a09 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1719,6 +1719,8 @@ int soft_offline_page(struct page *page, int flags) if (PageHWPoison(page)) { pr_info("soft offline: %#lx page already poisoned\n", pfn); + if (flags & MF_COUNT_INCREASED) + put_page(page); return -EBUSY; } if (!PageHuge(page) && PageTransHuge(hpage)) { -- cgit v1.2.3-59-g8ed1b From 94bf4ec84a84d3ab2513b4e681fd3d083328d76d Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Tue, 8 Sep 2015 15:03:15 -0700 Subject: mm/hwpoison: introduce put_hwpoison_page to put refcount for memory error handling Introduce put_hwpoison_page to put refcount for memory error handling. 
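A standalone toy model of the refcount rule the new helper encodes (illustrative only: struct toy_page and the helpers below are invented stand-ins for struct page, PageHuge() and PageTransHuge()):

#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	int refcount;
	struct toy_page *head;	/* NULL for a head (or order-0) page */
	bool huge;		/* hugetlbfs page */
	bool trans_huge;	/* transparent huge page */
};

static void toy_put(struct toy_page *p) { p->refcount--; }

static void put_hwpoison_page_model(struct toy_page *page)
{
	struct toy_page *head = page->head ? page->head : page;

	if (head->huge) {		/* hugetlbfs: refcounted on the head */
		toy_put(head);
		return;
	}
	if (head->trans_huge && page != head)
		toy_put(head);		/* drop the extra reference on the THP head */
	toy_put(page);			/* plus the error page's own reference */
}

int main(void)
{
	struct toy_page head = { .refcount = 2, .trans_huge = true };
	struct toy_page tail = { .refcount = 1, .head = &head, .trans_huge = true };

	put_hwpoison_page_model(&tail);
	printf("head=%d tail=%d\n", head.refcount, tail.refcount); /* head=1 tail=0 */
	return 0;
}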
Signed-off-by: Wanpeng Li Suggested-by: Naoya Horiguchi Acked-by: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + mm/memory-failure.c | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index bab8ff89da50..11df1a8ea38b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2169,6 +2169,7 @@ extern int memory_failure(unsigned long pfn, int trapno, int flags); extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); extern int unpoison_memory(unsigned long pfn); extern int get_hwpoison_page(struct page *page); +extern void put_hwpoison_page(struct page *page); extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; extern void shake_page(struct page *p, int access); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 863544d84a09..5ceb8253e33b 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -934,6 +934,27 @@ int get_hwpoison_page(struct page *page) } EXPORT_SYMBOL_GPL(get_hwpoison_page); +/** + * put_hwpoison_page() - Put refcount for memory error handling: + * @page: raw error page (hit by memory error) + */ +void put_hwpoison_page(struct page *page) +{ + struct page *head = compound_head(page); + + if (PageHuge(head)) { + put_page(head); + return; + } + + if (PageTransHuge(head)) + if (page != head) + put_page(head); + + put_page(page); +} +EXPORT_SYMBOL_GPL(put_hwpoison_page); + /* * Do all that is necessary to remove user space mappings. Unmap * the pages and send SIGBUS to the processes if the data was dirty. -- cgit v1.2.3-59-g8ed1b From be91748fa6ca6909853c3dc630d65e45084962d7 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Tue, 8 Sep 2015 15:03:18 -0700 Subject: mm/hwpoison: fix refcount of THP head page in no-injection case Hwpoison injection takes a refcount of target page and another refcount of head page of THP if the target page is the tail page of a THP. However, current code doesn't release the refcount of head page if the THP is not supported to be injected wrt hwpoison filter. Fix it by reducing the refcount of head page if the target page is the tail page of a THP and it is not supported to be injected. Signed-off-by: Wanpeng Li Acked-by: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hwpoison-inject.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c index bf73ac17dad4..aeba0edd6e44 100644 --- a/mm/hwpoison-inject.c +++ b/mm/hwpoison-inject.c @@ -58,7 +58,7 @@ inject: pr_info("Injecting memory failure at pfn %#lx\n", pfn); return memory_failure(pfn, 18, MF_COUNT_INCREASED); put_out: - put_page(p); + put_hwpoison_page(p); return 0; } -- cgit v1.2.3-59-g8ed1b From 665d9da7f0a9bd80b64d0024630806e45c7ff7d7 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Tue, 8 Sep 2015 15:03:21 -0700 Subject: mm/hwpoison: replace most of put_page in memory error handling by put_hwpoison_page Replace most instances of put_page() in memory error handling with put_hwpoison_page(). 
Signed-off-by: Wanpeng Li Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory-failure.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) (limited to 'mm') diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 5ceb8253e33b..5420d3819adf 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1174,9 +1174,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) pr_err("MCE: %#lx: thp split failed\n", pfn); if (TestClearPageHWPoison(p)) atomic_long_sub(nr_pages, &num_poisoned_pages); - put_page(p); - if (p != hpage) - put_page(hpage); + put_hwpoison_page(p); return -EBUSY; } VM_BUG_ON_PAGE(!page_count(p), p); @@ -1237,14 +1235,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags) printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); atomic_long_sub(nr_pages, &num_poisoned_pages); unlock_page(hpage); - put_page(hpage); + put_hwpoison_page(hpage); return 0; } if (hwpoison_filter(p)) { if (TestClearPageHWPoison(p)) atomic_long_sub(nr_pages, &num_poisoned_pages); unlock_page(hpage); - put_page(hpage); + put_hwpoison_page(hpage); return 0; } @@ -1258,7 +1256,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) { action_result(pfn, MF_MSG_POISONED_HUGE, MF_IGNORED); unlock_page(hpage); - put_page(hpage); + put_hwpoison_page(hpage); return 0; } /* @@ -1492,9 +1490,9 @@ int unpoison_memory(unsigned long pfn) } unlock_page(page); - put_page(page); + put_hwpoison_page(page); if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1)) - put_page(page); + put_hwpoison_page(page); return 0; } @@ -1554,7 +1552,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags) /* * Try to free it. */ - put_page(page); + put_hwpoison_page(page); shake_page(page, 1); /* @@ -1563,7 +1561,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags) ret = __get_any_page(page, pfn, 0); if (!PageLRU(page)) { /* Drop page reference which is from __get_any_page() */ - put_page(page); + put_hwpoison_page(page); pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", pfn, page->flags); return -EIO; @@ -1586,7 +1584,7 @@ static int soft_offline_huge_page(struct page *page, int flags) lock_page(hpage); if (PageHWPoison(hpage)) { unlock_page(hpage); - put_page(hpage); + put_hwpoison_page(hpage); pr_info("soft offline: %#lx hugepage already poisoned\n", pfn); return -EBUSY; } @@ -1597,7 +1595,7 @@ static int soft_offline_huge_page(struct page *page, int flags) * get_any_page() and isolate_huge_page() takes a refcount each, * so need to drop one here. */ - put_page(hpage); + put_hwpoison_page(hpage); if (!ret) { pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn); return -EBUSY; @@ -1646,7 +1644,7 @@ static int __soft_offline_page(struct page *page, int flags) wait_on_page_writeback(page); if (PageHWPoison(page)) { unlock_page(page); - put_page(page); + put_hwpoison_page(page); pr_info("soft offline: %#lx page already poisoned\n", pfn); return -EBUSY; } @@ -1661,7 +1659,7 @@ static int __soft_offline_page(struct page *page, int flags) * would need to fix isolation locking first. 
*/ if (ret == 1) { - put_page(page); + put_hwpoison_page(page); pr_info("soft_offline: %#lx: invalidated\n", pfn); SetPageHWPoison(page); atomic_long_inc(&num_poisoned_pages); @@ -1678,7 +1676,7 @@ static int __soft_offline_page(struct page *page, int flags) * Drop page reference which is came from get_any_page() * successful isolate_lru_page() already took another one. */ - put_page(page); + put_hwpoison_page(page); if (!ret) { LIST_HEAD(pagelist); inc_zone_page_state(page, NR_ISOLATED_ANON + @@ -1741,7 +1739,7 @@ int soft_offline_page(struct page *page, int flags) if (PageHWPoison(page)) { pr_info("soft offline: %#lx page already poisoned\n", pfn); if (flags & MF_COUNT_INCREASED) - put_page(page); + put_hwpoison_page(page); return -EBUSY; } if (!PageHuge(page) && PageTransHuge(hpage)) { @@ -1749,7 +1747,7 @@ int soft_offline_page(struct page *page, int flags) pr_info("soft offline: %#lx: failed to split THP\n", pfn); if (flags & MF_COUNT_INCREASED) - put_page(page); + put_hwpoison_page(page); return -EBUSY; } } -- cgit v1.2.3-59-g8ed1b From 8e30456b6c56029ecbb43b777519175e478adfbf Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Tue, 8 Sep 2015 15:03:24 -0700 Subject: mm/hwpoison: introduce num_poisoned_pages wrappers num_poisoned_pages counter will be changed outside mm/memory-failure.c by a subsequent patch, so this patch prepares wrappers to manipulate it. Signed-off-by: Naoya Horiguchi Tested-by: Wanpeng Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swapops.h | 23 +++++++++++++++++++++++ mm/memory-failure.c | 30 ++++++++++++++---------------- 2 files changed, 37 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/include/linux/swapops.h b/include/linux/swapops.h index cedf3d3c373f..ec04669f2a3b 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -164,6 +164,9 @@ static inline int is_write_migration_entry(swp_entry_t entry) #endif #ifdef CONFIG_MEMORY_FAILURE + +extern atomic_long_t num_poisoned_pages __read_mostly; + /* * Support for hardware poisoned pages */ @@ -177,6 +180,26 @@ static inline int is_hwpoison_entry(swp_entry_t entry) { return swp_type(entry) == SWP_HWPOISON; } + +static inline void num_poisoned_pages_inc(void) +{ + atomic_long_inc(&num_poisoned_pages); +} + +static inline void num_poisoned_pages_dec(void) +{ + atomic_long_dec(&num_poisoned_pages); +} + +static inline void num_poisoned_pages_add(long num) +{ + atomic_long_add(num, &num_poisoned_pages); +} + +static inline void num_poisoned_pages_sub(long num) +{ + atomic_long_sub(num, &num_poisoned_pages); +} #else static inline swp_entry_t make_hwpoison_entry(struct page *page) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 5420d3819adf..393ea13b0754 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1121,7 +1121,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) nr_pages = 1 << compound_order(hpage); else /* normal page or thp */ nr_pages = 1; - atomic_long_add(nr_pages, &num_poisoned_pages); + num_poisoned_pages_add(nr_pages); /* * We need/can do nothing about count=0 pages. 
@@ -1149,7 +1149,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) if (PageHWPoison(hpage)) { if ((hwpoison_filter(p) && TestClearPageHWPoison(p)) || (p != hpage && TestSetPageHWPoison(hpage))) { - atomic_long_sub(nr_pages, &num_poisoned_pages); + num_poisoned_pages_sub(nr_pages); unlock_page(hpage); return 0; } @@ -1173,7 +1173,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) else pr_err("MCE: %#lx: thp split failed\n", pfn); if (TestClearPageHWPoison(p)) - atomic_long_sub(nr_pages, &num_poisoned_pages); + num_poisoned_pages_sub(nr_pages); put_hwpoison_page(p); return -EBUSY; } @@ -1233,14 +1233,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags) */ if (!PageHWPoison(p)) { printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); - atomic_long_sub(nr_pages, &num_poisoned_pages); + num_poisoned_pages_sub(nr_pages); unlock_page(hpage); put_hwpoison_page(hpage); return 0; } if (hwpoison_filter(p)) { if (TestClearPageHWPoison(p)) - atomic_long_sub(nr_pages, &num_poisoned_pages); + num_poisoned_pages_sub(nr_pages); unlock_page(hpage); put_hwpoison_page(hpage); return 0; @@ -1469,7 +1469,7 @@ int unpoison_memory(unsigned long pfn) return 0; } if (TestClearPageHWPoison(p)) - atomic_long_dec(&num_poisoned_pages); + num_poisoned_pages_dec(); pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn); return 0; } @@ -1483,7 +1483,7 @@ int unpoison_memory(unsigned long pfn) */ if (TestClearPageHWPoison(page)) { pr_info("MCE: Software-unpoisoned page %#lx\n", pfn); - atomic_long_sub(nr_pages, &num_poisoned_pages); + num_poisoned_pages_sub(nr_pages); freeit = 1; if (PageHuge(page)) clear_page_hwpoison_huge_page(page); @@ -1619,11 +1619,10 @@ static int soft_offline_huge_page(struct page *page, int flags) if (PageHuge(page)) { set_page_hwpoison_huge_page(hpage); dequeue_hwpoisoned_huge_page(hpage); - atomic_long_add(1 << compound_order(hpage), - &num_poisoned_pages); + num_poisoned_pages_add(1 << compound_order(hpage)); } else { SetPageHWPoison(page); - atomic_long_inc(&num_poisoned_pages); + num_poisoned_pages_inc(); } } return ret; @@ -1662,7 +1661,7 @@ static int __soft_offline_page(struct page *page, int flags) put_hwpoison_page(page); pr_info("soft_offline: %#lx: invalidated\n", pfn); SetPageHWPoison(page); - atomic_long_inc(&num_poisoned_pages); + num_poisoned_pages_inc(); return 0; } @@ -1683,7 +1682,7 @@ static int __soft_offline_page(struct page *page, int flags) page_is_file_cache(page)); list_add(&page->lru, &pagelist); if (!TestSetPageHWPoison(page)) - atomic_long_inc(&num_poisoned_pages); + num_poisoned_pages_dec(); ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, MIGRATE_SYNC, MR_MEMORY_FAILURE); if (ret) { @@ -1699,7 +1698,7 @@ static int __soft_offline_page(struct page *page, int flags) if (ret > 0) ret = -EIO; if (TestClearPageHWPoison(page)) - atomic_long_dec(&num_poisoned_pages); + num_poisoned_pages_dec(); } } else { pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", @@ -1765,11 +1764,10 @@ int soft_offline_page(struct page *page, int flags) if (PageHuge(page)) { set_page_hwpoison_huge_page(hpage); if (!dequeue_hwpoisoned_huge_page(hpage)) - atomic_long_add(1 << compound_order(hpage), - &num_poisoned_pages); + num_poisoned_pages_add(1 << compound_order(hpage)); } else { if (!TestSetPageHWPoison(page)) - atomic_long_inc(&num_poisoned_pages); + num_poisoned_pages_inc(); } } return ret; -- cgit v1.2.3-59-g8ed1b From da1b13ccfbebe0b9d69b5d61eff0a675e19e69a5 Mon Sep 17 00:00:00 2001 From: Wanpeng Li 
Date: Tue, 8 Sep 2015 15:03:27 -0700 Subject: mm/hwpoison: fix race between soft_offline_page and unpoison_memory Wanpeng Li reported a race between soft_offline_page() and unpoison_memory(), which causes the following kernel panic: BUG: Bad page state in process bash pfn:97000 page:ffffea00025c0000 count:0 mapcount:1 mapping: (null) index:0x7f4fdbe00 flags: 0x1fffff80080048(uptodate|active|swapbacked) page dumped because: PAGE_FLAGS_CHECK_AT_FREE flag(s) set bad because of flags: flags: 0x40(active) Modules linked in: snd_hda_codec_hdmi i915 rpcsec_gss_krb5 nfsv4 dns_resolver bnep rfcomm nfsd bluetooth auth_rpcgss nfs_acl nfs rfkill lockd grace sunrpc i2c_algo_bit drm_kms_helper snd_hda_codec_realtek snd_hda_codec_generic drm snd_hda_intel fscache snd_hda_codec x86_pkg_temp_thermal coretemp kvm_intel snd_hda_core snd_hwdep kvm snd_pcm snd_seq_dummy snd_seq_oss crct10dif_pclmul snd_seq_midi crc32_pclmul snd_seq_midi_event ghash_clmulni_intel snd_rawmidi aesni_intel lrw gf128mul snd_seq glue_helper ablk_helper snd_seq_device cryptd fuse snd_timer dcdbas serio_raw mei_me parport_pc snd mei ppdev i2c_core video lp soundcore parport lpc_ich shpchp mfd_core ext4 mbcache jbd2 sd_mod e1000e ahci ptp libahci crc32c_intel libata pps_core CPU: 3 PID: 2211 Comm: bash Not tainted 4.2.0-rc5-mm1+ #45 Hardware name: Dell Inc. OptiPlex 7020/0F5C5X, BIOS A03 01/08/2015 Call Trace: dump_stack+0x48/0x5c bad_page+0xe6/0x140 free_pages_prepare+0x2f9/0x320 ? uncharge_list+0xdd/0x100 free_hot_cold_page+0x40/0x170 __put_single_page+0x20/0x30 put_page+0x25/0x40 unmap_and_move+0x1a6/0x1f0 migrate_pages+0x100/0x1d0 ? kill_procs+0x100/0x100 ? unlock_page+0x6f/0x90 __soft_offline_page+0x127/0x2a0 soft_offline_page+0xa6/0x200 This race is explained like below: CPU0 CPU1 soft_offline_page __soft_offline_page TestSetPageHWPoison unpoison_memory PageHWPoison check (true) TestClearPageHWPoison put_page -> release refcount held by get_hwpoison_page in unpoison_memory put_page -> release refcount held by isolate_lru_page in __soft_offline_page migrate_pages The second put_page() releases refcount held by isolate_lru_page() which will lead to unmap_and_move() releases the last refcount of page and w/ mapcount still 1 since try_to_unmap() is not called if there is only one user map the page. Anyway, the page refcount and mapcount will still mess if the page is mapped by multiple users. This race was introduced by commit 4491f71260 ("mm/memory-failure: set PageHWPoison before migrate_pages()"), which focuses on preventing the reuse of successfully migrated page. Before this commit we prevent the reuse by changing the migratetype to MIGRATE_ISOLATE during soft offlining, which has the following problems, so simply reverting the commit is not a best option: 1) it doesn't eliminate the reuse completely, because set_migratetype_isolate() can fail to set MIGRATE_ISOLATE to the target page if the pageblock of the page contains one or more unmovable pages (i.e. has_unmovable_pages() returns true). 2) the original code changes migratetype to MIGRATE_ISOLATE forcibly, and sets it to MIGRATE_MOVABLE forcibly after soft offline, regardless of the original migratetype state, which could impact other subsystems like memory hotplug or compaction. This patch moves PageSetHWPoison just after put_page() in unmap_and_move(), which closes up the reported race window and minimizes another race window b/w SetPageHWPoison and reallocation (which causes the reuse of soft-offlined page.) 
The latter race window still exists but it's acceptable, because it's rare and effectively the same as ordinary "containment failure" case even if it happens, so keep the window open is acceptable. Fixes: 4491f71260 ("mm/memory-failure: set PageHWPoison before migrate_pages()") Signed-off-by: Wanpeng Li Signed-off-by: Naoya Horiguchi Reported-by: Wanpeng Li Tested-by: Wanpeng Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swapops.h | 14 ++++++++++++++ mm/memory-failure.c | 4 ---- mm/migrate.c | 9 +++++---- 3 files changed, 19 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/include/linux/swapops.h b/include/linux/swapops.h index ec04669f2a3b..5c3a5f3e7eec 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -181,6 +181,11 @@ static inline int is_hwpoison_entry(swp_entry_t entry) return swp_type(entry) == SWP_HWPOISON; } +static inline bool test_set_page_hwpoison(struct page *page) +{ + return TestSetPageHWPoison(page); +} + static inline void num_poisoned_pages_inc(void) { atomic_long_inc(&num_poisoned_pages); @@ -211,6 +216,15 @@ static inline int is_hwpoison_entry(swp_entry_t swp) { return 0; } + +static inline bool test_set_page_hwpoison(struct page *page) +{ + return false; +} + +static inline void num_poisoned_pages_inc(void) +{ +} #endif #if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 393ea13b0754..b0664c23838b 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1681,8 +1681,6 @@ static int __soft_offline_page(struct page *page, int flags) inc_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); list_add(&page->lru, &pagelist); - if (!TestSetPageHWPoison(page)) - num_poisoned_pages_dec(); ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, MIGRATE_SYNC, MR_MEMORY_FAILURE); if (ret) { @@ -1697,8 +1695,6 @@ static int __soft_offline_page(struct page *page, int flags) pfn, ret, page->flags); if (ret > 0) ret = -EIO; - if (TestClearPageHWPoison(page)) - num_poisoned_pages_dec(); } } else { pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", diff --git a/mm/migrate.c b/mm/migrate.c index 5c08cab5419e..918defbdda0e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -880,8 +880,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, /* Establish migration ptes or remove ptes */ if (page_mapped(page)) { try_to_unmap(page, - TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS| - TTU_IGNORE_HWPOISON); + TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); page_was_mapped = 1; } @@ -952,9 +951,11 @@ out: dec_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); /* Soft-offlined page shouldn't go through lru cache list */ - if (reason == MR_MEMORY_FAILURE) + if (reason == MR_MEMORY_FAILURE) { put_page(page); - else + if (!test_set_page_hwpoison(page)) + num_poisoned_pages_inc(); + } else putback_lru_page(page); } -- cgit v1.2.3-59-g8ed1b From 230ac719c500e58e71342be381ad2042a8cffc42 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Tue, 8 Sep 2015 15:03:29 -0700 Subject: mm/hwpoison: don't try to unpoison containment-failed pages memory_failure() can be called at any page at any time, which means that we can't eliminate the possibility of containment failure. In such case the best option is to leak the page intentionally (and never touch it later.) 
We have an unpoison function for testing, and it cannot handle such containment-failed pages, which results in kernel panic (visible with various calltraces.) So this patch suggests that we limit the unpoisonable pages to properly contained pages and ignore any other ones. Testers are recommended to keep in mind that there're un-unpoisonable pages when writing test programs. Signed-off-by: Naoya Horiguchi Tested-by: Wanpeng Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory-failure.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'mm') diff --git a/mm/memory-failure.c b/mm/memory-failure.c index b0664c23838b..bba2d7c2c9ce 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1445,6 +1445,22 @@ int unpoison_memory(unsigned long pfn) return 0; } + if (page_count(page) > 1) { + pr_info("MCE: Someone grabs the hwpoison page %#lx\n", pfn); + return 0; + } + + if (page_mapped(page)) { + pr_info("MCE: Someone maps the hwpoison page %#lx\n", pfn); + return 0; + } + + if (page_mapping(page)) { + pr_info("MCE: the hwpoison page has non-NULL mapping %#lx\n", + pfn); + return 0; + } + /* * unpoison_memory() can encounter thp only when the thp is being * worked by memory_failure() and the page lock is not held yet. -- cgit v1.2.3-59-g8ed1b From 567d117b8b2ab1c3437acc4799505a59bfa5722b Mon Sep 17 00:00:00 2001 From: Alexander Kuleshov Date: Tue, 8 Sep 2015 15:03:33 -0700 Subject: mm/memblock.c: rename local variable of memblock_type to 'type' Since commit e3239ff92a17 ("memblock: Rename memblock_region to memblock_type and memblock_property to memblock_region"), all local variables of the membock_type type were renamed to 'type'. This commit renames all remaining local variables with the memblock_type type to the same view. Signed-off-by: Alexander Kuleshov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 509255223688..69babe22eef7 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -615,14 +615,14 @@ static int __init_memblock memblock_add_region(phys_addr_t base, int nid, unsigned long flags) { - struct memblock_type *_rgn = &memblock.memory; + struct memblock_type *type = &memblock.memory; memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n", (unsigned long long)base, (unsigned long long)base + size - 1, flags, (void *)_RET_IP_); - return memblock_add_range(_rgn, base, size, nid, flags); + return memblock_add_range(type, base, size, nid, flags); } int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) @@ -835,10 +835,10 @@ void __init_memblock __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, phys_addr_t *out_end) { - struct memblock_type *rsv = &memblock.reserved; + struct memblock_type *type = &memblock.reserved; - if (*idx >= 0 && *idx < rsv->cnt) { - struct memblock_region *r = &rsv->regions[*idx]; + if (*idx >= 0 && *idx < type->cnt) { + struct memblock_region *r = &type->regions[*idx]; phys_addr_t base = r->base; phys_addr_t size = r->size; -- cgit v1.2.3-59-g8ed1b From 44a30220bc0a171c010e8df63d144655abdafe61 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Tue, 8 Sep 2015 15:03:33 -0700 Subject: shmem: recalculate file inode when fstat Shmem uses shmem_recalc_inode to update i_blocks when it allocates page, undoes range or swaps. But mm can drop clean page without notifying shmem. This makes fstat sometimes return out-of-date block size. 
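In the style of the test-cases earlier in this series, the stale value shows up as st_blocks from fstat() disagreeing with the pages shmem really holds. The sketch below only reads the counter; actually provoking the drop of a clean shmem page needs swap or memory pressure and is not shown:

#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) != 0)
		return 1;
	/* for a tmpfs file, st_blocks is derived from shmem's i_blocks */
	printf("%s: st_blocks=%lld\n", argv[1], (long long)st.st_blocks);
	return 0;
}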
The problem can be partially solved when we add inode_operations->getattr which calls shmem_recalc_inode to update i_blocks for fstat. shmem_recalc_inode also updates counter used by statfs and vm_committed_as. For them the situation is not changed. They still suffer from the discrepancy after dropping clean page and before the function is called by aforementioned triggers. Signed-off-by: Yu Zhao Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index dbe0c1e8349c..48ce82926d93 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -542,6 +542,21 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) } EXPORT_SYMBOL_GPL(shmem_truncate_range); +static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + struct shmem_inode_info *info = SHMEM_I(inode); + + spin_lock(&info->lock); + shmem_recalc_inode(inode); + spin_unlock(&info->lock); + + generic_fillattr(inode, stat); + + return 0; +} + static int shmem_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); @@ -3122,6 +3137,7 @@ static const struct file_operations shmem_file_operations = { }; static const struct inode_operations shmem_inode_operations = { + .getattr = shmem_getattr, .setattr = shmem_setattr, #ifdef CONFIG_TMPFS_XATTR .setxattr = shmem_setxattr, -- cgit v1.2.3-59-g8ed1b From e3975891254e08d220ddcafca93a0e05d9560bfb Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Tue, 8 Sep 2015 15:03:38 -0700 Subject: mm/mmap.c: simplify the failure return working flow __split_vma() doesn't need out_err label, neither need initializing err. copy_vma() can return NULL directly when kmem_cache_alloc() fails. 
Signed-off-by: Chen Gang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 52a2373d0ed4..7a3b12399f06 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2455,7 +2455,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { struct vm_area_struct *new; - int err = -ENOMEM; + int err; if (is_vm_hugetlb_page(vma) && (addr & ~(huge_page_mask(hstate_vma(vma))))) @@ -2463,7 +2463,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!new) - goto out_err; + return -ENOMEM; /* most fields are the same, copy all, and then fixup */ *new = *vma; @@ -2511,7 +2511,6 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, mpol_put(vma_policy(new)); out_free_vma: kmem_cache_free(vm_area_cachep, new); - out_err: return err; } @@ -2952,30 +2951,31 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); } else { new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); - if (new_vma) { - *new_vma = *vma; - new_vma->vm_start = addr; - new_vma->vm_end = addr + len; - new_vma->vm_pgoff = pgoff; - if (vma_dup_policy(vma, new_vma)) - goto out_free_vma; - INIT_LIST_HEAD(&new_vma->anon_vma_chain); - if (anon_vma_clone(new_vma, vma)) - goto out_free_mempol; - if (new_vma->vm_file) - get_file(new_vma->vm_file); - if (new_vma->vm_ops && new_vma->vm_ops->open) - new_vma->vm_ops->open(new_vma); - vma_link(mm, new_vma, prev, rb_link, rb_parent); - *need_rmap_locks = false; - } + if (!new_vma) + goto out; + *new_vma = *vma; + new_vma->vm_start = addr; + new_vma->vm_end = addr + len; + new_vma->vm_pgoff = pgoff; + if (vma_dup_policy(vma, new_vma)) + goto out_free_vma; + INIT_LIST_HEAD(&new_vma->anon_vma_chain); + if (anon_vma_clone(new_vma, vma)) + goto out_free_mempol; + if (new_vma->vm_file) + get_file(new_vma->vm_file); + if (new_vma->vm_ops && new_vma->vm_ops->open) + new_vma->vm_ops->open(new_vma); + vma_link(mm, new_vma, prev, rb_link, rb_parent); + *need_rmap_locks = false; } return new_vma; - out_free_mempol: +out_free_mempol: mpol_put(vma_policy(new_vma)); - out_free_vma: +out_free_vma: kmem_cache_free(vm_area_cachep, new_vma); +out: return NULL; } -- cgit v1.2.3-59-g8ed1b From 21cd3a604797c2774676926a95a3d17d4cd5cbb3 Mon Sep 17 00:00:00 2001 From: Wang Kai Date: Tue, 8 Sep 2015 15:03:41 -0700 Subject: kmemleak: record accurate early log buffer count and report when exceeded In log_early function, crt_early_log should also count once when 'crt_early_log >= ARRAY_SIZE(early_log)'. Otherwise the reported count from kmemleak_init is one less than 'actual number'. Then, in kmemleak_init, if early_log buffer size equal actual number, kmemleak will init sucessful, so change warning condition to 'crt_early_log > ARRAY_SIZE(early_log)'. 
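The counting rule is the usual one for a capped buffer: keep incrementing past the cap so the final counter reports how many entries were actually requested, and warn only when that number strictly exceeds the capacity. A small userspace analogue of the corrected accounting (illustrative only, not kmemleak code):

#include <stdio.h>

#define CAP 4

int main(void)
{
	int stored = 0, requested = 0;

	for (int i = 0; i < 6; i++) {
		requested++;		/* count every attempt, dropped or not */
		if (requested <= CAP)
			stored++;	/* only the first CAP entries fit */
	}
	/* requested == 6 > CAP, so the report names the real demand */
	if (requested > CAP)
		printf("buffer exceeded: need %d, have %d\n", requested, CAP);
	return 0;
}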
Signed-off-by: Wang Kai Acked-by: Catalin Marinas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/kmemleak.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/kmemleak.c b/mm/kmemleak.c index cf79f110157c..f532f6a37b55 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -838,6 +838,7 @@ static void __init log_early(int op_type, const void *ptr, size_t size, } if (crt_early_log >= ARRAY_SIZE(early_log)) { + crt_early_log++; kmemleak_disable(); return; } @@ -1882,7 +1883,7 @@ void __init kmemleak_init(void) object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE); scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); - if (crt_early_log >= ARRAY_SIZE(early_log)) + if (crt_early_log > ARRAY_SIZE(early_log)) pr_warning("Early log buffer exceeded (%d), please increase " "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log); -- cgit v1.2.3-59-g8ed1b From 26f5d7609f03ad8d6dc552458e4e371a62416b37 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 8 Sep 2015 15:03:44 -0700 Subject: list_lru: don't call list_lru_from_kmem if the list_head is empty If the list_head is empty then we'll have called list_lru_from_kmem for nothing. Move that call inside of the list_empty if block. Signed-off-by: Jeff Layton Reviewed-by: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/list_lru.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/list_lru.c b/mm/list_lru.c index 909eca2c820e..e1da19fac1b3 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -99,8 +99,8 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item) struct list_lru_one *l; spin_lock(&nlru->lock); - l = list_lru_from_kmem(nlru, item); if (list_empty(item)) { + l = list_lru_from_kmem(nlru, item); list_add_tail(item, &l->list); l->nr_items++; spin_unlock(&nlru->lock); @@ -118,8 +118,8 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item) struct list_lru_one *l; spin_lock(&nlru->lock); - l = list_lru_from_kmem(nlru, item); if (!list_empty(item)) { + l = list_lru_from_kmem(nlru, item); list_del_init(item); l->nr_items--; spin_unlock(&nlru->lock); -- cgit v1.2.3-59-g8ed1b From 7fadc820222497eac234d1d51a66517c00a6ca4c Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 8 Sep 2015 15:03:46 -0700 Subject: mm, vmscan: unlock page while waiting on writeback This is merely a politeness: I've not found that shrink_page_list() leads to deadlock with the page it holds locked across wait_on_page_writeback(); but nevertheless, why hold others off by keeping the page locked there? And while we're at it: remove the mistaken "not " from the commentary on this Case 3 (and a distracting blank line from Case 2, if I may). Signed-off-by: Hugh Dickins Acked-by: Michal Hocko Acked-by: Johannes Weiner Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 8276a3a615ca..2d978b28a410 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -985,7 +985,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, * __GFP_IO|__GFP_FS for this reason); but more thought * would probably show more reasons. * - * 3) Legacy memcg encounters a page that is not already marked + * 3) Legacy memcg encounters a page that is already marked * PageReclaim. 
memcg does not have any dirty pages * throttling so we could easily OOM just because too many * pages are in writeback and there is nothing else to @@ -1015,12 +1015,15 @@ static unsigned long shrink_page_list(struct list_head *page_list, */ SetPageReclaim(page); nr_writeback++; - goto keep_locked; /* Case 3 above */ } else { + unlock_page(page); wait_on_page_writeback(page); + /* then go back and try same page again */ + list_add_tail(&page->lru, page_list); + continue; } } -- cgit v1.2.3-59-g8ed1b From 96db800f5d73cd5c49461253d45766e094f0f8c2 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 8 Sep 2015 15:03:50 -0700 Subject: mm: rename alloc_pages_exact_node() to __alloc_pages_node() alloc_pages_exact_node() was introduced in commit 6484eb3e2a81 ("page allocator: do not check NUMA node ID when the caller knows the node is valid") as an optimized variant of alloc_pages_node(), that doesn't fallback to current node for nid == NUMA_NO_NODE. Unfortunately the name of the function can easily suggest that the allocation is restricted to the given node and fails otherwise. In truth, the node is only preferred, unless __GFP_THISNODE is passed among the gfp flags. The misleading name has lead to mistakes in the past, see for example commits 5265047ac301 ("mm, thp: really limit transparent hugepage allocation to local node") and b360edb43f8e ("mm, mempolicy: migrate_to_node should only migrate to node"). Another issue with the name is that there's a family of alloc_pages_exact*() functions where 'exact' means exact size (instead of page order), which leads to more confusion. To prevent further mistakes, this patch effectively renames alloc_pages_exact_node() to __alloc_pages_node() to better convey that it's an optimized variant of alloc_pages_node() not intended for general usage. Both functions get described in comments. It has been also considered to really provide a convenience function for allocations restricted to a node, but the major opinion seems to be that __GFP_THISNODE already provides that functionality and we shouldn't duplicate the API needlessly. The number of users would be small anyway. Existing callers of alloc_pages_exact_node() are simply converted to call __alloc_pages_node(), with the exception of sba_alloc_coherent() which open-codes the check for NUMA_NO_NODE, so it is converted to use alloc_pages_node() instead. This means it no longer performs some VM_BUG_ON checks, and since the current check for nid in alloc_pages_node() uses a 'nid < 0' comparison (which includes NUMA_NO_NODE), it may hide wrong values which would be previously exposed. Both differences will be rectified by the next patch. To sum up, this patch makes no functional changes, except temporarily hiding potentially buggy callers. Restricting the checks in alloc_pages_node() is left for the next patch which can in turn expose more existing buggy callers. Signed-off-by: Vlastimil Babka Acked-by: Johannes Weiner Acked-by: Robin Holt Acked-by: Michal Hocko Acked-by: Christoph Lameter Acked-by: Michael Ellerman Cc: Mel Gorman Cc: David Rientjes Cc: Greg Thelen Cc: Aneesh Kumar K.V Cc: Pekka Enberg Cc: Joonsoo Kim Cc: Naoya Horiguchi Cc: Tony Luck Cc: Fenghua Yu Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Gleb Natapov Cc: Paolo Bonzini Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: Cliff Whickman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/hp/common/sba_iommu.c | 6 +----- arch/ia64/kernel/uncached.c | 2 +- arch/ia64/sn/pci/pci_dma.c | 2 +- arch/powerpc/platforms/cell/ras.c | 2 +- arch/x86/kvm/vmx.c | 2 +- drivers/misc/sgi-xp/xpc_uv.c | 2 +- include/linux/gfp.h | 23 +++++++++++++++-------- kernel/profile.c | 8 ++++---- mm/filemap.c | 2 +- mm/huge_memory.c | 2 +- mm/hugetlb.c | 4 ++-- mm/memory-failure.c | 2 +- mm/mempolicy.c | 4 ++-- mm/migrate.c | 4 ++-- mm/page_alloc.c | 2 -- mm/slab.c | 2 +- mm/slob.c | 4 ++-- mm/slub.c | 2 +- 18 files changed, 38 insertions(+), 37 deletions(-) (limited to 'mm') diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 344387a55406..a6d6190c9d24 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1140,13 +1140,9 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, #ifdef CONFIG_NUMA { - int node = ioc->node; struct page *page; - if (node == NUMA_NO_NODE) - node = numa_node_id(); - - page = alloc_pages_exact_node(node, flags, get_order(size)); + page = alloc_pages_node(ioc->node, flags, get_order(size)); if (unlikely(!page)) return NULL; diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index 20e8a9b21d75..f3976da36721 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -97,7 +97,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) /* attempt to allocate a granule's worth of cached memory pages */ - page = alloc_pages_exact_node(nid, + page = __alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, IA64_GRANULE_SHIFT-PAGE_SHIFT); if (!page) { diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index d0853e8e8623..8f59907007cb 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -92,7 +92,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, */ node = pcibus_to_node(pdev->bus); if (likely(node >=0)) { - struct page *p = alloc_pages_exact_node(node, + struct page *p = __alloc_pages_node(node, flags, get_order(size)); if (likely(p)) diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index e865d748179b..2d4f60c0119a 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c @@ -123,7 +123,7 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order) area->nid = nid; area->order = order; - area->pages = alloc_pages_exact_node(area->nid, + area->pages = __alloc_pages_node(area->nid, GFP_KERNEL|__GFP_THISNODE, area->order); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 4a4eec30cc08..148ea2016022 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3150,7 +3150,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu) struct page *pages; struct vmcs *vmcs; - pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order); + pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); if (!pages) return NULL; vmcs = page_address(pages); diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 95c894482fdd..340b44d9e8cf 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -239,7 +239,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, mq->mmr_blade = uv_cpu_to_blade_id(cpu); nid = cpu_to_node(cpu); - page = alloc_pages_exact_node(nid, + page = __alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, pg_order); if 
(page == NULL) { diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 3bd64b115999..d2c142bc872e 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -303,20 +303,28 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); } -static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, - unsigned int order) +/* + * Allocate pages, preferring the node given as nid. The node must be valid and + * online. For more general interface, see alloc_pages_node(). + */ +static inline struct page * +__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { - /* Unknown node is current node */ - if (nid < 0) - nid = numa_node_id(); + VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid)); return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); } -static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, +/* + * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE, + * prefer the current CPU's node. + */ +static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { - VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid)); + /* Unknown node is current node */ + if (nid < 0) + nid = numa_node_id(); return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); } @@ -357,7 +365,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask); void *alloc_pages_exact(size_t size, gfp_t gfp_mask); void free_pages_exact(void *virt, size_t size); -/* This is different from alloc_pages_exact_node !!! */ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); #define __get_free_page(gfp_mask) \ diff --git a/kernel/profile.c b/kernel/profile.c index a7bcd28d6e9f..99513e1160e5 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -339,7 +339,7 @@ static int profile_cpu_callback(struct notifier_block *info, node = cpu_to_mem(cpu); per_cpu(cpu_profile_flip, cpu) = 0; if (!per_cpu(cpu_profile_hits, cpu)[1]) { - page = alloc_pages_exact_node(node, + page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); if (!page) @@ -347,7 +347,7 @@ static int profile_cpu_callback(struct notifier_block *info, per_cpu(cpu_profile_hits, cpu)[1] = page_address(page); } if (!per_cpu(cpu_profile_hits, cpu)[0]) { - page = alloc_pages_exact_node(node, + page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); if (!page) @@ -543,14 +543,14 @@ static int create_hash_tables(void) int node = cpu_to_mem(cpu); struct page *page; - page = alloc_pages_exact_node(node, + page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, 0); if (!page) goto out_cleanup; per_cpu(cpu_profile_hits, cpu)[1] = (struct profile_hit *)page_address(page); - page = alloc_pages_exact_node(node, + page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, 0); if (!page) diff --git a/mm/filemap.c b/mm/filemap.c index 30d69c0c5a38..72940fb38666 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -674,7 +674,7 @@ struct page *__page_cache_alloc(gfp_t gfp) do { cpuset_mems_cookie = read_mems_allowed_begin(); n = cpuset_mem_spread_node(); - page = alloc_pages_exact_node(n, gfp, 0); + page = __alloc_pages_node(n, gfp, 0); } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); return page; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 71a4822c832b..883f613ada7e 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2414,7 +2414,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, */ 
up_read(&mm->mmap_sem); - *hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER); + *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); if (unlikely(!*hpage)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); *hpage = ERR_PTR(-ENOMEM); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index cd1280c487ff..999fb0aef8f1 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1331,7 +1331,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) { struct page *page; - page = alloc_pages_exact_node(nid, + page = __alloc_pages_node(nid, htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE| __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); @@ -1483,7 +1483,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid) __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); else - page = alloc_pages_exact_node(nid, + page = __alloc_pages_node(nid, htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE| __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index bba2d7c2c9ce..eeda6485e76c 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1521,7 +1521,7 @@ static struct page *new_page(struct page *p, unsigned long private, int **x) return alloc_huge_page_node(page_hstate(compound_head(p)), nid); else - return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0); + return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0); } /* diff --git a/mm/mempolicy.c b/mm/mempolicy.c index d6f2caee28c0..87a177917cb2 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -942,7 +942,7 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x return alloc_huge_page_node(page_hstate(compound_head(page)), node); else - return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE | + return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); } @@ -1998,7 +1998,7 @@ retry_cpuset: nmask = policy_nodemask(gfp, pol); if (!nmask || node_isset(hpage_node, *nmask)) { mpol_cond_put(pol); - page = alloc_pages_exact_node(hpage_node, + page = __alloc_pages_node(hpage_node, gfp | __GFP_THISNODE, order); goto out; } diff --git a/mm/migrate.c b/mm/migrate.c index 918defbdda0e..02ce25df16c2 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1195,7 +1195,7 @@ static struct page *new_page_node(struct page *p, unsigned long private, return alloc_huge_page_node(page_hstate(compound_head(p)), pm->node); else - return alloc_pages_exact_node(pm->node, + return __alloc_pages_node(pm->node, GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); } @@ -1555,7 +1555,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page, int nid = (int) data; struct page *newpage; - newpage = alloc_pages_exact_node(nid, + newpage = __alloc_pages_node(nid, (GFP_HIGHUSER_MOVABLE | __GFP_THISNODE | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 252665d553b4..bdaa0cf8fd41 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3511,8 +3511,6 @@ EXPORT_SYMBOL(alloc_pages_exact); * * Like alloc_pages_exact(), but try to allocate on node nid first before falling * back. - * Note this is not alloc_pages_exact_node() which allocates on a specific node, - * but is not exact. 
*/ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) { diff --git a/mm/slab.c b/mm/slab.c index 60c936938b84..c77ebe6cc87c 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1595,7 +1595,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, if (memcg_charge_slab(cachep, flags, cachep->gfporder)) return NULL; - page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); + page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); if (!page) { memcg_uncharge_slab(cachep, cachep->gfporder); slab_out_of_memory(cachep, flags, nodeid); diff --git a/mm/slob.c b/mm/slob.c index 165bbd3cd606..0d7e5df74d1f 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -45,7 +45,7 @@ * NUMA support in SLOB is fairly simplistic, pushing most of the real * logic down to the page allocator, and simply doing the node accounting * on the upper levels. In the event that a node id is explicitly - * provided, alloc_pages_exact_node() with the specified node id is used + * provided, __alloc_pages_node() with the specified node id is used * instead. The common case (or when the node id isn't explicitly provided) * will default to the current node, as per numa_node_id(). * @@ -193,7 +193,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) #ifdef CONFIG_NUMA if (node != NUMA_NO_NODE) - page = alloc_pages_exact_node(node, gfp, order); + page = __alloc_pages_node(node, gfp, order); else #endif page = alloc_pages(gfp, order); diff --git a/mm/slub.c b/mm/slub.c index 084184e706c6..f614b5dc396b 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1334,7 +1334,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s, if (node == NUMA_NO_NODE) page = alloc_pages(flags, order); else - page = alloc_pages_exact_node(node, flags, order); + page = __alloc_pages_node(node, flags, order); if (!page) memcg_uncharge_slab(s, order); -- cgit v1.2.3-59-g8ed1b From 1a16718cf7f4f48ee2aa2cfd9a961c6b433a7b5b Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 8 Sep 2015 15:03:59 -0700 Subject: mm/compaction: correct to flush migrated pages if pageblock skip happens We cache isolate_start_pfn before entering isolate_migratepages(). If pageblock is skipped in isolate_migratepages() due to whatever reason, cc->migrate_pfn can be far from isolate_start_pfn hence we flush pages that were freed. For example, the following scenario can be possible: - assume order-9 compaction, pageblock order is 9 - start_isolate_pfn is 0x200 - isolate_migratepages() - skip a number of pageblocks - start to isolate from pfn 0x600 - cc->migrate_pfn = 0x620 - return - last_migrated_pfn is set to 0x200 - check flushing condition - current_block_start is set to 0x600 - last_migrated_pfn < current_block_start then do useless flush This wrong flush would not help the performance and success rate so this patch tries to fix it. One simple way to know the exact position where we start to isolate migratable pages is that we cache it in isolate_migratepages() before entering actual isolation. This patch implements that and fixes the problem. 
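Plugging the changelog's numbers into the flush test shows the difference; a toy userspace check (the pfns and the order-9 pageblock mask come from the scenario above, nothing else here is kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long caller_pfn  = 0x200;	/* migrate_pfn cached before the call */
	unsigned long isolate_pfn = 0x600;	/* where isolation really started     */
	unsigned long block_start = 0x620 & ~((1UL << 9) - 1);	/* = 0x600 */

	/* old: 0x200 < 0x600 -> flush, although nothing in between was freed */
	printf("old flush decision: %d\n", caller_pfn < block_start);
	/* new: 0x600 < 0x600 is false -> no useless flush */
	printf("new flush decision: %d\n", isolate_pfn < block_start);
	return 0;
}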
Signed-off-by: Joonsoo Kim Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Rik van Riel Cc: David Rientjes Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 30 +++++++++++++++--------------- mm/internal.h | 1 + 2 files changed, 16 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 8f64d3533990..c5c627aae996 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1115,6 +1115,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, struct compact_control *cc) { unsigned long low_pfn, end_pfn; + unsigned long isolate_start_pfn; struct page *page; const isolate_mode_t isolate_mode = (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | @@ -1163,6 +1164,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, continue; /* Perform the isolation */ + isolate_start_pfn = low_pfn; low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn, isolate_mode); @@ -1171,6 +1173,15 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, return ISOLATE_ABORT; } + /* + * Record where we could have freed pages by migration and not + * yet flushed them to buddy allocator. + * - this is the lowest page that could have been isolated and + * then freed by migration. + */ + if (cc->nr_migratepages && !cc->last_migrated_pfn) + cc->last_migrated_pfn = isolate_start_pfn; + /* * Either we isolated something and proceed with migration. Or * we failed and compact_zone should decide if we should @@ -1342,7 +1353,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) unsigned long end_pfn = zone_end_pfn(zone); const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); const bool sync = cc->mode != MIGRATE_ASYNC; - unsigned long last_migrated_pfn = 0; ret = compaction_suitable(zone, cc->order, cc->alloc_flags, cc->classzone_idx); @@ -1380,6 +1390,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; } + cc->last_migrated_pfn = 0; trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn, sync); @@ -1389,7 +1400,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) while ((ret = compact_finished(zone, cc, migratetype)) == COMPACT_CONTINUE) { int err; - unsigned long isolate_start_pfn = cc->migrate_pfn; switch (isolate_migratepages(zone, cc)) { case ISOLATE_ABORT: @@ -1429,16 +1439,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) } } - /* - * Record where we could have freed pages by migration and not - * yet flushed them to buddy allocator. We use the pfn that - * isolate_migratepages() started from in this loop iteration - * - this is the lowest page that could have been isolated and - * then freed by migration. - */ - if (!last_migrated_pfn) - last_migrated_pfn = isolate_start_pfn; - check_drain: /* * Has the migration scanner moved away from the previous @@ -1447,18 +1447,18 @@ check_drain: * compact_finished() can detect immediately if allocation * would succeed. 
*/ - if (cc->order > 0 && last_migrated_pfn) { + if (cc->order > 0 && cc->last_migrated_pfn) { int cpu; unsigned long current_block_start = cc->migrate_pfn & ~((1UL << cc->order) - 1); - if (last_migrated_pfn < current_block_start) { + if (cc->last_migrated_pfn < current_block_start) { cpu = get_cpu(); lru_add_drain_cpu(cpu); drain_local_pages(zone); put_cpu(); /* No more flushing until we migrate again */ - last_migrated_pfn = 0; + cc->last_migrated_pfn = 0; } } diff --git a/mm/internal.h b/mm/internal.h index 1195dd2d6a2b..bc0fa9a69e46 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -182,6 +182,7 @@ struct compact_control { unsigned long nr_migratepages; /* Number of pages to migrate */ unsigned long free_pfn; /* isolate_freepages search base */ unsigned long migrate_pfn; /* isolate_migratepages search base */ + unsigned long last_migrated_pfn;/* Not yet flushed page being freed */ enum migrate_mode mode; /* Async or sync migration mode */ bool ignore_skip_hint; /* Scan blocks even if marked skip */ int order; /* order a direct compactor needs */ -- cgit v1.2.3-59-g8ed1b From c11539315129b599aae6d9bc0f941dee5559ec58 Mon Sep 17 00:00:00 2001 From: Alexander Kuleshov Date: Tue, 8 Sep 2015 15:04:02 -0700 Subject: mm/memblock.c: fiy typos in comments s/succees/success/ Signed-off-by: Alexander Kuleshov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 69babe22eef7..31b06c67b1b1 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -762,7 +762,7 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) * * This function isolates region [@base, @base + @size), and sets/clears flag * - * Return 0 on succees, -errno on failure. + * Return 0 on success, -errno on failure. */ static int __init_memblock memblock_setclr_flag(phys_addr_t base, phys_addr_t size, int set, int flag) @@ -789,7 +789,7 @@ static int __init_memblock memblock_setclr_flag(phys_addr_t base, * @base: the base phys addr of the region * @size: the size of the region * - * Return 0 on succees, -errno on failure. + * Return 0 on success, -errno on failure. */ int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) { @@ -801,7 +801,7 @@ int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) * @base: the base phys addr of the region * @size: the size of the region * - * Return 0 on succees, -errno on failure. + * Return 0 on success, -errno on failure. */ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) { @@ -813,7 +813,7 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) * @base: the base phys addr of the region * @size: the size of the region * - * Return 0 on succees, -errno on failure. + * Return 0 on success, -errno on failure. */ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) { -- cgit v1.2.3-59-g8ed1b From bde43c6c9f4f360ae549a0ed9f10a3e62e363aca Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Tue, 8 Sep 2015 15:04:05 -0700 Subject: mm/khugepaged: allow interruption of allocation sleep again Commit 1dfb059b9438 ("thp: reduce khugepaged freezing latency") fixed khugepaged to do not block a system suspend. But the result is that it could not get interrupted before the given timeout because the condition for the wait event is "false". 
This patch puts back the original approach but it uses freezable_schedule_timeout_interruptible() instead of schedule_timeout_interruptible(). It does the right thing. I am pretty sure that the freezable variant was not used in the original fix only because it was not available at that time. The regression has been there for ages. It was not critical. It just did the allocation throttling a little bit more aggressively. I found this problem when converting the kthread to kthread worker API and trying to understand the code. This bug is thought to have minimal userspace-visible impact. Somebody could set a high alloc_sleep value by mistake, and then try to fix it back, but khugepaged would keep sleeping until the high value expires. Signed-off-by: Petr Mladek Cc: Andrea Arcangeli Acked-by: Vlastimil Babka Cc: "Aneesh Kumar K.V" Cc: "Kirill A. Shutemov" Cc: David Rientjes Cc: Ebru Akagunduz Cc: Mel Gorman Cc: Jiri Kosina Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 883f613ada7e..b16279cbd91d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2326,8 +2326,12 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page, static void khugepaged_alloc_sleep(void) { - wait_event_freezable_timeout(khugepaged_wait, false, - msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); + DEFINE_WAIT(wait); + + add_wait_queue(&khugepaged_wait, &wait); + freezable_schedule_timeout_interruptible( + msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); + remove_wait_queue(&khugepaged_wait, &wait); } static int khugepaged_node_load[MAX_NUMNODES]; -- cgit v1.2.3-59-g8ed1b From c9d13f5fc748a02cb5917a798f065681007342b9 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Tue, 8 Sep 2015 15:04:08 -0700 Subject: mm/mmap.c:insert_vm_struct(): check for failure before setting values There's no point in initializing vma->vm_pgoff if the insertion attempt will be failing anyway. Run the checks before performing the initialization. 
Signed-off-by: Chen Gang Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 7a3b12399f06..b6be3249f0a9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2871,6 +2871,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; + if (find_vma_links(mm, vma->vm_start, vma->vm_end, + &prev, &rb_link, &rb_parent)) + return -ENOMEM; + if ((vma->vm_flags & VM_ACCOUNT) && + security_vm_enough_memory_mm(mm, vma_pages(vma))) + return -ENOMEM; + /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index @@ -2887,12 +2894,6 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) BUG_ON(vma->anon_vma); vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } - if (find_vma_links(mm, vma->vm_start, vma->vm_end, - &prev, &rb_link, &rb_parent)) - return -ENOMEM; - if ((vma->vm_flags & VM_ACCOUNT) && - security_vm_enough_memory_mm(mm, vma_pages(vma))) - return -ENOMEM; vma_link(mm, vma, prev, rb_link, rb_parent); return 0; -- cgit v1.2.3-59-g8ed1b From 013110a73dcf970cb28c5b0a79f9eee577ea6aa2 Mon Sep 17 00:00:00 2001 From: Yaowei Bai Date: Tue, 8 Sep 2015 15:04:10 -0700 Subject: mm/page_alloc.c: fix a misleading comment The comment says that the per-cpu batchsize and zone watermarks are determined by present_pages which is definitely wrong, they are both calculated from managed_pages. Fix it. Signed-off-by: Yaowei Bai Acked-by: Michal Hocko Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/sysctl/vm.txt | 4 ++-- mm/page_alloc.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 9c3f2f8054b5..a4482fceacec 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -349,7 +349,7 @@ zone[i]'s protection[j] is calculated by following expression. (i < j): zone[i]->protection[j] - = (total sums of present_pages from zone[i+1] to zone[j] on the node) + = (total sums of managed_pages from zone[i+1] to zone[j] on the node) / lowmem_reserve_ratio[i]; (i = j): (should not be protected. = 0; @@ -360,7 +360,7 @@ The default values of lowmem_reserve_ratio[i] are 256 (if zone[i] means DMA or DMA32 zone) 32 (others). As above expression, they are reciprocal number of ratio. -256 means 1/256. # of protection pages becomes about "0.39%" of total present +256 means 1/256. # of protection pages becomes about "0.39%" of total managed pages of higher zones on the node. If you would like to protect more pages, smaller values are effective. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index bdaa0cf8fd41..59abb47b70ee 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6022,7 +6022,7 @@ void __init mem_init_print_info(const char *str) * set_dma_reserve - set the specified number of pages reserved in the first zone * @new_dma_reserve: The number of pages to mark reserved * - * The per-cpu batchsize and zone watermarks are determined by present_pages. + * The per-cpu batchsize and zone watermarks are determined by managed_pages. * In the DMA zone, a significant percentage may be consumed by kernel image * and other unfreeable allocations which can skew the watermarks badly. 
This * function may optionally be used to account for unfreeable pages in the -- cgit v1.2.3-59-g8ed1b From 34b100605cb7e201d5c4e39f54d0e11caa950733 Mon Sep 17 00:00:00 2001 From: Yaowei Bai Date: Tue, 8 Sep 2015 15:04:13 -0700 Subject: mm/page_alloc.c: change sysctl_lower_zone_reserve_ratio to sysctl_lowmem_reserve_ratio in comments We use sysctl_lowmem_reserve_ratio rather than sysctl_lower_zone_reserve_ratio to determine how aggressive the kernel is in defending lowmem from the possibility of being captured into pinned user memory. To avoid misleading readers, correct it in some comments. Signed-off-by: Yaowei Bai Acked-by: Michal Hocko Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 59abb47b70ee..5e8e99dd595a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6075,7 +6075,7 @@ void __init page_alloc_init(void) } /* - * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio + * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio * or min_free_kbytes changes. */ static void calculate_totalreserve_pages(void) @@ -6119,7 +6119,7 @@ static void calculate_totalreserve_pages(void) /* * setup_per_zone_lowmem_reserve - called whenever - * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone + * sysctl_lowmem_reserve_ratio changes. Ensures that each zone * has a correct pages reserved value, so an adequate number of * pages are left in the zone after a successful __alloc_pages(). */ -- cgit v1.2.3-59-g8ed1b From b5685e9263a6f3a8da546b8a46382f18a63745c9 Mon Sep 17 00:00:00 2001 From: Xishi Qiu Date: Tue, 8 Sep 2015 15:04:16 -0700 Subject: memory-hotplug: fix comments in zone_spanned_pages_in_node() and zone_absent_pages_in_node() When hot adding a node from add_memory(), we will add memblock first, so the node is not empty. But when called from cpu_up(), the node should be empty. Signed-off-by: Xishi Qiu Cc: Tang Chen Cc: Yasuaki Ishimatsu Cc: Naoya Horiguchi Cc: Vlastimil Babka Cc: Taku Izumi Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5e8e99dd595a..b0fda2b9ca76 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5085,7 +5085,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid, { unsigned long zone_start_pfn, zone_end_pfn; - /* When hotadd a new node, the node should be empty */ + /* When hotadd a new node from cpu_up(), the node should be empty */ if (!node_start_pfn && !node_end_pfn) return 0; @@ -5152,7 +5152,7 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid, unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; unsigned long zone_start_pfn, zone_end_pfn; - /* When hotadd a new node, the node should be empty */ + /* When hotadd a new node from cpu_up(), the node should be empty */ if (!node_start_pfn && !node_end_pfn) return 0; -- cgit v1.2.3-59-g8ed1b From 4ada0c5a2daf11816180ec30bdbdbed1f6ff3224 Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Tue, 8 Sep 2015 15:04:19 -0700 Subject: mm/page_alloc.c: fix type information of memoryless node For a memoryless node, the outputs of get_pfn_range_for_nid() are all zero, so the end-of-range expression underflows and the "Initmem setup" line displays a range from 0 to -1.
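Illustrative sketch only (not part of the original changelog; a PAGE_SHIFT of 12 is assumed purely for demonstration), showing the u64 underflow behind the bogus end address and what the patched expression reports instead:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed value, for illustration only */

int main(void)
{
	uint64_t end_pfn = 0;	/* get_pfn_range_for_nid() yields 0 for a memoryless node */

	/* old expression: wraps around to 0xffffffffffffffff, i.e. "-1" */
	printf("old end: %#018llx\n",
	       (unsigned long long)((end_pfn << PAGE_SHIFT) - 1));

	/* patched expression: reports 0 for an empty node instead */
	printf("new end: %#018llx\n",
	       (unsigned long long)(end_pfn ? (end_pfn << PAGE_SHIFT) - 1 : 0));
	return 0;
}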
Signed-off-by: Zhen Lei Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b0fda2b9ca76..4a4c399baceb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5476,7 +5476,8 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, - (u64)start_pfn << PAGE_SHIFT, ((u64)end_pfn << PAGE_SHIFT) - 1); + (u64)start_pfn << PAGE_SHIFT, + end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); #endif calculate_node_totalpages(pgdat, start_pfn, end_pfn, zones_size, zholes_size); -- cgit v1.2.3-59-g8ed1b From ad5ea8cd5b934cc082f2cda900b490def149908e Mon Sep 17 00:00:00 2001 From: Alexander Kuleshov Date: Tue, 8 Sep 2015 15:04:22 -0700 Subject: mm/memblock.c: fix comment in __next_mem_range() Signed-off-by: Alexander Kuleshov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 31b06c67b1b1..1c7b647e5897 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -976,7 +976,7 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags, * in type_b. * * @idx: pointer to u64 loop variable - * @nid: nid: node selector, %NUMA_NO_NODE for all nodes + * @nid: node selector, %NUMA_NO_NODE for all nodes * @flags: pick from blocks based on memory attributes * @type_a: pointer to memblock_type from where the range is taken * @type_b: pointer to memblock_type which excludes memory from being taken -- cgit v1.2.3-59-g8ed1b From b430d1fd6c7d22cc07e7c22a2ee1078667605313 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:04:24 -0700 Subject: zsmalloc: drop unused variable `nr_to_migrate' This patchset tweaks compaction and makes it possible to trigger pool compaction automatically when system is getting low on memory. zsmalloc in some cases can suffer from a notable fragmentation and compaction can release some considerable amount of memory. The problem here is that currently we fully rely on user space to perform compaction when needed. However, performing zsmalloc compaction is not always an obvious thing to do. For example, suppose we have a `idle' fragmented (compaction was never performed) zram device and system is getting low on memory due to some 3rd party user processes (gcc LTO, or firefox, etc.). It's quite unlikely that user space will issue zpool compaction in this case. Besides, user space cannot tell for sure how badly pool is fragmented; however, this info is known to zsmalloc and, hence, to a shrinker. This patch (of 7): __zs_compact() does not use `nr_to_migrate', drop it. 
Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 0a7f81aa2249..7d816c2d74f9 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1703,7 +1703,6 @@ static struct page *isolate_source_page(struct size_class *class) static unsigned long __zs_compact(struct zs_pool *pool, struct size_class *class) { - int nr_to_migrate; struct zs_compact_control cc; struct page *src_page; struct page *dst_page = NULL; @@ -1714,8 +1713,6 @@ static unsigned long __zs_compact(struct zs_pool *pool, BUG_ON(!is_first_page(src_page)); - /* The goal is to migrate all live objects in source page */ - nr_to_migrate = src_page->inuse; cc.index = 0; cc.s_page = src_page; @@ -1730,7 +1727,6 @@ static unsigned long __zs_compact(struct zs_pool *pool, putback_zspage(pool, class, dst_page); nr_total_migrated += cc.nr_migrated; - nr_to_migrate -= cc.nr_migrated; } /* Stop if we couldn't find slot */ -- cgit v1.2.3-59-g8ed1b From 57244594195fe697f9261c7970ca25db35280967 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:04:27 -0700 Subject: zsmalloc: always keep per-class stats Always account per-class `zs_size_stat' stats. This data will help us make better decisions during compaction. We are especially interested in OBJ_ALLOCATED and OBJ_USED, which can tell us if class compaction will result in any memory gain. For instance, we know the number of allocated objects in the class, the number of objects being used (so we also know how many objects are not used) and the number of objects per-page. So we can ensure if we have enough unused objects to form at least one ZS_EMPTY zspage during compaction. We calculate this value on per-class basis so we can calculate a total number of zspages that can be released. Which is exactly what a shrinker wants to know. Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 40 ++++++++-------------------------------- 1 file changed, 8 insertions(+), 32 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 7d816c2d74f9..1227f8323e93 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -169,14 +169,12 @@ enum zs_stat_type { NR_ZS_STAT_TYPE, }; -#ifdef CONFIG_ZSMALLOC_STAT - -static struct dentry *zs_stat_root; - struct zs_size_stat { unsigned long objs[NR_ZS_STAT_TYPE]; }; +#ifdef CONFIG_ZSMALLOC_STAT +static struct dentry *zs_stat_root; #endif /* @@ -201,6 +199,8 @@ static int zs_size_classes; static const int fullness_threshold_frac = 4; struct size_class { + spinlock_t lock; + struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS]; /* * Size of objects stored in this class. Must be multiple * of ZS_ALIGN. 
@@ -210,16 +210,10 @@ struct size_class { /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */ int pages_per_zspage; - /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ - bool huge; - -#ifdef CONFIG_ZSMALLOC_STAT struct zs_size_stat stats; -#endif - - spinlock_t lock; - struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS]; + /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ + bool huge; }; /* @@ -441,8 +435,6 @@ static int get_size_class_index(int size) return min(zs_size_classes - 1, idx); } -#ifdef CONFIG_ZSMALLOC_STAT - static inline void zs_stat_inc(struct size_class *class, enum zs_stat_type type, unsigned long cnt) { @@ -461,6 +453,8 @@ static inline unsigned long zs_stat_get(struct size_class *class, return class->stats.objs[type]; } +#ifdef CONFIG_ZSMALLOC_STAT + static int __init zs_stat_init(void) { if (!debugfs_initialized()) @@ -576,23 +570,6 @@ static void zs_pool_stat_destroy(struct zs_pool *pool) } #else /* CONFIG_ZSMALLOC_STAT */ - -static inline void zs_stat_inc(struct size_class *class, - enum zs_stat_type type, unsigned long cnt) -{ -} - -static inline void zs_stat_dec(struct size_class *class, - enum zs_stat_type type, unsigned long cnt) -{ -} - -static inline unsigned long zs_stat_get(struct size_class *class, - enum zs_stat_type type) -{ - return 0; -} - static int __init zs_stat_init(void) { return 0; @@ -610,7 +587,6 @@ static inline int zs_pool_stat_create(char *name, struct zs_pool *pool) static inline void zs_pool_stat_destroy(struct zs_pool *pool) { } - #endif -- cgit v1.2.3-59-g8ed1b From 04f05909e0fde36ba481ad4c850b666ebef1ac55 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:04:30 -0700 Subject: zsmalloc: introduce zs_can_compact() function This function checks if class compaction will free any pages. Rephrasing -- do we have enough unused objects to form at least one ZS_EMPTY page and free it. It aborts compaction if class compaction will not result in any (further) savings. EXAMPLE (this debug output is not part of this patch set): - class size - number of allocated objects - number of used objects - max objects per zspage - pages per zspage - estimated number of pages that will be freed [..] class-512 objs:544 inuse:540 maxobj-per-zspage:8 pages-per-zspage:1 zspages-to-free:0 ... class-512 compaction is useless. break class-496 objs:660 inuse:570 maxobj-per-zspage:33 pages-per-zspage:4 zspages-to-free:2 class-496 objs:627 inuse:570 maxobj-per-zspage:33 pages-per-zspage:4 zspages-to-free:1 class-496 objs:594 inuse:570 maxobj-per-zspage:33 pages-per-zspage:4 zspages-to-free:0 ... class-496 compaction is useless. break class-448 objs:657 inuse:617 maxobj-per-zspage:9 pages-per-zspage:1 zspages-to-free:4 class-448 objs:648 inuse:617 maxobj-per-zspage:9 pages-per-zspage:1 zspages-to-free:3 class-448 objs:639 inuse:617 maxobj-per-zspage:9 pages-per-zspage:1 zspages-to-free:2 class-448 objs:630 inuse:617 maxobj-per-zspage:9 pages-per-zspage:1 zspages-to-free:1 class-448 objs:621 inuse:617 maxobj-per-zspage:9 pages-per-zspage:1 zspages-to-free:0 ... class-448 compaction is useless. break class-432 objs:728 inuse:685 maxobj-per-zspage:28 pages-per-zspage:3 zspages-to-free:1 class-432 objs:700 inuse:685 maxobj-per-zspage:28 pages-per-zspage:3 zspages-to-free:0 ... class-432 compaction is useless. 
break class-416 objs:819 inuse:705 maxobj-per-zspage:39 pages-per-zspage:4 zspages-to-free:2 class-416 objs:780 inuse:705 maxobj-per-zspage:39 pages-per-zspage:4 zspages-to-free:1 class-416 objs:741 inuse:705 maxobj-per-zspage:39 pages-per-zspage:4 zspages-to-free:0 ... class-416 compaction is useless. break class-400 objs:690 inuse:674 maxobj-per-zspage:10 pages-per-zspage:1 zspages-to-free:1 class-400 objs:680 inuse:674 maxobj-per-zspage:10 pages-per-zspage:1 zspages-to-free:0 ... class-400 compaction is useless. break class-384 objs:736 inuse:709 maxobj-per-zspage:32 pages-per-zspage:3 zspages-to-free:0 ... class-384 compaction is useless. break [..] Every "compaction is useless" indicates that we saved CPU cycles. class-512 has 544 object allocated 540 objects used 8 objects per-page Even if we have a ALMOST_EMPTY zspage, we still don't have enough room to migrate all of its objects and free this zspage; so compaction will not make a lot of sense, it's better to just leave it as is. Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 1227f8323e93..4b39e5eaf34f 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1676,6 +1676,29 @@ static struct page *isolate_source_page(struct size_class *class) return page; } +/* + * + * Based on the number of unused allocated objects calculate + * and return the number of pages that we can free. + * + * Should be called under class->lock. + */ +static unsigned long zs_can_compact(struct size_class *class) +{ + unsigned long obj_wasted; + + if (!zs_stat_get(class, CLASS_ALMOST_EMPTY)) + return 0; + + obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) - + zs_stat_get(class, OBJ_USED); + + obj_wasted /= get_maxobj_per_zspage(class->size, + class->pages_per_zspage); + + return obj_wasted * get_pages_per_zspage(class->size); +} + static unsigned long __zs_compact(struct zs_pool *pool, struct size_class *class) { @@ -1689,6 +1712,9 @@ static unsigned long __zs_compact(struct zs_pool *pool, BUG_ON(!is_first_page(src_page)); + if (!zs_can_compact(class)) + break; + cc.index = 0; cc.s_page = src_page; -- cgit v1.2.3-59-g8ed1b From 0dc63d488a2a433a4a85d3908b3f195c4e6450d2 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:04:33 -0700 Subject: zsmalloc: cosmetic compaction code adjustments Change zs_object_copy() argument order to be (DST, SRC) rather than (SRC, DST). copy/move functions usually have (to, from) arguments order. Rename alloc_target_page() to isolate_target_page(). This function doesn't allocate anything, it isolates target page, pretty much like isolate_source_page(). Tweak __zs_compact() comment. 
Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 4b39e5eaf34f..2a1f95249f12 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1471,7 +1471,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle) } EXPORT_SYMBOL_GPL(zs_free); -static void zs_object_copy(unsigned long src, unsigned long dst, +static void zs_object_copy(unsigned long dst, unsigned long src, struct size_class *class) { struct page *s_page, *d_page; @@ -1612,7 +1612,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, used_obj = handle_to_obj(handle); free_obj = obj_malloc(d_page, class, handle); - zs_object_copy(used_obj, free_obj, class); + zs_object_copy(free_obj, used_obj, class); index++; record_obj(handle, free_obj); unpin_tag(handle); @@ -1628,7 +1628,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, return ret; } -static struct page *alloc_target_page(struct size_class *class) +static struct page *isolate_target_page(struct size_class *class) { int i; struct page *page; @@ -1718,11 +1718,11 @@ static unsigned long __zs_compact(struct zs_pool *pool, cc.index = 0; cc.s_page = src_page; - while ((dst_page = alloc_target_page(class))) { + while ((dst_page = isolate_target_page(class))) { cc.d_page = dst_page; /* - * If there is no more space in dst_page, try to - * allocate another zspage. + * If there is no more space in dst_page, resched + * and see if anyone had allocated another zspage. */ if (!migrate_zspage(pool, class, &cc)) break; -- cgit v1.2.3-59-g8ed1b From 7d3f3938236b4bb878214e6791e76fd8409bdeee Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:04:35 -0700 Subject: zsmalloc/zram: introduce zs_pool_stats api `zs_compact_control' accounts the number of migrated objects but it has a limited lifespan -- we lose it as soon as zs_compaction() returns back to zram. It worked fine, because (a) zram had it's own counter of migrated objects and (b) only zram could trigger compaction. However, this does not work for automatic pool compaction (not issued by zram). To account objects migrated during auto-compaction (issued by the shrinker) we need to store this number in zs_pool. Define a new `struct zs_pool_stats' structure to keep zs_pool's stats there. It provides only `num_migrated', as of this writing, but it surely can be extended. A new zsmalloc zs_pool_stats() symbol exports zs_pool's stats back to caller. Use zs_pool_stats() in zram and remove `num_migrated' from zram_stats. 
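A minimal caller sketch of the new API (an illustration, not taken from the patch itself; the helper name is hypothetical and a pool created elsewhere with zs_create_pool() is assumed; note that `num_migrated' is renamed to `pages_compacted' two patches later in this series):

#include <linux/printk.h>
#include <linux/zsmalloc.h>

/* Trigger compaction and report the pool's cumulative migration counter. */
static void zs_report_compaction(struct zs_pool *pool)
{
	struct zs_pool_stats stats;

	zs_compact(pool);
	zs_pool_stats(pool, &stats);
	pr_info("zsmalloc: %lu objects migrated so far\n",
		stats.num_migrated);
}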
Signed-off-by: Sergey Senozhatsky Suggested-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/zram/zram_drv.c | 15 +++++++++------ drivers/block/zram/zram_drv.h | 1 - include/linux/zsmalloc.h | 6 ++++++ mm/zsmalloc.c | 29 +++++++++++++++-------------- 4 files changed, 30 insertions(+), 21 deletions(-) (limited to 'mm') diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 9c01f5bfa33f..bcde5c321090 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -388,7 +388,6 @@ static ssize_t comp_algorithm_store(struct device *dev, static ssize_t compact_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - unsigned long nr_migrated; struct zram *zram = dev_to_zram(dev); struct zram_meta *meta; @@ -399,8 +398,7 @@ static ssize_t compact_store(struct device *dev, } meta = zram->meta; - nr_migrated = zs_compact(meta->mem_pool); - atomic64_add(nr_migrated, &zram->stats.num_migrated); + zs_compact(meta->mem_pool); up_read(&zram->init_lock); return len; @@ -428,26 +426,31 @@ static ssize_t mm_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct zram *zram = dev_to_zram(dev); + struct zs_pool_stats pool_stats; u64 orig_size, mem_used = 0; long max_used; ssize_t ret; + memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats)); + down_read(&zram->init_lock); - if (init_done(zram)) + if (init_done(zram)) { mem_used = zs_get_total_pages(zram->meta->mem_pool); + zs_pool_stats(zram->meta->mem_pool, &pool_stats); + } orig_size = atomic64_read(&zram->stats.pages_stored); max_used = atomic_long_read(&zram->stats.max_used_pages); ret = scnprintf(buf, PAGE_SIZE, - "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n", + "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n", orig_size << PAGE_SHIFT, (u64)atomic64_read(&zram->stats.compr_data_size), mem_used << PAGE_SHIFT, zram->limit_pages << PAGE_SHIFT, max_used << PAGE_SHIFT, (u64)atomic64_read(&zram->stats.zero_pages), - (u64)atomic64_read(&zram->stats.num_migrated)); + pool_stats.num_migrated); up_read(&zram->init_lock); return ret; diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 6dbe2df506bf..8e92339686d7 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -78,7 +78,6 @@ struct zram_stats { atomic64_t compr_data_size; /* compressed size of pages stored */ atomic64_t num_reads; /* failed + successful */ atomic64_t num_writes; /* --do-- */ - atomic64_t num_migrated; /* no. 
of migrated object */ atomic64_t failed_reads; /* can happen when memory is too low */ atomic64_t failed_writes; /* can happen when memory is too low */ atomic64_t invalid_io; /* non-page-aligned I/O requests */ diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 1338190b5478..ad3d23239043 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -34,6 +34,11 @@ enum zs_mapmode { */ }; +struct zs_pool_stats { + /* How many objects were migrated */ + unsigned long num_migrated; +}; + struct zs_pool; struct zs_pool *zs_create_pool(char *name, gfp_t flags); @@ -49,4 +54,5 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle); unsigned long zs_get_total_pages(struct zs_pool *pool); unsigned long zs_compact(struct zs_pool *pool); +void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats); #endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 2a1f95249f12..8f76d8875aca 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -245,6 +245,7 @@ struct zs_pool { gfp_t flags; /* allocation flags used when growing pool */ atomic_long_t pages_allocated; + struct zs_pool_stats stats; #ifdef CONFIG_ZSMALLOC_STAT struct dentry *stat_dentry; #endif @@ -1578,7 +1579,7 @@ struct zs_compact_control { /* Starting object index within @s_page which used for live object * in the subpage. */ int index; - /* how many of objects are migrated */ + /* How many of objects were migrated */ int nr_migrated; }; @@ -1590,7 +1591,6 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, struct page *s_page = cc->s_page; struct page *d_page = cc->d_page; unsigned long index = cc->index; - int nr_migrated = 0; int ret = 0; while (1) { @@ -1617,13 +1617,12 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, record_obj(handle, free_obj); unpin_tag(handle); obj_free(pool, class, used_obj); - nr_migrated++; + cc->nr_migrated++; } /* Remember last position in this iteration */ cc->s_page = s_page; cc->index = index; - cc->nr_migrated = nr_migrated; return ret; } @@ -1699,14 +1698,13 @@ static unsigned long zs_can_compact(struct size_class *class) return obj_wasted * get_pages_per_zspage(class->size); } -static unsigned long __zs_compact(struct zs_pool *pool, - struct size_class *class) +static void __zs_compact(struct zs_pool *pool, struct size_class *class) { struct zs_compact_control cc; struct page *src_page; struct page *dst_page = NULL; - unsigned long nr_total_migrated = 0; + cc.nr_migrated = 0; spin_lock(&class->lock); while ((src_page = isolate_source_page(class))) { @@ -1728,7 +1726,6 @@ static unsigned long __zs_compact(struct zs_pool *pool, break; putback_zspage(pool, class, dst_page); - nr_total_migrated += cc.nr_migrated; } /* Stop if we couldn't find slot */ @@ -1738,7 +1735,6 @@ static unsigned long __zs_compact(struct zs_pool *pool, putback_zspage(pool, class, dst_page); putback_zspage(pool, class, src_page); spin_unlock(&class->lock); - nr_total_migrated += cc.nr_migrated; cond_resched(); spin_lock(&class->lock); } @@ -1746,15 +1742,14 @@ static unsigned long __zs_compact(struct zs_pool *pool, if (src_page) putback_zspage(pool, class, src_page); - spin_unlock(&class->lock); + pool->stats.num_migrated += cc.nr_migrated; - return nr_total_migrated; + spin_unlock(&class->lock); } unsigned long zs_compact(struct zs_pool *pool) { int i; - unsigned long nr_migrated = 0; struct size_class *class; for (i = zs_size_classes - 1; i >= 0; i--) { @@ -1763,13 +1758,19 @@ unsigned long zs_compact(struct zs_pool *pool) continue; if 
(class->index != i) continue; - nr_migrated += __zs_compact(pool, class); + __zs_compact(pool, class); } - return nr_migrated; + return pool->stats.num_migrated; } EXPORT_SYMBOL_GPL(zs_compact); +void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats) +{ + memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats)); +} +EXPORT_SYMBOL_GPL(zs_pool_stats); + /** * zs_create_pool - Creates an allocation pool to work from. * @flags: allocation flags used to allocate pool metadata -- cgit v1.2.3-59-g8ed1b From 860c707dca155a56dfa115ddd6c00959296144a6 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:04:38 -0700 Subject: zsmalloc: account the number of compacted pages Compaction returns back to zram the number of migrated objects, which is quite uninformative -- we have objects of different sizes so user space cannot obtain any valuable data from that number. Change compaction to operate in terms of pages and return back to compaction issuer the number of pages that were freed during compaction. So from now on we will export more meaningful value in zram/mm_stat -- the number of freed (compacted) pages. This requires: (a) a rename of `num_migrated' to 'pages_compacted' (b) a internal API change -- return first_page's fullness_group from putback_zspage(), so we know when putback_zspage() did free_zspage(). It helps us to account compaction stats correctly. Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/blockdev/zram.txt | 3 ++- drivers/block/zram/zram_drv.c | 2 +- include/linux/zsmalloc.h | 4 ++-- mm/zsmalloc.c | 27 +++++++++++++++++---------- 4 files changed, 22 insertions(+), 14 deletions(-) (limited to 'mm') diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt index c4de576093af..62435bb25266 100644 --- a/Documentation/blockdev/zram.txt +++ b/Documentation/blockdev/zram.txt @@ -144,7 +144,8 @@ mem_used_max RW the maximum amount memory zram have consumed to store compressed data mem_limit RW the maximum amount of memory ZRAM can use to store the compressed data -num_migrated RO the number of objects migrated migrated by compaction +pages_compacted RO the number of pages freed during compaction + (available only via zram/mm_stat node) compact WO trigger memory compaction WARNING diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index bcde5c321090..f1c4bb34e007 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -450,7 +450,7 @@ static ssize_t mm_stat_show(struct device *dev, zram->limit_pages << PAGE_SHIFT, max_used << PAGE_SHIFT, (u64)atomic64_read(&zram->stats.zero_pages), - pool_stats.num_migrated); + pool_stats.pages_compacted); up_read(&zram->init_lock); return ret; diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index ad3d23239043..6398dfae53f1 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -35,8 +35,8 @@ enum zs_mapmode { }; struct zs_pool_stats { - /* How many objects were migrated */ - unsigned long num_migrated; + /* How many pages were migrated (freed) */ + unsigned long pages_compacted; }; struct zs_pool; diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 8f76d8875aca..b7b4a5612ec7 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1579,8 +1579,6 @@ struct zs_compact_control { /* Starting object index within @s_page which used for live object * in the subpage. 
*/ int index; - /* How many of objects were migrated */ - int nr_migrated; }; static int migrate_zspage(struct zs_pool *pool, struct size_class *class, @@ -1617,7 +1615,6 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, record_obj(handle, free_obj); unpin_tag(handle); obj_free(pool, class, used_obj); - cc->nr_migrated++; } /* Remember last position in this iteration */ @@ -1643,8 +1640,17 @@ static struct page *isolate_target_page(struct size_class *class) return page; } -static void putback_zspage(struct zs_pool *pool, struct size_class *class, - struct page *first_page) +/* + * putback_zspage - add @first_page into right class's fullness list + * @pool: target pool + * @class: destination class + * @first_page: target page + * + * Return @fist_page's fullness_group + */ +static enum fullness_group putback_zspage(struct zs_pool *pool, + struct size_class *class, + struct page *first_page) { enum fullness_group fullness; @@ -1662,6 +1668,8 @@ static void putback_zspage(struct zs_pool *pool, struct size_class *class, free_zspage(first_page); } + + return fullness; } static struct page *isolate_source_page(struct size_class *class) @@ -1704,7 +1712,6 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) struct page *src_page; struct page *dst_page = NULL; - cc.nr_migrated = 0; spin_lock(&class->lock); while ((src_page = isolate_source_page(class))) { @@ -1733,7 +1740,9 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) break; putback_zspage(pool, class, dst_page); - putback_zspage(pool, class, src_page); + if (putback_zspage(pool, class, src_page) == ZS_EMPTY) + pool->stats.pages_compacted += + get_pages_per_zspage(class->size); spin_unlock(&class->lock); cond_resched(); spin_lock(&class->lock); @@ -1742,8 +1751,6 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) if (src_page) putback_zspage(pool, class, src_page); - pool->stats.num_migrated += cc.nr_migrated; - spin_unlock(&class->lock); } @@ -1761,7 +1768,7 @@ unsigned long zs_compact(struct zs_pool *pool) __zs_compact(pool, class); } - return pool->stats.num_migrated; + return pool->stats.pages_compacted; } EXPORT_SYMBOL_GPL(zs_compact); -- cgit v1.2.3-59-g8ed1b From ab9d306d9c3bf64b1dbad127aa13252cc550f839 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:04:41 -0700 Subject: zsmalloc: use shrinker to trigger auto-compaction Perform automatic pool compaction by a shrinker when system is getting tight on memory. User-space has a very little knowledge regarding zsmalloc fragmentation and basically has no mechanism to tell whether compaction will result in any memory gain. Another issue is that user space is not always aware of the fact that system is getting tight on memory. Which leads to very uncomfortable scenarios when user space may start issuing compaction 'randomly' or from crontab (for example). Fragmentation is not always necessarily bad, allocated and unused objects, after all, may be filled with the data later, w/o the need of allocating a new zspage. On the other hand, we obviously don't want to waste memory when the system needs it. Compaction now has a relatively quick pool scan so we are able to estimate the number of pages that will be freed easily, which makes it possible to call this function from a shrinker->count_objects() callback. We also abort compaction as soon as we detect that we can't free any pages any more, preventing wasteful objects migrations. 
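To make the count_objects() estimate concrete, here is the arithmetic (an editorial illustration, using the sample class numbers quoted for the zs_can_compact() patch earlier in this series):

	class-496: OBJ_ALLOCATED = 660, OBJ_USED = 570,
	           33 objects and 4 pages per zspage
	  obj_wasted = 660 - 570 = 90 allocated-but-unused slots
	  90 / 33 = 2 whole zspages worth of waste
	  2 * 4  = 8 pages the shrinker can report as freeable

	class-512: 544 allocated, 540 used, 8 objects and 1 page per zspage
	  544 - 540 = 4, and 4 / 8 = 0, so nothing is reported and
	  compaction of this class is skipped.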
Signed-off-by: Sergey Senozhatsky Suggested-by: Minchan Kim Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index b7b4a5612ec7..27b9661c8fa6 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -246,6 +246,14 @@ struct zs_pool { atomic_long_t pages_allocated; struct zs_pool_stats stats; + + /* Compact classes */ + struct shrinker shrinker; + /* + * To signify that register_shrinker() was successful + * and unregister_shrinker() will not Oops. + */ + bool shrinker_enabled; #ifdef CONFIG_ZSMALLOC_STAT struct dentry *stat_dentry; #endif @@ -1778,6 +1786,69 @@ void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats) } EXPORT_SYMBOL_GPL(zs_pool_stats); +static unsigned long zs_shrinker_scan(struct shrinker *shrinker, + struct shrink_control *sc) +{ + unsigned long pages_freed; + struct zs_pool *pool = container_of(shrinker, struct zs_pool, + shrinker); + + pages_freed = pool->stats.pages_compacted; + /* + * Compact classes and calculate compaction delta. + * Can run concurrently with a manually triggered + * (by user) compaction. + */ + pages_freed = zs_compact(pool) - pages_freed; + + return pages_freed ? pages_freed : SHRINK_STOP; +} + +static unsigned long zs_shrinker_count(struct shrinker *shrinker, + struct shrink_control *sc) +{ + int i; + struct size_class *class; + unsigned long pages_to_free = 0; + struct zs_pool *pool = container_of(shrinker, struct zs_pool, + shrinker); + + if (!pool->shrinker_enabled) + return 0; + + for (i = zs_size_classes - 1; i >= 0; i--) { + class = pool->size_class[i]; + if (!class) + continue; + if (class->index != i) + continue; + + spin_lock(&class->lock); + pages_to_free += zs_can_compact(class); + spin_unlock(&class->lock); + } + + return pages_to_free; +} + +static void zs_unregister_shrinker(struct zs_pool *pool) +{ + if (pool->shrinker_enabled) { + unregister_shrinker(&pool->shrinker); + pool->shrinker_enabled = false; + } +} + +static int zs_register_shrinker(struct zs_pool *pool) +{ + pool->shrinker.scan_objects = zs_shrinker_scan; + pool->shrinker.count_objects = zs_shrinker_count; + pool->shrinker.batch = 0; + pool->shrinker.seeks = DEFAULT_SEEKS; + + return register_shrinker(&pool->shrinker); +} + /** * zs_create_pool - Creates an allocation pool to work from. * @flags: allocation flags used to allocate pool metadata @@ -1863,6 +1934,12 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags) if (zs_pool_stat_create(name, pool)) goto err; + /* + * Not critical, we still can use the pool + * and user can trigger compaction manually. + */ + if (zs_register_shrinker(pool) == 0) + pool->shrinker_enabled = true; return pool; err: @@ -1875,6 +1952,7 @@ void zs_destroy_pool(struct zs_pool *pool) { int i; + zs_unregister_shrinker(pool); zs_pool_stat_destroy(pool); for (i = 0; i < zs_size_classes; i++) { -- cgit v1.2.3-59-g8ed1b From 58f171174625150f3aaad0cddd3e365270b8e1b8 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:04:44 -0700 Subject: zsmalloc: partial page ordering within a fullness_list We want to see more ZS_FULL pages and less ZS_ALMOST_{FULL, EMPTY} pages. 
Put a page with higher ->inuse count first within its ->fullness_list, which will give us better chances to fill up this page with new objects (find_get_zspage() returns the ->fullness_list head for new object allocation), so some zspages will become ZS_ALMOST_FULL/ZS_FULL quicker. It performs a trivial and cheap ->inuse comparison which does not slow down zsmalloc and in the worst case keeps the list pages in no particular order. A more expensive solution could sort fullness_list by ->inuse count. [minchan@kernel.org: code adjustments] Signed-off-by: Sergey Senozhatsky Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 27b9661c8fa6..615b9b9b45eb 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -643,13 +643,22 @@ static void insert_zspage(struct page *page, struct size_class *class, if (fullness >= _ZS_NR_FULLNESS_GROUPS) return; - head = &class->fullness_list[fullness]; - if (*head) - list_add_tail(&page->lru, &(*head)->lru); - - *head = page; zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ? CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1); + + head = &class->fullness_list[fullness]; + if (!*head) { + *head = page; + return; + } + + /* + * We want to see more ZS_FULL pages and less almost + * empty/full. Put pages with higher ->inuse first. + */ + list_add_tail(&page->lru, &(*head)->lru); + if (page->inuse >= (*head)->inuse) + *head = page; } /* -- cgit v1.2.3-59-g8ed1b From ad9d5e175a77a253f52a7259a7c918b8351d99f1 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 8 Sep 2015 15:04:47 -0700 Subject: zsmalloc: consider ZS_ALMOST_FULL as migrate source There is no reason to prevent selecting ZS_ALMOST_FULL as a migration source if we cannot find a source from ZS_ALMOST_EMPTY. With this patch, zs_can_compact() will return a more exact result. Signed-off-by: Minchan Kim Acked-by: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 615b9b9b45eb..c10885ca87a4 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1691,11 +1691,17 @@ static enum fullness_group putback_zspage(struct zs_pool *pool, static struct page *isolate_source_page(struct size_class *class) { - struct page *page; + int i; + struct page *page = NULL; + + for (i = ZS_ALMOST_EMPTY; i >= ZS_ALMOST_FULL; i--) { + page = class->fullness_list[i]; + if (!page) + continue; - page = class->fullness_list[ZS_ALMOST_EMPTY]; - if (page) - remove_zspage(page, class, ZS_ALMOST_EMPTY); + remove_zspage(page, class, i); + break; + } return page; } @@ -1711,9 +1717,6 @@ static unsigned long zs_can_compact(struct size_class *class) { unsigned long obj_wasted; - if (!zs_stat_get(class, CLASS_ALMOST_EMPTY)) - return 0; - obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) - zs_stat_get(class, OBJ_USED); -- cgit v1.2.3-59-g8ed1b From 6cbf16b3b66a61b9c6df8f2ed4ac346cb427f28a Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 8 Sep 2015 15:04:49 -0700 Subject: zsmalloc: use class->pages_per_zspage There is no need to recalculate pages_per_zspage at runtime. Just use class->pages_per_zspage to avoid unnecessary runtime overhead.
Signed-off-by: Minchan Kim Acked-by: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index c10885ca87a4..ce08d043becd 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1723,7 +1723,7 @@ static unsigned long zs_can_compact(struct size_class *class) obj_wasted /= get_maxobj_per_zspage(class->size, class->pages_per_zspage); - return obj_wasted * get_pages_per_zspage(class->size); + return obj_wasted * class->pages_per_zspage; } static void __zs_compact(struct zs_pool *pool, struct size_class *class) @@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) putback_zspage(pool, class, dst_page); if (putback_zspage(pool, class, src_page) == ZS_EMPTY) - pool->stats.pages_compacted += - get_pages_per_zspage(class->size); + pool->stats.pages_compacted += class->pages_per_zspage; spin_unlock(&class->lock); cond_resched(); spin_lock(&class->lock); -- cgit v1.2.3-59-g8ed1b From b3e237f1f5a86030c875e186ff19640f4f4f3c63 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:04:52 -0700 Subject: zsmalloc: do not take class lock in zs_shrinker_count() We can avoid taking class ->lock around zs_can_compact() in zs_shrinker_count(), because the number that we return back is outdated in general case, by design. We have different sources that are able to change class's state right after we return from zs_can_compact() -- ongoing I/O operations, manually triggered compaction, or two of them happening simultaneously. We re-do this calculations during compaction on a per class basis anyway. zs_unregister_shrinker() will not return until we have an active shrinker, so classes won't unexpectedly disappear while zs_shrinker_count() iterates them. Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index ce08d043becd..c19b99c8a457 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1710,8 +1710,6 @@ static struct page *isolate_source_page(struct size_class *class) * * Based on the number of unused allocated objects calculate * and return the number of pages that we can free. - * - * Should be called under class->lock. */ static unsigned long zs_can_compact(struct size_class *class) { @@ -1834,9 +1832,7 @@ static unsigned long zs_shrinker_count(struct shrinker *shrinker, if (class->index != i) continue; - spin_lock(&class->lock); pages_to_free += zs_can_compact(class); - spin_unlock(&class->lock); } return pages_to_free; -- cgit v1.2.3-59-g8ed1b From cd10add00c1b31cd664a31108a9b395025def50a Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 8 Sep 2015 15:04:55 -0700 Subject: zsmalloc: remove null check from destroy_handle_cache() We can pass a NULL cache pointer to kmem_cache_destroy(), because it NULL-checks its argument now. Remove redundant test from destroy_handle_cache(). 
Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index c19b99c8a457..089120429c18 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -288,8 +288,7 @@ static int create_handle_cache(struct zs_pool *pool) static void destroy_handle_cache(struct zs_pool *pool) { - if (pool->handle_cachep) - kmem_cache_destroy(pool->handle_cachep); + kmem_cache_destroy(pool->handle_cachep); } static unsigned long alloc_handle(struct zs_pool *pool) -- cgit v1.2.3-59-g8ed1b From 5b999aadbae65696a148f55250d94b6f3d74071e Mon Sep 17 00:00:00 2001 From: Dmitry Safonov <0x7f454c46@gmail.com> Date: Tue, 8 Sep 2015 15:05:00 -0700 Subject: mm: swap: zswap: maybe_preload & refactoring zswap_get_swap_cache_page() and read_swap_cache_async() have pretty much the same code; the only significant differences are the return value and the usage of swap_readpage(). Add a helper, __read_swap_cache_async(), with the common code. Behavior change: now zswap_get_swap_cache_page will use radix_tree_maybe_preload() instead of radix_tree_preload(). It looks like the only reason this was not changed earlier was the code duplication. Signed-off-by: Dmitry Safonov <0x7f454c46@gmail.com> Cc: Johannes Weiner Cc: Vladimir Davydov Cc: Michal Hocko Cc: Hugh Dickins Cc: Minchan Kim Cc: Tejun Heo Cc: Jens Axboe Cc: Christoph Hellwig Cc: David Herrmann Cc: Seth Jennings Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 3 +++ mm/swap_state.c | 37 ++++++++++++++++++-------- mm/zswap.c | 73 +++++----------------------------------------------- 3 files changed, 35 insertions(+), 78 deletions(-) (limited to 'mm') diff --git a/include/linux/swap.h b/include/linux/swap.h index 2ce190709280..7ba7dccaf0e7 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -406,6 +406,9 @@ extern void free_pages_and_swap_cache(struct page **, int); extern struct page *lookup_swap_cache(swp_entry_t); extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, struct vm_area_struct *vma, unsigned long addr); +extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t, + struct vm_area_struct *vma, unsigned long addr, + bool *new_page_allocated); extern struct page *swapin_readahead(swp_entry_t, gfp_t, struct vm_area_struct *vma, unsigned long addr); diff --git a/mm/swap_state.c b/mm/swap_state.c index 8bc8e66138da..d504adb7fa5f 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -288,17 +288,14 @@ struct page * lookup_swap_cache(swp_entry_t entry) return page; } -/* - * Locate a page of swap in physical memory, reserving swap cache space - * and reading the disk if it is not already cached. - * A failure return means that either the page allocation failed or that - * the swap entry is no longer in use. - */ -struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, - struct vm_area_struct *vma, unsigned long addr) +struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, + struct vm_area_struct *vma, unsigned long addr, + bool *new_page_allocated) { struct page *found_page, *new_page = NULL; + struct address_space *swapper_space = swap_address_space(entry); int err; + *new_page_allocated = false; do { /* @@ -306,8 +303,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * called after lookup_swap_cache() failed, re-calling * that would confuse statistics.
*/ - found_page = find_get_page(swap_address_space(entry), - entry.val); + found_page = find_get_page(swapper_space, entry.val); if (found_page) break; @@ -366,7 +362,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * Initiate read into locked page and return. */ lru_cache_add_anon(new_page); - swap_readpage(new_page); + *new_page_allocated = true; return new_page; } radix_tree_preload_end(); @@ -384,6 +380,25 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, return found_page; } +/* + * Locate a page of swap in physical memory, reserving swap cache space + * and reading the disk if it is not already cached. + * A failure return means that either the page allocation failed or that + * the swap entry is no longer in use. + */ +struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, + struct vm_area_struct *vma, unsigned long addr) +{ + bool page_was_allocated; + struct page *retpage = __read_swap_cache_async(entry, gfp_mask, + vma, addr, &page_was_allocated); + + if (page_was_allocated) + swap_readpage(retpage); + + return retpage; +} + static unsigned long swapin_nr_pages(unsigned long offset) { static unsigned long prev_offset; diff --git a/mm/zswap.c b/mm/zswap.c index 2d5727baed59..09208c7c86f3 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -446,75 +446,14 @@ enum zswap_get_swap_ret { static int zswap_get_swap_cache_page(swp_entry_t entry, struct page **retpage) { - struct page *found_page, *new_page = NULL; - struct address_space *swapper_space = swap_address_space(entry); - int err; + bool page_was_allocated; - *retpage = NULL; - do { - /* - * First check the swap cache. Since this is normally - * called after lookup_swap_cache() failed, re-calling - * that would confuse statistics. - */ - found_page = find_get_page(swapper_space, entry.val); - if (found_page) - break; - - /* - * Get a new page to read into from swap. - */ - if (!new_page) { - new_page = alloc_page(GFP_KERNEL); - if (!new_page) - break; /* Out of memory */ - } - - /* - * call radix_tree_preload() while we can wait. - */ - err = radix_tree_preload(GFP_KERNEL); - if (err) - break; - - /* - * Swap entry may have been freed since our caller observed it. - */ - err = swapcache_prepare(entry); - if (err == -EEXIST) { /* seems racy */ - radix_tree_preload_end(); - continue; - } - if (err) { /* swp entry is obsolete ? */ - radix_tree_preload_end(); - break; - } - - /* May fail (-ENOMEM) if radix-tree node allocation failed. */ - __set_page_locked(new_page); - SetPageSwapBacked(new_page); - err = __add_to_swap_cache(new_page, entry); - if (likely(!err)) { - radix_tree_preload_end(); - lru_cache_add_anon(new_page); - *retpage = new_page; - return ZSWAP_SWAPCACHE_NEW; - } - radix_tree_preload_end(); - ClearPageSwapBacked(new_page); - __clear_page_locked(new_page); - /* - * add_to_swap_cache() doesn't return -EEXIST, so we can safely - * clear SWAP_HAS_CACHE flag. 
- */ - swapcache_free(entry); - } while (err != -ENOMEM); - - if (new_page) - page_cache_release(new_page); - if (!found_page) + *retpage = __read_swap_cache_async(entry, GFP_KERNEL, + NULL, 0, &page_was_allocated); + if (page_was_allocated) + return ZSWAP_SWAPCACHE_NEW; + if (!*retpage) return ZSWAP_SWAPCACHE_FAIL; - *retpage = found_page; return ZSWAP_SWAPCACHE_EXIST; } -- cgit v1.2.3-59-g8ed1b From 786727799a85aeabc20cab5ecfb72771bcbd6b85 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 8 Sep 2015 15:05:03 -0700 Subject: mm: zpool: constify the zpool_ops The structure zpool_ops is not modified so make the pointer to it a pointer to const. Signed-off-by: Krzysztof Kozlowski Acked-by: Dan Streetman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/zpool.h | 4 ++-- mm/zbud.c | 4 ++-- mm/zpool.c | 4 ++-- mm/zsmalloc.c | 3 ++- mm/zswap.c | 2 +- 5 files changed, 9 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/include/linux/zpool.h b/include/linux/zpool.h index d30eff3d84d5..c924a28d9805 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -37,7 +37,7 @@ enum zpool_mapmode { }; struct zpool *zpool_create_pool(char *type, char *name, - gfp_t gfp, struct zpool_ops *ops); + gfp_t gfp, const struct zpool_ops *ops); char *zpool_get_type(struct zpool *pool); @@ -81,7 +81,7 @@ struct zpool_driver { atomic_t refcount; struct list_head list; - void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops, + void *(*create)(char *name, gfp_t gfp, const struct zpool_ops *ops, struct zpool *zpool); void (*destroy)(void *pool); diff --git a/mm/zbud.c b/mm/zbud.c index f3bf6f7627d8..6f8158d64864 100644 --- a/mm/zbud.c +++ b/mm/zbud.c @@ -99,7 +99,7 @@ struct zbud_pool { struct zbud_ops *ops; #ifdef CONFIG_ZPOOL struct zpool *zpool; - struct zpool_ops *zpool_ops; + const struct zpool_ops *zpool_ops; #endif }; @@ -138,7 +138,7 @@ static struct zbud_ops zbud_zpool_ops = { }; static void *zbud_zpool_create(char *name, gfp_t gfp, - struct zpool_ops *zpool_ops, + const struct zpool_ops *zpool_ops, struct zpool *zpool) { struct zbud_pool *pool; diff --git a/mm/zpool.c b/mm/zpool.c index 722a4f60e90b..951db32b833f 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -22,7 +22,7 @@ struct zpool { struct zpool_driver *driver; void *pool; - struct zpool_ops *ops; + const struct zpool_ops *ops; struct list_head list; }; @@ -115,7 +115,7 @@ static void zpool_put_driver(struct zpool_driver *driver) * Returns: New zpool on success, NULL on failure. 
*/ struct zpool *zpool_create_pool(char *type, char *name, gfp_t gfp, - struct zpool_ops *ops) + const struct zpool_ops *ops) { struct zpool_driver *driver; struct zpool *zpool; diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 089120429c18..f135b1b6fcdc 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -311,7 +311,8 @@ static void record_obj(unsigned long handle, unsigned long obj) #ifdef CONFIG_ZPOOL -static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops, +static void *zs_zpool_create(char *name, gfp_t gfp, + const struct zpool_ops *zpool_ops, struct zpool *zpool) { return zs_create_pool(name, gfp); diff --git a/mm/zswap.c b/mm/zswap.c index 09208c7c86f3..48a1d081e2a5 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -755,7 +755,7 @@ static void zswap_frontswap_invalidate_area(unsigned type) zswap_trees[type] = NULL; } -static struct zpool_ops zswap_zpool_ops = { +static const struct zpool_ops zswap_zpool_ops = { .evict = zswap_writeback_entry }; -- cgit v1.2.3-59-g8ed1b From c83db4f419e7105af38cdcca80cc51213214a2c8 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 8 Sep 2015 15:05:06 -0700 Subject: mm: zbud: constify the zbud_ops The structure zbud_ops is not modified so make the pointer to it a pointer to const. Signed-off-by: Krzysztof Kozlowski Acked-by: Dan Streetman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/zbud.h | 2 +- mm/zbud.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/include/linux/zbud.h b/include/linux/zbud.h index f9d41a6e361f..e183a0a65ac1 100644 --- a/include/linux/zbud.h +++ b/include/linux/zbud.h @@ -9,7 +9,7 @@ struct zbud_ops { int (*evict)(struct zbud_pool *pool, unsigned long handle); }; -struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops); +struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops); void zbud_destroy_pool(struct zbud_pool *pool); int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, unsigned long *handle); diff --git a/mm/zbud.c b/mm/zbud.c index 6f8158d64864..fa48bcdff9d5 100644 --- a/mm/zbud.c +++ b/mm/zbud.c @@ -96,7 +96,7 @@ struct zbud_pool { struct list_head buddied; struct list_head lru; u64 pages_nr; - struct zbud_ops *ops; + const struct zbud_ops *ops; #ifdef CONFIG_ZPOOL struct zpool *zpool; const struct zpool_ops *zpool_ops; @@ -133,7 +133,7 @@ static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle) return -ENOENT; } -static struct zbud_ops zbud_zpool_ops = { +static const struct zbud_ops zbud_zpool_ops = { .evict = zbud_zpool_evict }; @@ -302,7 +302,7 @@ static int num_free_chunks(struct zbud_header *zhdr) * Return: pointer to the new zbud pool or NULL if the metadata allocation * failed. */ -struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops) +struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops) { struct zbud_pool *pool; int i; -- cgit v1.2.3-59-g8ed1b From df69f52d990bd85159727bd26e819d3a6e49c666 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Tue, 8 Sep 2015 15:05:09 -0700 Subject: zpool: remove no-op module init/exit Remove zpool_init() and zpool_exit(); they do nothing other than print "loaded" and "unloaded". 
Signed-off-by: Dan Streetman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zpool.c | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'mm') diff --git a/mm/zpool.c b/mm/zpool.c index 951db32b833f..68d2dd8ed2d8 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -320,20 +320,6 @@ u64 zpool_get_total_size(struct zpool *zpool) return zpool->driver->total_size(zpool->pool); } -static int __init init_zpool(void) -{ - pr_info("loaded\n"); - return 0; -} - -static void __exit exit_zpool(void) -{ - pr_info("unloaded\n"); -} - -module_init(init_zpool); -module_exit(exit_zpool); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dan Streetman "); MODULE_DESCRIPTION("Common API for compressed memory storage"); -- cgit v1.2.3-59-g8ed1b