From bc2466e4257369d0ebee2b6265070d323343fa72 Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Thu, 12 May 2016 18:29:19 +0200
Subject: dax: Use radix tree entry lock to protect cow faults

When doing cow faults, we cannot directly fill in the PTE as we do for
other faults, as we rely on generic code to do proper accounting of the
cowed page.  We also have no page to lock to protect against races with
truncate as other faults have, and we need the protection to extend
until the moment generic code inserts the cowed page into the PTE; at
that point we are outside the protection of the fs-specific i_mmap_sem.
So far we relied on i_mmap_lock for that protection, however that is
completely special to cow faults.  To make fault locking more uniform,
use the DAX entry lock instead.

Reviewed-by: Ross Zwisler
Signed-off-by: Jan Kara
Signed-off-by: Ross Zwisler
---
 mm/memory.c | 38 ++++++++++++++++++--------------------
 1 file changed, 18 insertions(+), 20 deletions(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index 93897f23cc11..f09cdb8d48fa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -63,6 +63,7 @@
 #include 
 #include 
 #include 
+#include <linux/dax.h>
 
 #include 
 #include 
@@ -2785,7 +2786,8 @@ oom:
  */
 static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 			pgoff_t pgoff, unsigned int flags,
-			struct page *cow_page, struct page **page)
+			struct page *cow_page, struct page **page,
+			void **entry)
 {
 	struct vm_fault vmf;
 	int ret;
@@ -2800,8 +2802,10 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	ret = vma->vm_ops->fault(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
-	if (!vmf.page)
-		goto out;
+	if (ret & VM_FAULT_DAX_LOCKED) {
+		*entry = vmf.entry;
+		return ret;
+	}
 
 	if (unlikely(PageHWPoison(vmf.page))) {
 		if (ret & VM_FAULT_LOCKED)
@@ -2815,7 +2819,6 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
 
- out:
 	*page = vmf.page;
 	return ret;
 }
@@ -2987,7 +2990,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		pte_unmap_unlock(pte, ptl);
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -3010,6 +3013,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
 	struct page *fault_page, *new_page;
+	void *fault_entry;
 	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
@@ -3027,26 +3031,24 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page,
+			 &fault_entry);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
-	if (fault_page)
+	if (!(ret & VM_FAULT_DAX_LOCKED))
 		copy_user_highpage(new_page, fault_page, address, vma);
 	__SetPageUptodate(new_page);
 
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
-		if (fault_page) {
+		if (!(ret & VM_FAULT_DAX_LOCKED)) {
 			unlock_page(fault_page);
 			put_page(fault_page);
 		} else {
-			/*
-			 * The fault handler has no page to lock, so it holds
-			 * i_mmap_lock for read to protect against truncate.
-			 */
-			i_mmap_unlock_read(vma->vm_file->f_mapping);
+			dax_unlock_mapping_entry(vma->vm_file->f_mapping,
+						 pgoff);
		}
 		goto uncharge_out;
 	}
@@ -3054,15 +3056,11 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
-	if (fault_page) {
+	if (!(ret & VM_FAULT_DAX_LOCKED)) {
 		unlock_page(fault_page);
 		put_page(fault_page);
 	} else {
-		/*
-		 * The fault handler has no page to lock, so it holds
-		 * i_mmap_lock for read to protect against truncate.
-		 */
-		i_mmap_unlock_read(vma->vm_file->f_mapping);
+		dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
 	}
 	return ret;
 uncharge_out:
@@ -3082,7 +3080,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
-- 
cgit v1.3-8-gc7d7
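The calling convention this patch introduces is easier to see in isolation.
The sketch below is an illustration, not code from the tree: only
VM_FAULT_DAX_LOCKED, vmf->entry and dax_unlock_mapping_entry() come from the
patch above; the function name and surrounding structure are invented for
the example.

	/*
	 * Condensed view of a cow fault against a DAX file after this
	 * patch.  The fault handler locks the radix tree entry, publishes
	 * it via vmf->entry and returns VM_FAULT_DAX_LOCKED instead of a
	 * locked page; generic code drops the entry lock only after the
	 * cowed page has been inserted into the PTE, closing the race
	 * window with truncate.
	 */
	static int cow_fault_sketch(struct vm_area_struct *vma,
				    pgoff_t pgoff, struct vm_fault *vmf)
	{
		void *fault_entry = NULL;
		int ret = vma->vm_ops->fault(vma, vmf); /* may lock DAX entry */

		if (ret & VM_FAULT_DAX_LOCKED)
			fault_entry = vmf->entry;	/* no struct page to lock */

		/* ... copy into the cowed page, insert it into the PTE ... */

		if (ret & VM_FAULT_DAX_LOCKED)		/* entry lock held so far */
			dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
		return ret;
	}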
From 4d9a2c8746671efbb0c27d3ae28c7474597a7aad Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Thu, 12 May 2016 18:29:20 +0200
Subject: dax: Remove i_mmap_lock protection

Currently faults are protected against truncate by the filesystem-specific
i_mmap_sem, and by the page lock in case of a hole page.  Cow faults are
protected by DAX radix tree entry locking.  So there's no need for
i_mmap_lock in DAX code.  Remove it.

Reviewed-by: Ross Zwisler
Signed-off-by: Jan Kara
Signed-off-by: Ross Zwisler
---
 fs/dax.c    | 24 +++++-------------------
 mm/memory.c |  2 --
 2 files changed, 5 insertions(+), 21 deletions(-)

(limited to 'mm/memory.c')

diff --git a/fs/dax.c b/fs/dax.c
index be74635e05a6..6dbe6021cab7 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -798,29 +798,19 @@ static int dax_insert_mapping(struct address_space *mapping,
 		.sector = to_sector(bh, mapping->host),
 		.size = bh->b_size,
 	};
-	int error;
 	void *ret;
 	void *entry = *entryp;
 
-	i_mmap_lock_read(mapping);
-
-	if (dax_map_atomic(bdev, &dax) < 0) {
-		error = PTR_ERR(dax.addr);
-		goto out;
-	}
+	if (dax_map_atomic(bdev, &dax) < 0)
+		return PTR_ERR(dax.addr);
 	dax_unmap_atomic(bdev, &dax);
 
 	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
-	if (IS_ERR(ret)) {
-		error = PTR_ERR(ret);
-		goto out;
-	}
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
 	*entryp = ret;
 
-	error = vm_insert_mixed(vma, vaddr, dax.pfn);
- out:
-	i_mmap_unlock_read(mapping);
-	return error;
+	return vm_insert_mixed(vma, vaddr, dax.pfn);
 }
 
 /**
@@ -1058,8 +1048,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		truncate_pagecache_range(inode, lstart, lend);
 	}
 
-	i_mmap_lock_read(mapping);
-
 	if (!write && !buffer_mapped(&bh)) {
 		spinlock_t *ptl;
 		pmd_t entry;
@@ -1148,8 +1136,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	}
 
  out:
-	i_mmap_unlock_read(mapping);
-
 	return result;
 
 fallback:
diff --git a/mm/memory.c b/mm/memory.c
index f09cdb8d48fa..06f552504e79 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2453,8 +2453,6 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
 
-
-	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-- 
cgit v1.3-8-gc7d7
From d5957d2fc232a689543bdbed1a5ff8002f0e9843 Mon Sep 17 00:00:00 2001
From: Yongji Xie
Date: Fri, 20 May 2016 16:57:41 -0700
Subject: mm: fix incorrect pfn passed to untrack_pfn() in remap_pfn_range()

We use generic hooks in remap_pfn_range() to help archs to track pfnmap
regions.  The code is something like:

	int remap_pfn_range()
	{
		...
		track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
		...
		pfn -= addr >> PAGE_SHIFT;
		...
		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
		...
	}

Here we can easily find the pfn is changed but not recovered before
untrack_pfn() is called.  That's incorrect.

There are no known runtime effects - this is from inspection.

Signed-off-by: Yongji Xie
Cc: Kirill A. Shutemov
Cc: Jerome Marchand
Cc: Ingo Molnar
Cc: Vlastimil Babka
Cc: Dave Hansen
Cc: Dan Williams
Cc: Matthew Wilcox
Cc: Andrea Arcangeli
Cc: Michal Hocko
Cc: Andy Lutomirski
Cc: David Hildenbrand
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index 07493e34ab7e..007c72ad03f6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1744,6 +1744,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	unsigned long next;
 	unsigned long end = addr + PAGE_ALIGN(size);
 	struct mm_struct *mm = vma->vm_mm;
+	unsigned long remap_pfn = pfn;
 	int err;
 
 	/*
@@ -1770,7 +1771,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		vma->vm_pgoff = pfn;
 	}
 
-	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
+	err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
 	if (err)
 		return -EINVAL;
 
@@ -1789,7 +1790,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	} while (pgd++, addr = next, addr != end);
 
 	if (err)
-		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
+		untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
 
 	return err;
 }
-- 
cgit v1.3-8-gc7d7
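For context, remap_pfn_range() is typically called from a driver's mmap
hook, and the fix above only matters on the error path of such calls.
A minimal, hypothetical example — the device and its buffer are made up;
the remap_pfn_range() signature is the real one touched by this patch:

	/* Hypothetical char driver mmap hook mapping a driver buffer. */
	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;
		unsigned long pfn = virt_to_phys(mydev_buf) >> PAGE_SHIFT;

		/*
		 * If remap_pfn_range() fails after track_pfn_remap(), it now
		 * calls untrack_pfn() with the pfn it was given, not the
		 * value left over from its internal arithmetic.
		 */
		return remap_pfn_range(vma, vma->vm_start, pfn, size,
				       vma->vm_page_prot);
	}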
From 5c0a85fad949212b3e059692deecdeed74ae7ec7 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 20 May 2016 16:58:41 -0700
Subject: mm: make faultaround produce old ptes

Currently, faultaround code produces young ptes.  This can screw up
vmscan behaviour[1], as it makes vmscan think that these pages are hot
and not push them out on the first round.

During sparse file access faultaround gets more pages mapped and all of
them are young.  Under memory pressure, this makes vmscan swap out anon
pages instead, or drop other page cache pages which would otherwise stay
resident.

Modify faultaround to produce old ptes, so they can easily be reclaimed
under memory pressure.

This can to some extent defeat the purpose of faultaround on machines
without a hardware accessed bit, as it will not help us with reducing
the number of minor page faults.  We may want to disable faultaround on
such machines altogether, but that's subject for a separate patchset.

Minchan:
 "I tested 512M mmap sequential word read test on non-HW access bit
  system (i.e., ARM) and confirmed it doesn't increase minor fault any
  more.

  old: 4096 fault_around
  minor fault: 131291
  elapsed time: 6747645 usec

  new: 65536 fault_around
  minor fault: 131291
  elapsed time: 6709263 usec

  0.56% benefit"

[1] https://lkml.kernel.org/r/1460992636-711-1-git-send-email-vinmenon@codeaurora.org

Link: http://lkml.kernel.org/r/1463488366-47723-1-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov
Acked-by: Michal Hocko
Acked-by: Minchan Kim
Tested-by: Minchan Kim
Acked-by: Rik van Riel
Cc: Mel Gorman
Cc: Michal Hocko
Cc: Vinayak Menon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mm.h |  2 +-
 mm/filemap.c       |  2 +-
 mm/memory.c        | 23 ++++++++++++++++++-----
 3 files changed, 20 insertions(+), 7 deletions(-)

(limited to 'mm/memory.c')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index fbdb9d40847f..f223ac26b5d9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -596,7 +596,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 }
 
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-		struct page *page, pte_t *pte, bool write, bool anon);
+		struct page *page, pte_t *pte, bool write, bool anon, bool old);
 #endif
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 8f4859989f1b..b418405903bc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2191,7 +2191,7 @@ repeat:
 		if (file->f_ra.mmap_miss > 0)
 			file->f_ra.mmap_miss--;
 		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
-		do_set_pte(vma, addr, page, pte, false, false);
+		do_set_pte(vma, addr, page, pte, false, false, true);
 		unlock_page(page);
 		goto next;
 unlock:
diff --git a/mm/memory.c b/mm/memory.c
index 007c72ad03f6..f29e5ab0342d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2876,7 +2876,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
  * vm_ops->map_pages.
  */
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-		struct page *page, pte_t *pte, bool write, bool anon)
+		struct page *page, pte_t *pte, bool write, bool anon, bool old)
 {
 	pte_t entry;
 
@@ -2884,6 +2884,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	if (old)
+		entry = pte_mkold(entry);
 	if (anon) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address, false);
@@ -3021,9 +3023,20 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-		do_fault_around(vma, address, pte, pgoff, flags);
 		if (!pte_same(*pte, orig_pte))
 			goto unlock_out;
+		do_fault_around(vma, address, pte, pgoff, flags);
+		/* Check if the fault is handled by faultaround */
+		if (!pte_same(*pte, orig_pte)) {
+			/*
+			 * Faultaround produces old ptes, but the pte we've
+			 * handled the fault for should be young.
+			 */
+			pte_t entry = pte_mkyoung(*pte);
+			if (ptep_set_access_flags(vma, address, pte, entry, 0))
+				update_mmu_cache(vma, address, pte);
+			goto unlock_out;
+		}
 		pte_unmap_unlock(pte, ptl);
 	}
 
@@ -3038,7 +3051,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, false, false);
+	do_set_pte(vma, address, fault_page, pte, false, false, false);
 	unlock_page(fault_page);
 unlock_out:
 	pte_unmap_unlock(pte, ptl);
@@ -3090,7 +3103,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 		goto uncharge_out;
 	}
-	do_set_pte(vma, address, new_page, pte, true, true);
+	do_set_pte(vma, address, new_page, pte, true, true, false);
 	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
@@ -3147,7 +3160,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, true, false);
+	do_set_pte(vma, address, fault_page, pte, true, false, false);
 	pte_unmap_unlock(pte, ptl);
 
 	if (set_page_dirty(fault_page))
-- 
cgit v1.3-8-gc7d7
From d0834a6c2c5b0c76cfb806bd7dba6556d8b4edbb Mon Sep 17 00:00:00 2001
From: Minchan Kim
Date: Fri, 20 May 2016 16:58:44 -0700
Subject: mm: disable fault around on emulated access bit architecture

fault_around aims to reduce minor faults of file-backed pages via
speculative ahead pte mapping and relying on readahead logic.  However,
on non-HW access bit architectures the benefit is highly limited,
because they have to emulate the young bit with minor faults for
reclaim's page aging algorithm.  IOW, we cannot reduce minor faults on
those architectures.

I did a quick test on my ARM machine: 512M file mmap sequential every
word read on an eSATA drive, 4 times.  stddev is stable.

  = fault_around 4096 =
  elapsed time(usec): 6747645

  = fault_around 65536 =
  elapsed time(usec): 6709263

0.5% gain.

Even when I tested with eMMC there was no gain, because I guess with
slow storage the major fault is the dominant factor.

Also, fault_around has the side effect of shrinking slab more
aggressively and causing higher vmpressure, so if such speculation
fails, it can evict more slab, which can result in page I/O (e.g.,
inode cache).  In the end, it would void any benefit of fault_around.

So let's make the default "disabled" on those architectures.

Link: http://lkml.kernel.org/r/20160518014229.GB21538@bbox
Signed-off-by: Minchan Kim
Cc: Kirill A. Shutemov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index f29e5ab0342d..a1b93d9e4449 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2899,8 +2899,16 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	update_mmu_cache(vma, address, pte);
 }
 
+/*
+ * If the architecture emulates the "accessed" or "young" bit without
+ * HW support, there is not much gain with fault_around.
+ */
 static unsigned long fault_around_bytes __read_mostly =
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+	PAGE_SIZE;
+#else
 	rounddown_pow_of_two(65536);
+#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
-- 
cgit v1.3-8-gc7d7
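The default chosen above is visible at run time through debugfs.  A small,
hypothetical userspace check — it assumes debugfs is mounted at
/sys/kernel/debug and CONFIG_DEBUG_FS is enabled; the file is backed by the
fault_around_bytes_get()/set() handlers visible in the diff context:

	#include <stdio.h>

	int main(void)
	{
		unsigned long bytes;
		FILE *f = fopen("/sys/kernel/debug/fault_around_bytes", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%lu", &bytes) != 1) {
			fclose(f);
			return 1;
		}
		/* e.g. 65536 with HW accessed bit, 4096 without, per this patch */
		printf("fault_around_bytes = %lu\n", bytes);
		fclose(f);
		return 0;
	}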
Shutemov" Date: Fri, 24 Jun 2016 14:49:45 -0700 Subject: Revert "mm: make faultaround produce old ptes" This reverts commit 5c0a85fad949212b3e059692deecdeed74ae7ec7. The commit causes ~6% regression in unixbench. Let's revert it for now and consider other solution for reclaim problem later. Link: http://lkml.kernel.org/r/1465893750-44080-2-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Reported-by: "Huang, Ying" Cc: Linus Torvalds Cc: Rik van Riel Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Vinayak Menon Cc: Dave Hansen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 +- mm/filemap.c | 2 +- mm/memory.c | 23 +++++------------------ 3 files changed, 7 insertions(+), 20 deletions(-) (limited to 'mm/memory.c') diff --git a/include/linux/mm.h b/include/linux/mm.h index 5df5feb49575..ece042dfe23c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -602,7 +602,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) } void do_set_pte(struct vm_area_struct *vma, unsigned long address, - struct page *page, pte_t *pte, bool write, bool anon, bool old); + struct page *page, pte_t *pte, bool write, bool anon); #endif /* diff --git a/mm/filemap.c b/mm/filemap.c index 00ae878b2a38..20f3b1f33f0e 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2186,7 +2186,7 @@ repeat: if (file->f_ra.mmap_miss > 0) file->f_ra.mmap_miss--; addr = address + (page->index - vmf->pgoff) * PAGE_SIZE; - do_set_pte(vma, addr, page, pte, false, false, true); + do_set_pte(vma, addr, page, pte, false, false); unlock_page(page); goto next; unlock: diff --git a/mm/memory.c b/mm/memory.c index 15322b73636b..61fe7e7b56bf 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2877,7 +2877,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address, * vm_ops->map_pages. */ void do_set_pte(struct vm_area_struct *vma, unsigned long address, - struct page *page, pte_t *pte, bool write, bool anon, bool old) + struct page *page, pte_t *pte, bool write, bool anon) { pte_t entry; @@ -2885,8 +2885,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address, entry = mk_pte(page, vma->vm_page_prot); if (write) entry = maybe_mkwrite(pte_mkdirty(entry), vma); - if (old) - entry = pte_mkold(entry); if (anon) { inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, address, false); @@ -3032,20 +3030,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, */ if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { pte = pte_offset_map_lock(mm, pmd, address, &ptl); - if (!pte_same(*pte, orig_pte)) - goto unlock_out; do_fault_around(vma, address, pte, pgoff, flags); - /* Check if the fault is handled by faultaround */ - if (!pte_same(*pte, orig_pte)) { - /* - * Faultaround produce old pte, but the pte we've - * handler fault for should be young. 
From 06d8fbc7cfd655d202cfac9870bd79c18c2db698 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 24 Jun 2016 14:49:48 -0700
Subject: Revert "mm: disable fault around on emulated access bit architecture"

This reverts commit d0834a6c2c5b0c76cfb806bd7dba6556d8b4edbb.

After the revert of 5c0a85fad949 ("mm: make faultaround produce old
ptes"), faultaround doesn't have dependencies on the hardware accessed
bit, so let's revert this one too.

Link: http://lkml.kernel.org/r/1465893750-44080-3-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov
Reported-by: "Huang, Ying"
Cc: Linus Torvalds
Cc: Rik van Riel
Cc: Mel Gorman
Cc: Michal Hocko
Cc: Minchan Kim
Cc: Vinayak Menon
Cc: Dave Hansen
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 8 --------
 1 file changed, 8 deletions(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index 61fe7e7b56bf..cd1f29e4897e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2898,16 +2898,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	update_mmu_cache(vma, address, pte);
 }
 
-/*
- * If the architecture emulates the "accessed" or "young" bit without
- * HW support, there is not much gain with fault_around.
- */
 static unsigned long fault_around_bytes __read_mostly =
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-	PAGE_SIZE;
-#else
 	rounddown_pow_of_two(65536);
-#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
-- 
cgit v1.3-8-gc7d7
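The benchmark quoted in the faultaround commits above ("512M file mmap
sequential every word read") was not posted with the patches; a rough
reconstruction matching that description might look like the following.
The file name and all details beyond the commit-message description are
guesses:

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define FILE_SIZE	(512UL << 20)	/* 512M, as in the commit message */

	int main(void)
	{
		int fd = open("testfile", O_RDONLY);
		long *p;
		volatile long sum = 0;
		unsigned long i;

		if (fd < 0)
			return 1;
		p = mmap(NULL, FILE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) {
			close(fd);
			return 1;
		}
		/* Read every word sequentially so each page faults in once. */
		for (i = 0; i < FILE_SIZE / sizeof(*p); i++)
			sum += p[i];
		munmap(p, FILE_SIZE);
		close(fd);
		return 0;
	}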