path: root/mm/rmap.c
author: Linus Torvalds <torvalds@linux-foundation.org> 2019-01-08 18:58:29 -0800
committer: Linus Torvalds <torvalds@linux-foundation.org> 2019-01-08 18:58:29 -0800
commit: a88cc8da0279f8e481b0d90e51a0a1cffac55906 (patch)
tree: 4be3f8598d4146e3ea2f4f344a140d9c18f11932 /mm/rmap.c
parent: arch/openrisc: Fix issues with access_ok() (diff)
parent: mm, page_alloc: do not wake kswapd with zone lock held (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "14 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, page_alloc: do not wake kswapd with zone lock held
  hugetlbfs: revert "use i_mmap_rwsem for more pmd sharing synchronization"
  hugetlbfs: revert "Use i_mmap_rwsem to fix page fault/truncate race"
  mm: page_mapped: don't assume compound page is huge or THP
  mm/memory.c: initialise mmu_notifier_range correctly
  tools/vm/page_owner: use page_owner_sort in the use example
  kasan: fix krealloc handling for tag-based mode
  kasan: make tag based mode work with CONFIG_HARDENED_USERCOPY
  kasan, arm64: use ARCH_SLAB_MINALIGN instead of manual aligning
  mm, memcg: fix reclaim deadlock with writeback
  mm/usercopy.c: no check page span for stack objects
  slab: alien caches must not be initialized if the allocation of the alien cache failed
  fork, memcg: fix cached_stacks case
  zram: idle writeback fixes and cleanup
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  4
1 file changed, 0 insertions, 4 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 21a26cf51114..68a1a5b869a5 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -25,7 +25,6 @@
* page->flags PG_locked (lock_page)
* hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
* mapping->i_mmap_rwsem
- * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
* anon_vma->rwsem
* mm->page_table_lock or pte_lock
* zone_lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -1379,9 +1378,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/*
* If sharing is possible, start and end will be adjusted
* accordingly.
- *
- * If called for a huge page, caller must hold i_mmap_rwsem
- * in write mode as it is possible to call huge_pmd_unshare.
*/
adjust_range_if_pmd_sharing_possible(vma, &range.start,
&range.end);
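
The first hunk trims the lock-ordering comment at the top of mm/rmap.c, dropping hugetlb_fault_mutex from the documented hierarchy as part of the two hugetlbfs i_mmap_rwsem reverts in this merge. As a rough illustration of the rule that comment encodes, here is a minimal user-space sketch (not kernel code; the pthread mutexes and the unmap_path() name are stand-ins assumed for illustration): every path takes the locks outer-to-inner in one fixed order, so no two paths can deadlock against each other.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for mapping->i_mmap_rwsem, anon_vma->rwsem and the pte lock. */
static pthread_mutex_t i_mmap_rwsem   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t anon_vma_rwsem = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pte_lock       = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical path: honor the documented order, outermost lock first. */
static void unmap_path(void)
{
    pthread_mutex_lock(&i_mmap_rwsem);
    pthread_mutex_lock(&anon_vma_rwsem);
    pthread_mutex_lock(&pte_lock);
    puts("locks taken in the documented order");
    pthread_mutex_unlock(&pte_lock);
    pthread_mutex_unlock(&anon_vma_rwsem);
    pthread_mutex_unlock(&i_mmap_rwsem);
}

int main(void)
{
    unmap_path();    /* build with: cc -pthread */
    return 0;
}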
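
The second hunk drops the now-stale note about holding i_mmap_rwsem in write mode around huge_pmd_unshare(), but the call to adjust_range_if_pmd_sharing_possible() stays: when a hugetlb VMA may share PMDs, the mmu notifier range is widened so that unsharing, which tears down a whole shared PMD page table page, is fully covered by the flush. A minimal user-space sketch of that widening, assuming a 1 GiB PUD_SIZE and illustrative helper names (the real kernel function additionally checks whether the VMA can share at all):

#include <stdio.h>
#include <stdint.h>

#define PUD_SIZE (1UL << 30)    /* assumption: 1 GiB, as on x86-64 */
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

/* Illustrative stand-in for adjust_range_if_pmd_sharing_possible():
 * widen [start, end) to PUD_SIZE boundaries so a shared PMD table
 * torn down by unsharing lies entirely inside the flushed range. */
static void widen_for_pmd_sharing(uintptr_t *start, uintptr_t *end)
{
    *start = ALIGN_DOWN(*start, PUD_SIZE);
    *end   = ALIGN_UP(*end, PUD_SIZE);
}

int main(void)
{
    uintptr_t start = 0x7f0040200000UL, end = 0x7f0040400000UL;

    widen_for_pmd_sharing(&start, &end);
    printf("widened range: %#lx-%#lx\n",
           (unsigned long)start, (unsigned long)end);
    return 0;
}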