author    Yang Shi <shy828301@gmail.com>  2022-05-19 14:08:49 -0700
committer akpm <akpm@linux-foundation.org>  2022-05-19 14:08:49 -0700
commit    cb648754a1d0dbac7cbcfe8d42cb27fa6063547d (patch)
tree      4396f096ac96825004c600ca61e976249bb14dd6 /mm/khugepaged.c
parent    sched: coredump.h: clarify the use of MMF_VM_HUGEPAGE (diff)
mm: khugepaged: remove redundant check for VM_NO_KHUGEPAGED
The hugepage_vma_check() called by khugepaged_enter_vma_merge() already
checks VM_NO_KHUGEPAGED.  Remove the check from the caller and move the
check in hugepage_vma_check() up.  More checks may be run for
VM_NO_KHUGEPAGED vmas, but MADV_HUGEPAGE is definitely not a hot path,
so the cleaner code outweighs the cost.

Link: https://lkml.kernel.org/r/20220510203222.24246-3-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Acked-by: Song Liu <song@kernel.org>
Acked-by: Vlastmil Babka <vbabka@suse.cz>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
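To make the resulting control flow easy to follow, here is a minimal, self-contained user-space sketch of the ordering this patch establishes. It is not kernel code: the flag values, the struct, and the helper bodies are simplified stand-ins, and only the placement of the VM_NO_KHUGEPAGED test is taken from the patch below.

/*
 * Simplified user-space model of the check ordering after this patch.
 * Flag values and types are illustrative stand-ins, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_NOHUGEPAGE       0x1UL   /* stand-in flag */
#define VM_NO_KHUGEPAGED    0x2UL   /* stand-in for the kernel's composite mask */

struct vm_area_struct {
        unsigned long vm_flags;
};

/* After the patch, hugepage_vma_check() rejects VM_NO_KHUGEPAGED early. */
static bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags)
{
        if (vm_flags & VM_NOHUGEPAGE)           /* transhuge_vma_enabled() stand-in */
                return false;
        if (vm_flags & VM_NO_KHUGEPAGED)        /* the check moved up by this patch */
                return false;
        /* file alignment, anon_vma and temporary-stack checks elided */
        return true;
}

/* The caller no longer needs its own VM_NO_KHUGEPAGED test. */
static int khugepaged_enter_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags)
{
        if (!hugepage_vma_check(vma, vm_flags))
                return 0;       /* ineligible vma: silently skip */
        return 0;               /* the real code registers the mm with khugepaged here */
}

int main(void)
{
        struct vm_area_struct vma = { .vm_flags = VM_NO_KHUGEPAGED };

        /* MADV_HUGEPAGE path: a single unconditional call is now sufficient. */
        printf("enter_vma_merge: %d\n", khugepaged_enter_vma_merge(&vma, vma.vm_flags));
        return 0;
}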
Diffstat (limited to 'mm/khugepaged.c')
-rw-r--r--  mm/khugepaged.c  9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 2243ed095f02..e53b4826c7bd 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -365,8 +365,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		 * register it here without waiting a page fault that
 		 * may not happen any time soon.
 		 */
-		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
-				khugepaged_enter_vma_merge(vma, *vm_flags))
+		if (khugepaged_enter_vma_merge(vma, *vm_flags))
 			return -ENOMEM;
 		break;
 	case MADV_NOHUGEPAGE:
@@ -445,6 +444,9 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
 	if (!transhuge_vma_enabled(vma, vm_flags))
 		return false;
 
+	if (vm_flags & VM_NO_KHUGEPAGED)
+		return false;
+
 	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
 				vma->vm_pgoff, HPAGE_PMD_NR))
 		return false;
@@ -470,7 +472,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
 		return false;
 	if (vma_is_temporary_stack(vma))
 		return false;
-	return !(vm_flags & VM_NO_KHUGEPAGED);
+
+	return true;
 }
 
 int __khugepaged_enter(struct mm_struct *mm)