author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2016-02-02 16:57:15 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-02-03 08:28:43 -0800
commit	e3ae19535c665771e2c03cdd63df9bc4d6b37941 (patch)
tree	d25d4a9c455874aac68e34e37d46fcb8d50b05ae /mm
parent	thp: change deferred_split_count() to return number of THP in queue (diff)
thp: limit number of objects to scan on deferred_split_scan()
If we have a lot of pages queued up to be split, deferred_split_scan() can spend an unreasonable amount of time under the spinlock with interrupts disabled.

Let's cap the number of pages to split per scan at sc->nr_to_scan.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reported-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
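The change applies a generic pattern rather than anything THP-specific: when a scan walks a shared queue under a spinlock (here with interrupts disabled), bound the number of entries handled per call and move the entries you keep onto a private list, so the expensive work runs after the lock is dropped. Below is a minimal userspace C sketch of that pattern; the names (work_item, scan_queue, process_item) and the pthread mutex standing in for the kernel spinlock are illustrative assumptions, not kernel APIs.

/*
 * Illustrative sketch only: detach at most nr_to_scan entries from a
 * shared queue while holding the lock, then process them with the lock
 * dropped.  All identifiers here are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
        struct work_item *next;
        int id;
};

static struct work_item *queue_head;            /* shared queue, protected by queue_lock */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_item(struct work_item *item)
{
        /* Stand-in for the expensive part (split_huge_page() in the patch). */
        printf("processing item %d\n", item->id);
        free(item);
}

static unsigned long scan_queue(unsigned long nr_to_scan)
{
        struct work_item *batch = NULL, **tail = &batch, *item;
        unsigned long taken = 0;

        pthread_mutex_lock(&queue_lock);
        /* Cap the work done under the lock at nr_to_scan entries. */
        while (queue_head && nr_to_scan--) {
                item = queue_head;
                queue_head = item->next;
                item->next = NULL;
                *tail = item;
                tail = &item->next;
                taken++;
        }
        pthread_mutex_unlock(&queue_lock);

        /* The expensive work happens with the lock dropped. */
        while (batch) {
                item = batch;
                batch = item->next;
                process_item(item);
        }
        return taken;
}

int main(void)
{
        /* Queue a few items, then scan with a small budget. */
        for (int i = 0; i < 10; i++) {
                struct work_item *item = malloc(sizeof(*item));
                item->id = i;
                item->next = queue_head;
                queue_head = item;
        }
        printf("scanned %lu items\n", scan_queue(4));
        return 0;
}

In the patch itself the per-call budget is sc->nr_to_scan, which the shrinker core derives from memory pressure, so a long queue is still drained over repeated scans rather than in one long IRQ-off critical section.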
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7aae72114583..c1411961167e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3478,17 +3478,19 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
         int split = 0;
 
         spin_lock_irqsave(&pgdata->split_queue_lock, flags);
-        list_splice_init(&pgdata->split_queue, &list);
-
         /* Take pin on all head pages to avoid freeing them under us */
         list_for_each_safe(pos, next, &list) {
                 page = list_entry((void *)pos, struct page, mapping);
                 page = compound_head(page);
-                /* race with put_compound_page() */
-                if (!get_page_unless_zero(page)) {
+                if (get_page_unless_zero(page)) {
+                        list_move(page_deferred_list(page), &list);
+                } else {
+                        /* We lost race with put_compound_page() */
                         list_del_init(page_deferred_list(page));
                         pgdata->split_queue_len--;
                 }
+                if (!--sc->nr_to_scan)
+                        break;
         }
         spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);