authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2016-02-02 16:57:12 -0800
committerLinus Torvalds <torvalds@linux-foundation.org>2016-02-03 08:28:43 -0800
commitcb8d68ec16a511f8be7e1028fd8f869ef7c6a1a8 (patch)
treebca734285ef34b21cbdc7d3c3873981c41c76d4e /mm
parentthp: make split_queue per-node (diff)
thp: change deferred_split_count() to return number of THP in queue
I got the meaning of shrinker::count_objects() wrong: it should return the
number of potentially freeable objects, which does not necessarily correlate
with the amount of freeable memory. Returning 256 per THP in the queue is not
reasonable: shrinker::scan_objects() is never called with nr_to_scan > 128 in
my setup. Let's return 1 per THP and correct scan_objects() accordingly.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
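The change relies on the shrinker contract: count_objects() reports how many
objects are potentially freeable, the core derives nr_to_scan from that count
(handed out in batches, which is why values above ~128 were never seen), and
scan_objects() returns how many objects it actually freed, or SHRINK_STOP when
there is nothing left to do. Below is a minimal sketch of that contract; all
"example_*" names are hypothetical and are not part of this patch.

/*
 * Minimal sketch of the shrinker contract described above; everything
 * prefixed "example_" is hypothetical and not part of this patch.
 */
#include <linux/shrinker.h>
#include <linux/spinlock.h>

static unsigned long example_nr_objects;	/* freeable objects in our cache */
static DEFINE_SPINLOCK(example_lock);

/* Report how many objects could be freed -- one per object, not per page. */
static unsigned long example_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	return READ_ONCE(example_nr_objects);
}

/* Try to free up to sc->nr_to_scan objects; return how many were freed. */
static unsigned long example_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	unsigned long freed = 0;

	spin_lock(&example_lock);
	while (sc->nr_to_scan && example_nr_objects) {
		example_nr_objects--;		/* pretend one object was freed */
		sc->nr_to_scan--;
		freed++;
	}
	spin_unlock(&example_lock);

	/* Nothing freed and nothing queued: tell the core to stop scanning. */
	if (!freed && !READ_ONCE(example_nr_objects))
		return SHRINK_STOP;
	return freed;
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,
};

A register_shrinker(&example_shrinker) call during init is then enough for the
core to invoke the two callbacks under memory pressure, with nr_to_scan derived
from the reported count and capped by the shrinker's batch size.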
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c  15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 253a25e007d7..7aae72114583 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3465,12 +3465,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
 		struct shrink_control *sc)
 {
 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
-	/*
-	 * Split a page from split_queue will free up at least one page,
-	 * at most HPAGE_PMD_NR - 1. We don't track exact number.
-	 * Let's use HPAGE_PMD_NR / 2 as ballpark.
-	 */
-	return ACCESS_ONCE(pgdata->split_queue_len) * HPAGE_PMD_NR / 2;
+	return ACCESS_ONCE(pgdata->split_queue_len);
 }
 
 static unsigned long deferred_split_scan(struct shrinker *shrink,
@@ -3511,7 +3506,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 	list_splice_tail(&list, &pgdata->split_queue);
 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 
-	return split * HPAGE_PMD_NR / 2;
+	/*
+	 * Stop shrinker if we didn't split any page, but the queue is empty.
+	 * This can happen if pages were freed under us.
+	 */
+	if (!split && list_empty(&pgdata->split_queue))
+		return SHRINK_STOP;
+	return split;
 }
 
 static struct shrinker deferred_split_shrinker = {
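For context, the two callbacks touched by this diff hang off the shrinker
registration that the last context line opens. At this point in the tree it
looks roughly like the following; the fields beyond the two callbacks are
recalled from the surrounding code rather than shown in the diff, so treat
them as an approximation.

static struct shrinker deferred_split_shrinker = {
	.count_objects = deferred_split_count,
	.scan_objects = deferred_split_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,	/* split_queue is per-node (see parent commit) */
};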