From d516904bd239fe2c9f1bd46cf146bb4b8831321c Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Mon, 8 Oct 2012 16:29:48 -0700
Subject: thp: merge page pre-alloc in khugepaged_loop into khugepaged_do_scan

There are two pre-alloc operations in these two functions; the difference is:
- khugepaged_loop is allowed to sleep if the page allocation fails
- khugepaged_do_scan exits immediately if the page allocation fails

Actually, in khugepaged_do_scan, we can allow the pre-alloc to sleep on the
first failure; the pre-alloc operation in khugepaged_loop can then be removed.

Signed-off-by: Xiao Guangrong
Cc: Andrea Arcangeli
Cc: Hugh Dickins
Cc: David Rientjes
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/huge_memory.c | 97 ++++++++++++++++++++++++++------------------------------
 1 file changed, 45 insertions(+), 52 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1e21b4cf4c75..d5b5fcc73c44 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2222,10 +2222,40 @@ static int khugepaged_wait_event(void)
 		kthread_should_stop();
 }
 
-static void khugepaged_do_scan(struct page **hpage)
+static void khugepaged_alloc_sleep(void)
+{
+	wait_event_freezable_timeout(khugepaged_wait, false,
+			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
+
+#ifndef CONFIG_NUMA
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+	struct page *hpage;
+
+	do {
+		hpage = alloc_hugepage(khugepaged_defrag());
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+			if (!*wait)
+				return NULL;
+
+			*wait = false;
+			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
+	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+	return hpage;
+}
+#endif
+
+static void khugepaged_do_scan(void)
 {
+	struct page *hpage = NULL;
 	unsigned int progress = 0, pass_through_head = 0;
 	unsigned int pages = khugepaged_pages_to_scan;
+	bool wait = true;
 
 	barrier(); /* write khugepaged_pages_to_scan to local stack */
 
@@ -2233,17 +2263,18 @@ static void khugepaged_do_scan(struct page **hpage)
 		cond_resched();
 
 #ifndef CONFIG_NUMA
-		if (!*hpage) {
-			*hpage = alloc_hugepage(khugepaged_defrag());
-			if (unlikely(!*hpage)) {
-				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+		if (!hpage)
+			hpage = khugepaged_alloc_hugepage(&wait);
+
+		if (unlikely(!hpage))
+			break;
+#else
+		if (IS_ERR(hpage)) {
+			if (!wait)
 				break;
-			}
-			count_vm_event(THP_COLLAPSE_ALLOC);
+			wait = false;
+			khugepaged_alloc_sleep();
 		}
-#else
-		if (IS_ERR(*hpage))
-			break;
 #endif
 
 		if (unlikely(kthread_should_stop() || freezing(current)))
@@ -2255,37 +2286,16 @@ static void khugepaged_do_scan(struct page **hpage)
 		if (khugepaged_has_work() &&
 		    pass_through_head < 2)
 			progress += khugepaged_scan_mm_slot(pages - progress,
-							    hpage);
+							    &hpage);
 		else
 			progress = pages;
 		spin_unlock(&khugepaged_mm_lock);
 	}
-}
 
-static void khugepaged_alloc_sleep(void)
-{
-	wait_event_freezable_timeout(khugepaged_wait, false,
-			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+	if (!IS_ERR_OR_NULL(hpage))
+		put_page(hpage);
 }
 
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(void)
-{
-	struct page *hpage;
-
-	do {
-		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage) {
-			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-			khugepaged_alloc_sleep();
-		} else
-			count_vm_event(THP_COLLAPSE_ALLOC);
-	} while (unlikely(!hpage) &&
-		 likely(khugepaged_enabled()));
-	return hpage;
-}
-#endif
-
 static void khugepaged_wait_work(void)
 {
 	try_to_freeze();
@@ -2306,25 +2316,8 @@ static void khugepaged_wait_work(void)
 
 static void khugepaged_loop(void)
 {
-	struct page *hpage = NULL;
-
 	while (likely(khugepaged_enabled())) {
-#ifndef CONFIG_NUMA
-		hpage = khugepaged_alloc_hugepage();
-		if (unlikely(!hpage))
-			break;
-#else
-		if (IS_ERR(hpage)) {
-			khugepaged_alloc_sleep();
-			hpage = NULL;
-		}
-#endif
-
-		khugepaged_do_scan(&hpage);
-
-		if (!IS_ERR_OR_NULL(hpage))
-			put_page(hpage);
-
+		khugepaged_do_scan();
 		khugepaged_wait_work();
 	}
 }
--
cgit v1.2.3-59-g8ed1b
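
[Editor's note] For readers skimming the diff, the behavioural point of the merge is the
"sleep once, then give up" retry policy that khugepaged_alloc_hugepage() gains via the
new *wait flag. The following is a minimal, self-contained userspace sketch of that
control flow only; it is not kernel code, and the stub_* helpers are hypothetical
stand-ins for alloc_hugepage(), khugepaged_alloc_sleep() and khugepaged_enabled().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void *stub_alloc_hugepage(void)   /* stand-in for alloc_hugepage() */
{
	/* Fail roughly half the time to exercise the retry path. */
	return (rand() & 1) ? malloc(2 * 1024 * 1024) : NULL;
}

static void stub_alloc_sleep(void)       /* stand-in for khugepaged_alloc_sleep() */
{
	usleep(1000);
}

static bool stub_enabled(void)           /* stand-in for khugepaged_enabled() */
{
	return true;
}

/* Sleep and retry on the first failure (clearing *wait); give up on the second. */
static void *alloc_with_single_retry(bool *wait)
{
	void *hpage;

	do {
		hpage = stub_alloc_hugepage();
		if (!hpage) {
			if (!*wait)
				return NULL;  /* second failure: bail out */

			*wait = false;        /* only sleep once per scan */
			stub_alloc_sleep();
		}
	} while (!hpage && stub_enabled());

	return hpage;
}

int main(void)
{
	bool wait = true;
	void *page = alloc_with_single_retry(&wait);

	printf("allocation %s\n", page ? "succeeded" : "failed");
	free(page);
	return 0;
}

In the patched kernel code the same flag lets khugepaged_do_scan() own both the
pre-allocation and the failure handling, which is why the duplicate logic in
khugepaged_loop() can be deleted.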