author    Hugh Dickins <hughd@google.com>    2016-05-19 17:12:41 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-05-19 19:12:14 -0700
commit    fa9949da59a15017a02c86b087c7499d7b5702be
tree      1191eac8b64eb3ba21b1286d9f3842db0388af82
parent    mm: update_lru_size do the __mod_zone_page_state
mm: use __SetPageSwapBacked and dont ClearPageSwapBacked
v3.16 commit 07a427884348 ("mm: shmem: avoid atomic operation during shmem_getpage_gfp") rightly replaced one instance of SetPageSwapBacked by __SetPageSwapBacked, pointing out that the newly allocated page is not yet visible to other users (except speculative get_page_unless_zero-ers, who may not update page flags before their further checks).

That was part of a series in which Mel was focused on tmpfs profiles: but almost all SetPageSwapBacked uses can be so optimized, with the same justification.

Remove ClearPageSwapBacked from __read_swap_cache_async() error path: it's not an error to free a page with PG_swapbacked set.

Follow a convention of __SetPageLocked, __SetPageSwapBacked instead of doing it differently in different places; but that's for tidiness - if the ordering actually mattered, we should not be using the __variants.

There's probably scope for further __SetPageFlags in other places, but SwapBacked is the one I'm interested in at the moment.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Yang Shi <yang.shi@linaro.org>
Cc: Ning Qu <quning@gmail.com>
Reviewed-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
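[Editor's note] For readers outside mm/: SetPageSwapBacked() is an atomic bit-set (set_bit) on page->flags, while __SetPageSwapBacked() is a plain, non-atomic one (__set_bit). The non-atomic form is only safe while no other CPU can concurrently modify the same flags word, which is exactly the window this patch exploits: the page has just been allocated and not yet published. Below is a minimal userspace sketch of that pattern, not kernel code; struct obj, obj_alloc(), obj_set_flag() and the FLAG_* names are hypothetical stand-ins.

/*
 * Minimal C11 sketch (userspace, hypothetical names): why a plain
 * read-modify-write on a flags word is safe before an object is
 * published, and why an atomic RMW is required afterwards.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_LOCKED	(1UL << 0)
#define FLAG_SWAPBACKED	(1UL << 1)
#define FLAG_UPTODATE	(1UL << 2)

struct obj {
	atomic_ulong flags;		/* stands in for page->flags */
};

/*
 * Allocation side: the object is not yet reachable by any other
 * thread, so the flags word can be built with plain stores -- the
 * __SetPageSwapBacked() case.
 */
static struct obj *obj_alloc(void)
{
	struct obj *o = malloc(sizeof(*o));
	unsigned long f = 0;

	if (!o)
		return NULL;
	f |= FLAG_LOCKED;		/* __SetPageLocked() analogue */
	f |= FLAG_SWAPBACKED;		/* __SetPageSwapBacked() analogue */
	atomic_init(&o->flags, f);
	return o;
}

/*
 * Published side: once other threads can see the object, every flag
 * update must be an atomic read-modify-write -- the SetPageFlag()
 * case -- or concurrent updates can lose bits.
 */
static void obj_set_flag(struct obj *o, unsigned long flag)
{
	atomic_fetch_or(&o->flags, flag);
}

int main(void)
{
	struct obj *o = obj_alloc();	/* private: non-atomic is fine */

	if (!o)
		return 1;
	/* ... insert o into a shared structure ("publish") here ... */
	obj_set_flag(o, FLAG_UPTODATE);	/* shared: must be atomic */
	printf("flags = %#lx\n", atomic_load(&o->flags));
	free(o);
	return 0;
}

The commit message's caveat about speculative get_page_unless_zero() callers fits this model: such lockless lookups may briefly hold a reference to a not-yet-published (or being-freed) page, but they re-validate it before trusting its flags, so the non-atomic set remains safe.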
-rw-r--r--  mm/migrate.c     6
-rw-r--r--  mm/rmap.c        2
-rw-r--r--  mm/shmem.c       4
-rw-r--r--  mm/swap_state.c  3
4 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index f9dfb18a4eba..53ab6398e7a2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -332,7 +332,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		newpage->index = page->index;
 		newpage->mapping = page->mapping;
 		if (PageSwapBacked(page))
-			SetPageSwapBacked(newpage);
+			__SetPageSwapBacked(newpage);
 
 		return MIGRATEPAGE_SUCCESS;
 	}
@@ -378,7 +378,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
 	if (PageSwapBacked(page))
-		SetPageSwapBacked(newpage);
+		__SetPageSwapBacked(newpage);
 
 	get_page(newpage);	/* add cache reference */
 	if (PageSwapCache(page)) {
@@ -1791,7 +1791,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	/* Prepare a page as a migration target */
 	__SetPageLocked(new_page);
-	SetPageSwapBacked(new_page);
+	__SetPageSwapBacked(new_page);
 
 	/* anon mapping, we can simply copy page->mapping to the new page: */
 	new_page->mapping = page->mapping;
diff --git a/mm/rmap.c b/mm/rmap.c
index 4cebe8a7c2cb..8a839935b18c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1249,7 +1249,7 @@ void page_add_new_anon_rmap(struct page *page,
 	int nr = compound ? hpage_nr_pages(page) : 1;
 
 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
-	SetPageSwapBacked(page);
+	__SetPageSwapBacked(page);
 	if (compound) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 		/* increment count (starts at -1) */
diff --git a/mm/shmem.c b/mm/shmem.c
index e684a9140228..9e609d58df73 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1085,8 +1085,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	flush_dcache_page(newpage);
 
 	__SetPageLocked(newpage);
+	__SetPageSwapBacked(newpage);
 	SetPageUptodate(newpage);
-	SetPageSwapBacked(newpage);
 	set_page_private(newpage, swap_index);
 	SetPageSwapCache(newpage);
@@ -1276,8 +1276,8 @@ repeat:
 			goto decused;
 		}
 
-		__SetPageSwapBacked(page);
 		__SetPageLocked(page);
+		__SetPageSwapBacked(page);
 		if (sgp == SGP_WRITE)
 			__SetPageReferenced(page);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 366ce3518703..0d457e7db8d6 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -358,7 +358,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
 		__SetPageLocked(new_page);
-		SetPageSwapBacked(new_page);
+		__SetPageSwapBacked(new_page);
 		err = __add_to_swap_cache(new_page, entry);
 		if (likely(!err)) {
 			radix_tree_preload_end();
@@ -370,7 +370,6 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			return new_page;
 		}
 		radix_tree_preload_end();
-		ClearPageSwapBacked(new_page);
 		__ClearPageLocked(new_page);
 		/*
 		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely