author     john.hubbard@gmail.com <john.hubbard@gmail.com>    2019-03-05 15:48:49 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>     2019-03-05 21:07:20 -0800
commit     494eec70f054965e2e699db450cde2c08db1c008 (patch)
tree       e413ad49a7d913e30377c47ddc49813a58537884 /include/linux/pagemap.h
parent     mm/migrate.c: cleanup expected_page_refs() (diff)
mm: page_cache_add_speculative(): refactor out some code duplication
From: John Hubbard <jhubbard@nvidia.com>

This combines the common elements of these routines:

    page_cache_get_speculative()
    page_cache_add_speculative()

This was anticipated by the original author, as shown by the comment in commit ce0ad7f095258 ("powerpc/mm: Lockless get_user_pages_fast() for 64-bit (v3)"):

    "Same as above, but add instead of inc (could just be merged)"

There is no intention to introduce any behavioral change, but there is a small risk of one, due to the slightly different ways the two routines expressed the TINY_RCU and related configurations.

This also removes the VM_BUG_ON(in_interrupt()) that was in page_cache_add_speculative() but not in page_cache_get_speculative(). That provides slightly less detection of such bugs, but given that the check was only on the "add" path anyway, we can likely do without it just fine.

It also removes the VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); check that page_cache_add_speculative() had.

Link: http://lkml.kernel.org/r/20190206231016.22734-2-jhubbard@nvidia.com
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jeff Layton <jlayton@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
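The "no behavioral change" expectation is easiest to see on the SMP path: the "get" primitive is simply the count == 1 case of the "add" primitive, so switching the merged helper to page_ref_add_unless(page, count, 0) generalizes the old "get" path rather than altering it. A minimal sketch of that relationship, paraphrasing the include/linux/mm.h definition of this era (an assumption about surrounding code, not part of this patch):

	/* From include/linux/mm.h (paraphrased): "get unless zero" is the
	 * count == 1 case of "add unless zero". */
	static inline int get_page_unless_zero(struct page *page)
	{
		return page_ref_add_unless(page, 1, 0);
	}

The residual risk mentioned above therefore sits on the !SMP side, where the two routines spelled their config guards differently: #ifdef CONFIG_TINY_RCU in the "get" path versus #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) in the "add" path, as the diff below shows.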
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--  include/linux/pagemap.h | 31 +++++++++----------------------
1 file changed, 9 insertions(+), 22 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e2d7039af6a3..b477a70cc2e4 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -164,7 +164,7 @@ void release_pages(struct page **pages, int nr);
  * will find the page or it will not. Likewise, the old find_get_page could run
  * either before the insertion or afterwards, depending on timing.
  */
-static inline int page_cache_get_speculative(struct page *page)
+static inline int __page_cache_add_speculative(struct page *page, int count)
 {
 #ifdef CONFIG_TINY_RCU
 # ifdef CONFIG_PREEMPT_COUNT
@@ -180,10 +180,10 @@ static inline int page_cache_get_speculative(struct page *page)
 	 * SMP requires.
 	 */
 	VM_BUG_ON_PAGE(page_count(page) == 0, page);
-	page_ref_inc(page);
+	page_ref_add(page, count);
 
 #else
-	if (unlikely(!get_page_unless_zero(page))) {
+	if (unlikely(!page_ref_add_unless(page, count, 0))) {
 		/*
 		 * Either the page has been freed, or will be freed.
 		 * In either case, retry here and the caller should
@@ -197,27 +197,14 @@ static inline int page_cache_get_speculative(struct page *page)
 	return 1;
 }
 
-/*
- * Same as above, but add instead of inc (could just be merged)
- */
-static inline int page_cache_add_speculative(struct page *page, int count)
+static inline int page_cache_get_speculative(struct page *page)
 {
-	VM_BUG_ON(in_interrupt());
-
-#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
-# ifdef CONFIG_PREEMPT_COUNT
-	VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
-	VM_BUG_ON_PAGE(page_count(page) == 0, page);
-	page_ref_add(page, count);
-
-#else
-	if (unlikely(!page_ref_add_unless(page, count, 0)))
-		return 0;
-#endif
-	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
+	return __page_cache_add_speculative(page, 1);
+}
 
-	return 1;
+static inline int page_cache_add_speculative(struct page *page, int count)
+{
+	return __page_cache_add_speculative(page, count);
 }
 
 #ifdef CONFIG_NUMA
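For reference, here is roughly how the merged helper and its two wrappers read after this patch. It is pieced together from the hunks above; lines the diff leaves as unshown context (notably the long locking comment and any remaining sanity checks before the final return) are abbreviated or marked as such, so treat this as a sketch rather than a verbatim copy of the post-patch header.

	static inline int __page_cache_add_speculative(struct page *page, int count)
	{
	#ifdef CONFIG_TINY_RCU
	# ifdef CONFIG_PREEMPT_COUNT
		VM_BUG_ON(!in_atomic() && !irqs_disabled());
	# endif
		/*
		 * (Abbreviated from the original comment:) preemption is off and
		 * the page cache is never truncated from interrupt context, so a
		 * page found here already has its refcount pinned; no speculative
		 * get is needed without SMP.
		 */
		VM_BUG_ON_PAGE(page_count(page) == 0, page);
		page_ref_add(page, count);

	#else
		if (unlikely(!page_ref_add_unless(page, count, 0))) {
			/*
			 * Either the page has been freed, or will be freed.
			 * In either case, retry here and the caller should
			 * do the right thing (see comments above).
			 */
			return 0;
		}
	#endif
		/* (Unshown context from the old "get" path continues unchanged.) */

		return 1;
	}

	static inline int page_cache_get_speculative(struct page *page)
	{
		return __page_cache_add_speculative(page, 1);
	}

	static inline int page_cache_add_speculative(struct page *page, int count)
	{
		return __page_cache_add_speculative(page, count);
	}

The only intended functional deltas, per the changelog, are the dropped VM_BUG_ON(in_interrupt()) and the dropped PageCompound()/compound_head() check, both of which existed only on the "add" path.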