author     Gao Xiang <gaoxiang25@huawei.com>    2019-11-21 21:59:54 +0800
committer  Gao Xiang <gaoxiang25@huawei.com>    2019-11-24 10:57:48 +0800
commit     5ddcee1f3a1ccaccb31bc17080f75a0bb13b4906 (patch)
tree       2588e73eefe6da892ba830d3a60a7c0afaa158cf /fs/erofs
parent     erofs: remove dead code since managed cache is now built-in (diff)
erofs: get rid of __stagingpage_alloc helper
Thanks to iterative development, open-coding the staging page allocation is now cleaner than keeping the __stagingpage_alloc() helper, so remove it and let callers do it directly.

Link: https://lore.kernel.org/r/20191124025217.12345-1-hsiangkao@aol.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
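For context, the patch trades a one-line wrapper for two explicit statements at each call site. A condensed before/after sketch of the pattern (paraphrased from the hunks below, not a literal excerpt):

/*
 * Before: the helper forced __GFP_NOFAIL through the old 'nofail'
 * argument and tagged the result as a staging page.
 */
static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
					       gfp_t gfp)
{
	struct page *page = erofs_allocpage(pagepool, gfp, true);

	page->mapping = Z_EROFS_MAPPING_STAGING;
	return page;
}

/*
 * After: callers open-code both steps and request must-succeed
 * allocation explicitly via the gfp mask.
 */
page = erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
page->mapping = Z_EROFS_MAPPING_STAGING;

This also lets erofs_allocpage() drop its 'nofail' argument: the one caller that did not want __GFP_NOFAIL (the LZ4 destination-page path in decompressor.c) simply omits the flag and keeps its -ENOMEM check.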
Diffstat (limited to 'fs/erofs')
-rw-r--r--  fs/erofs/decompressor.c   2
-rw-r--r--  fs/erofs/internal.h       2
-rw-r--r--  fs/erofs/utils.c          4
-rw-r--r--  fs/erofs/zdata.c         37
4 files changed, 21 insertions, 24 deletions
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 19f89f9fb10c..2890a67a1ded 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
 			victim = availables[--top];
 			get_page(victim);
 		} else {
-			victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
+			victim = erofs_allocpage(pagepool, GFP_KERNEL);
 			if (!victim)
 				return -ENOMEM;
 			victim->mapping = Z_EROFS_MAPPING_STAGING;
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 544a453f3076..0c1175a08e54 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -382,7 +382,7 @@ int erofs_namei(struct inode *dir, struct qstr *name,
 extern const struct file_operations erofs_dir_fops;
 /* utils.c / zdata.c */
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
 #if (EROFS_PCPUBUF_NR_PAGES > 0)
 void *erofs_get_pcpubuf(unsigned int pagenr);
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index f66043ee16b9..1e8e1450d5b0 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -7,7 +7,7 @@
#include "internal.h"
#include <linux/pagevec.h>
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{
struct page *page;
@@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
 		DBG_BUGON(page_ref_count(page) != 1);
 		list_del(&page->lru);
 	} else {
-		page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
+		page = alloc_page(gfp);
 	}
 	return page;
 }
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 93f8bc1a64f6..1c582a3a40a3 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -546,15 +546,6 @@ static bool z_erofs_collector_end(struct z_erofs_collector *clt)
 	return true;
 }
-static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
-					       gfp_t gfp)
-{
-	struct page *page = erofs_allocpage(pagepool, gfp, true);
-
-	page->mapping = Z_EROFS_MAPPING_STAGING;
-	return page;
-}
-
 static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
 				       unsigned int cachestrategy,
 				       erofs_off_t la)
@@ -661,8 +652,9 @@ retry:
 	/* should allocate an additional staging page for pagevec */
 	if (err == -EAGAIN) {
 		struct page *const newpage =
-			__stagingpage_alloc(pagepool, GFP_NOFS);
+			erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
+		newpage->mapping = Z_EROFS_MAPPING_STAGING;
 		err = z_erofs_attach_page(clt, newpage,
 					  Z_EROFS_PAGE_TYPE_EXCLUSIVE);
 		if (!err)
@@ -1079,19 +1071,24 @@ repeat:
 	unlock_page(page);
 	put_page(page);
 out_allocpage:
-	page = __stagingpage_alloc(pagepool, gfp);
-	if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
-		list_add(&page->lru, pagepool);
-		cpu_relax();
-		goto repeat;
-	}
-	if (!tocache)
-		goto out;
-	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
+	page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
+	if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
+		/* non-LRU / non-movable temporary page is needed */
 		page->mapping = Z_EROFS_MAPPING_STAGING;
-		goto out;
+		tocache = false;
 	}
+	if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
+		if (tocache) {
+			/* since it added to managed cache successfully */
+			unlock_page(page);
+			put_page(page);
+		} else {
+			list_add(&page->lru, pagepool);
+		}
+		cond_resched();
+		goto repeat;
+	}
 	set_page_private(page, (unsigned long)pcl);
 	SetPagePrivate(page);
 out:	/* the only exit (for tracing and debugging) */
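One behavioural note on the last zdata.c hunk: the new page is now prepared first (added to the managed page cache, or marked as a temporary staging page) and only then published into pcl->compressed_pages[] with cmpxchg(). On a lost race the page is released again (back onto the pagepool, or just unlocked and put if it already went into the managed cache) and the lookup is retried after cond_resched() rather than cpu_relax(). A minimal, hypothetical sketch of that publish-with-cmpxchg pattern, simplified to the staging-page path; the function name install_compressed_page and the bare 'slot' parameter are invented for illustration and are not erofs code:

/*
 * Hypothetical illustration only, not part of the patch. Assumes the
 * erofs internal headers for erofs_allocpage() and Z_EROFS_MAPPING_STAGING.
 */
static struct page *install_compressed_page(struct page **slot,
					    struct list_head *pagepool,
					    gfp_t gfp)
{
	struct page *page;

repeat:
	page = READ_ONCE(*slot);
	if (page)
		return page;	/* another thread already filled the slot */

	/* must not fail here, mirroring gfp | __GFP_NOFAIL above */
	page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
	page->mapping = Z_EROFS_MAPPING_STAGING;	/* temporary, non-LRU page */

	if (cmpxchg(slot, NULL, page)) {
		/* lost the race: recycle our page and look at the slot again */
		list_add(&page->lru, pagepool);
		cond_resched();
		goto repeat;
	}
	return page;
}

The real code above additionally handles the managed-cache case, where a lost race means the freshly cached page is simply unlocked and put instead of being returned to the pagepool.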