Diffstat (limited to 'fs/erofs/utils.c')
-rw-r--r-- | fs/erofs/utils.c | 141
1 file changed, 54 insertions, 87 deletions
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index fddc5059c930..46627cb69abe 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -1,45 +1,38 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2018 HUAWEI, Inc.
- *             http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *             https://www.huawei.com/
  */
 #include "internal.h"
 #include <linux/pagevec.h>
 
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
+struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
 {
-        struct page *page;
+        struct page *page = *pagepool;
 
-        if (!list_empty(pool)) {
-                page = lru_to_page(pool);
+        if (page) {
                 DBG_BUGON(page_ref_count(page) != 1);
-                list_del(&page->lru);
+                *pagepool = (struct page *)page_private(page);
         } else {
                 page = alloc_page(gfp);
         }
         return page;
 }
 
-#if (EROFS_PCPUBUF_NR_PAGES > 0)
-static struct {
-        u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
-} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];
-
-void *erofs_get_pcpubuf(unsigned int pagenr)
+void erofs_release_pages(struct page **pagepool)
 {
-        preempt_disable();
-        return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
+        while (*pagepool) {
+                struct page *page = *pagepool;
+
+                *pagepool = (struct page *)page_private(page);
+                put_page(page);
+        }
 }
-#endif
 
 #ifdef CONFIG_EROFS_FS_ZIP
 /* global shrink count (for all mounted EROFS instances) */
 static atomic_long_t erofs_global_shrink_cnt;
 
-#define __erofs_workgroup_get(grp)      atomic_inc(&(grp)->refcount)
-#define __erofs_workgroup_put(grp)      atomic_dec(&(grp)->refcount)
-
 static int erofs_workgroup_get(struct erofs_workgroup *grp)
 {
         int o;
@@ -66,7 +59,7 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
 
 repeat:
         rcu_read_lock();
-        grp = radix_tree_lookup(&sbi->workstn_tree, index);
+        grp = xa_load(&sbi->managed_pslots, index);
         if (grp) {
                 if (erofs_workgroup_get(grp)) {
                         /* prefer to relax rcu read side */
@@ -80,43 +73,37 @@ repeat:
         return grp;
 }
 
-int erofs_register_workgroup(struct super_block *sb,
-                             struct erofs_workgroup *grp)
+struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
+                                               struct erofs_workgroup *grp)
 {
-        struct erofs_sb_info *sbi;
-        int err;
-
-        /* grp shouldn't be broken or used before */
-        if (atomic_read(&grp->refcount) != 1) {
-                DBG_BUGON(1);
-                return -EINVAL;
-        }
-
-        err = radix_tree_preload(GFP_NOFS);
-        if (err)
-                return err;
-
-        sbi = EROFS_SB(sb);
-        xa_lock(&sbi->workstn_tree);
+        struct erofs_sb_info *const sbi = EROFS_SB(sb);
+        struct erofs_workgroup *pre;
 
         /*
-         * Bump up reference count before making this workgroup
-         * visible to other users in order to avoid potential UAF
-         * without serialized by workstn_lock.
+         * Bump up a reference count before making this visible
+         * to others for the XArray in order to avoid potential
+         * UAF without serialized by xa_lock.
          */
-        __erofs_workgroup_get(grp);
+        atomic_inc(&grp->refcount);
 
-        err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp);
-        if (err)
-                /*
-                 * it's safe to decrease since the workgroup isn't visible
-                 * and refcount >= 2 (cannot be freezed).
-                 */
-                __erofs_workgroup_put(grp);
-
-        xa_unlock(&sbi->workstn_tree);
-        radix_tree_preload_end();
-        return err;
+repeat:
+        xa_lock(&sbi->managed_pslots);
+        pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
+                           NULL, grp, GFP_NOFS);
+        if (pre) {
+                if (xa_is_err(pre)) {
+                        pre = ERR_PTR(xa_err(pre));
+                } else if (erofs_workgroup_get(pre)) {
+                        /* try to legitimize the current in-tree one */
+                        xa_unlock(&sbi->managed_pslots);
+                        cond_resched();
+                        goto repeat;
+                }
+                atomic_dec(&grp->refcount);
+                grp = pre;
+        }
+        xa_unlock(&sbi->managed_pslots);
+        return grp;
 }
 
 static void __erofs_workgroup_free(struct erofs_workgroup *grp)
@@ -136,12 +123,6 @@ int erofs_workgroup_put(struct erofs_workgroup *grp)
         return count;
 }
 
-static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
-{
-        erofs_workgroup_unfreeze(grp, 0);
-        __erofs_workgroup_free(grp);
-}
-
 static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
                                            struct erofs_workgroup *grp)
 {
@@ -155,7 +136,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 
         /*
          * Note that all cached pages should be unattached
-         * before deleted from the radix tree. Otherwise some
+         * before deleted from the XArray. Otherwise some
          * cached pages could be still attached to the orphan
          * old workgroup when the new one is available in the tree.
          */
@@ -169,47 +150,34 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
          * however in order to avoid some race conditions, add a
          * DBG_BUGON to observe this in advance.
          */
-        DBG_BUGON(radix_tree_delete(&sbi->workstn_tree, grp->index) != grp);
+        DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
 
-        /*
-         * If managed cache is on, last refcount should indicate
-         * the related workstation.
-         */
-        erofs_workgroup_unfreeze_final(grp);
+        /* last refcount should be connected with its managed pslot.  */
+        erofs_workgroup_unfreeze(grp, 0);
+        __erofs_workgroup_free(grp);
         return true;
 }
 
 static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
                                               unsigned long nr_shrink)
 {
-        pgoff_t first_index = 0;
-        void *batch[PAGEVEC_SIZE];
+        struct erofs_workgroup *grp;
         unsigned int freed = 0;
+        unsigned long index;
 
-        int i, found;
-repeat:
-        xa_lock(&sbi->workstn_tree);
-
-        found = radix_tree_gang_lookup(&sbi->workstn_tree,
-                                       batch, first_index, PAGEVEC_SIZE);
-
-        for (i = 0; i < found; ++i) {
-                struct erofs_workgroup *grp = batch[i];
-
-                first_index = grp->index + 1;
-
+        xa_lock(&sbi->managed_pslots);
+        xa_for_each(&sbi->managed_pslots, index, grp) {
                 /* try to shrink each valid workgroup */
                 if (!erofs_try_to_release_workgroup(sbi, grp))
                         continue;
 
+                xa_unlock(&sbi->managed_pslots);
                 ++freed;
                 if (!--nr_shrink)
-                        break;
+                        return freed;
+                xa_lock(&sbi->managed_pslots);
         }
-        xa_unlock(&sbi->workstn_tree);
-
-        if (i && nr_shrink)
-                goto repeat;
+        xa_unlock(&sbi->managed_pslots);
         return freed;
 }
 
@@ -286,7 +254,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
                 spin_unlock(&erofs_sb_list_lock);
                 sbi->shrinker_run_no = run_no;
 
-                freed += erofs_shrink_workstation(sbi, nr);
+                freed += erofs_shrink_workstation(sbi, nr - freed);
 
                 spin_lock(&erofs_sb_list_lock);
                 /* Get the next list element before we move this one */
@@ -314,7 +282,7 @@ static struct shrinker erofs_shrinker_info = {
 
 int __init erofs_init_shrinker(void)
 {
-        return register_shrinker(&erofs_shrinker_info);
+        return register_shrinker(&erofs_shrinker_info, "erofs-shrinker");
 }
 
 void erofs_exit_shrinker(void)
@@ -322,4 +290,3 @@ void erofs_exit_shrinker(void)
         unregister_shrinker(&erofs_shrinker_info);
 }
 #endif  /* !CONFIG_EROFS_FS_ZIP */
-
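
The erofs_allocpage()/erofs_release_pages() hunks above replace the old list_head-based page pool with a singly linked chain threaded through each free page's private field: allocation pops the head of the chain (or falls back to alloc_page()), and release walks the chain and drops each reference. The userspace sketch below only models that idea; struct fakepage, pool_alloc(), pool_add() and pool_release() are hypothetical names for illustration and are not part of the kernel or EROFS API.

/*
 * Minimal userspace model of the pagepool scheme above (illustrative only,
 * not kernel code): free entries are chained through a field they already
 * own ("next" here, page_private() in the kernel), so the pool needs no
 * separate list head structure and no per-entry bookkeeping allocation.
 */
#include <stdio.h>
#include <stdlib.h>

struct fakepage {
        struct fakepage *next;  /* stands in for page_private() chaining */
        char data[64];          /* stands in for the page contents */
};

/* pop one entry from the pool, or allocate a fresh one (erofs_allocpage analogue) */
static struct fakepage *pool_alloc(struct fakepage **pool)
{
        struct fakepage *page = *pool;

        if (page)
                *pool = page->next;     /* unlink the head of the chain */
        else
                page = calloc(1, sizeof(*page));
        return page;
}

/* push an unused entry back onto the pool head */
static void pool_add(struct fakepage **pool, struct fakepage *page)
{
        page->next = *pool;
        *pool = page;
}

/* free everything still chained in the pool (erofs_release_pages analogue) */
static void pool_release(struct fakepage **pool)
{
        while (*pool) {
                struct fakepage *page = *pool;

                *pool = page->next;
                free(page);
        }
}

int main(void)
{
        struct fakepage *pool = NULL;
        struct fakepage *a = pool_alloc(&pool); /* pool empty: fresh allocation */
        struct fakepage *b;

        pool_add(&pool, a);                     /* recycle it into the pool */
        b = pool_alloc(&pool);                  /* comes back from the pool */
        printf("reused: %s\n", a == b ? "yes" : "no");
        pool_add(&pool, b);
        pool_release(&pool);                    /* frees everything still chained */
        return 0;
}

Chaining through a field the page already owns avoids any side allocation per pooled entry and lets the whole pool be passed around as a single struct page ** cursor, which is why the new erofs_allocpage() and erofs_release_pages() take struct page **pagepool instead of a struct list_head.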