Diffstat (limited to 'fs/erofs')

 fs/erofs/decompressor.c |  22
 fs/erofs/internal.h     |   4
 fs/erofs/utils.c        |  15
 fs/erofs/xattr.c        |   2
 fs/erofs/xattr.h        |  17
 fs/erofs/zdata.c        | 123

 6 files changed, 76 insertions(+), 107 deletions(-)
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 2890a67a1ded..5779a15c2cd6 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -306,24 +306,22 @@ static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
}
src = kmap_atomic(*rq->in);
- if (!rq->out[0]) {
- dst = NULL;
- } else {
+ if (rq->out[0]) {
dst = kmap_atomic(rq->out[0]);
memcpy(dst + rq->pageofs_out, src, righthalf);
+ kunmap_atomic(dst);
}
- if (rq->out[1] == *rq->in) {
- memmove(src, src + righthalf, rq->pageofs_out);
- } else if (nrpages_out == 2) {
- if (dst)
- kunmap_atomic(dst);
+ if (nrpages_out == 2) {
DBG_BUGON(!rq->out[1]);
- dst = kmap_atomic(rq->out[1]);
- memcpy(dst, src + righthalf, rq->pageofs_out);
+ if (rq->out[1] == *rq->in) {
+ memmove(src, src + righthalf, rq->pageofs_out);
+ } else {
+ dst = kmap_atomic(rq->out[1]);
+ memcpy(dst, src + righthalf, rq->pageofs_out);
+ kunmap_atomic(dst);
+ }
}
- if (dst)
- kunmap_atomic(dst);
kunmap_atomic(src);
return 0;
}
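
Note on the decompressor.c hunk: the rewrite pairs every kmap_atomic() with a kunmap_atomic() inside the branch that took the mapping, so the old "was dst mapped?" bookkeeping at the end of the function disappears. A minimal userspace sketch of that scoped-mapping pattern, with map_page()/unmap_page() as hypothetical stand-ins for kmap_atomic()/kunmap_atomic():

    /*
     * Sketch of the scoped-mapping shape the hunk above converges on:
     * each mapping is released in the branch that created it, so no
     * "dst still mapped" state survives past the copy.
     */
    #include <string.h>

    static void *map_page(void *page) { return page; }      /* stand-in */
    static void unmap_page(void *mapped) { (void)mapped; }  /* stand-in */

    static void copy_shifted(char *src, char *out0, char *out1,
                             size_t righthalf, size_t pageofs_out)
    {
        if (out0) {
            char *dst = map_page(out0);

            memcpy(dst + pageofs_out, src, righthalf);
            unmap_page(dst);                /* released in the same branch */
        }
        if (out1) {
            if (out1 == src) {              /* in-place: ranges may overlap */
                memmove(src, src + righthalf, pageofs_out);
            } else {
                char *dst = map_page(out1);

                memcpy(dst, src + righthalf, pageofs_out);
                unmap_page(dst);
            }
        }
    }

    int main(void)
    {
        char in[9] = "ABCDwxyz", out0[8] = {0};

        copy_shifted(in, out0, in, 4, 4);   /* out1 == src: in-place path */
        return !(out0[4] == 'A' && in[0] == 'w');
    }

The memmove() on the in-place path matters: when rq->out[1] aliases the input page, source and destination ranges can overlap, which memcpy() does not allow.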
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 1ed5beff7d11..c4c6dcdc89ad 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -401,9 +401,9 @@ static inline void *erofs_get_pcpubuf(unsigned int pagenr)
#ifdef CONFIG_EROFS_FS_ZIP
int erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
- pgoff_t index, bool *tag);
+ pgoff_t index);
int erofs_register_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp, bool tag);
+ struct erofs_workgroup *grp);
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 1e8e1450d5b0..fddc5059c930 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -59,7 +59,7 @@ repeat:
}
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
- pgoff_t index, bool *tag)
+ pgoff_t index)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_workgroup *grp;
@@ -68,9 +68,6 @@ repeat:
rcu_read_lock();
grp = radix_tree_lookup(&sbi->workstn_tree, index);
if (grp) {
- *tag = xa_pointer_tag(grp);
- grp = xa_untag_pointer(grp);
-
if (erofs_workgroup_get(grp)) {
/* prefer to relax rcu read side */
rcu_read_unlock();
@@ -84,8 +81,7 @@ repeat:
}
int erofs_register_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp,
- bool tag)
+ struct erofs_workgroup *grp)
{
struct erofs_sb_info *sbi;
int err;
@@ -103,8 +99,6 @@ int erofs_register_workgroup(struct super_block *sb,
sbi = EROFS_SB(sb);
xa_lock(&sbi->workstn_tree);
- grp = xa_tag_pointer(grp, tag);
-
/*
* Bump up reference count before making this workgroup
* visible to other users in order to avoid potential UAF
@@ -175,8 +169,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
* however in order to avoid some race conditions, add a
* DBG_BUGON to observe this in advance.
*/
- DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
- grp->index)) != grp);
+ DBG_BUGON(radix_tree_delete(&sbi->workstn_tree, grp->index) != grp);
/*
* If managed cache is on, last refcount should indicate
@@ -201,7 +194,7 @@ repeat:
batch, first_index, PAGEVEC_SIZE);
for (i = 0; i < found; ++i) {
- struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
+ struct erofs_workgroup *grp = batch[i];
first_index = grp->index + 1;
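
Note on the utils.c hunks: xa_tag_pointer()/xa_untag_pointer() had been packing a boolean into spare low bits of each radix-tree entry, but no caller ever consumed the tag, so lookup, registration, and deletion can all operate on plain pointers. A simplified, standalone sketch of the low-bit tagging technique being removed (not the kernel's exact encoding), assuming entries aligned to at least 4 bytes:

    /*
     * Low-bit pointer tagging: pack a small integer into the unused
     * low bits of an aligned pointer.  Once nobody reads the tag back,
     * the pack/unpack round trip is pure overhead -- which is what the
     * diff above eliminates.
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static void *tag_pointer(void *p, unsigned long tag)
    {
        assert(((uintptr_t)p & 3) == 0 && tag <= 3); /* needs alignment */
        return (void *)((uintptr_t)p | tag);
    }

    static void *untag_pointer(void *p)
    {
        return (void *)((uintptr_t)p & ~(uintptr_t)3);
    }

    static unsigned long pointer_tag(void *p)
    {
        return (uintptr_t)p & 3;
    }

    int main(void)
    {
        int obj;
        void *entry = tag_pointer(&obj, 1);

        printf("tag=%lu untagged=%p\n", pointer_tag(entry),
               untag_pointer(entry));
        return 0;
    }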
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index a13a78725c57..b766c3ee5fa8 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -649,6 +649,8 @@ ssize_t erofs_listxattr(struct dentry *dentry,
struct listxattr_iter it;
ret = init_inode_xattrs(d_inode(dentry));
+ if (ret == -ENOATTR)
+ return 0;
if (ret)
return ret;
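
Note on the xattr.c hunk: init_inode_xattrs() returns -ENOATTR when the inode simply carries no extended attributes; for listxattr that is not an error but an empty list, hence the early `return 0`. The expected behaviour can be checked from userspace with listxattr(2):

    /*
     * listxattr(2) on a file with no xattrs should return 0 (an empty
     * name list), not fail -- the pre-fix erofs behaviour was the
     * error path below.  Pass a file on an erofs mount as argv[1].
     */
    #include <stdio.h>
    #include <sys/xattr.h>

    int main(int argc, char *argv[])
    {
        char buf[1024];
        ssize_t len;

        if (argc < 2) {
            fprintf(stderr, "usage: %s <file>\n", argv[0]);
            return 1;
        }
        len = listxattr(argv[1], buf, sizeof(buf));
        if (len < 0) {
            perror("listxattr");
            return 1;
        }
        printf("%zd bytes of xattr names\n", len); /* 0 == no xattrs */
        return 0;
    }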
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
index 3585b84d2f20..50966f1c676e 100644
--- a/fs/erofs/xattr.h
+++ b/fs/erofs/xattr.h
@@ -46,18 +46,19 @@ extern const struct xattr_handler erofs_xattr_security_handler;
static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
{
-static const struct xattr_handler *xattr_handler_map[] = {
- [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
+ static const struct xattr_handler *xattr_handler_map[] = {
+ [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
- [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
- [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
- &posix_acl_default_xattr_handler,
+ [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] =
+ &posix_acl_access_xattr_handler,
+ [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
+ &posix_acl_default_xattr_handler,
#endif
- [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
+ [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
- [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
+ [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
#endif
-};
+ };
return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
xattr_handler_map[idx] : NULL;
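
Note on the xattr.h hunk: the handler map is a sparse index-to-pointer table built with designated initializers; keeping it `static` inside the accessor preserves the single read-only copy while narrowing its scope to the only function that reads it. A standalone sketch of the same lookup pattern (the handler names are hypothetical, and the indices only echo the erofs layout):

    /*
     * Function-local static lookup table, sparsely filled with
     * designated initializers and indexed by a small id.  Unset slots
     * are implicitly NULL, so out-of-range and unknown ids both fail
     * the same way.
     */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct xattr_handler { const char *prefix; };

    static const struct xattr_handler user_handler = { "user." };
    static const struct xattr_handler trusted_handler = { "trusted." };

    static const struct xattr_handler *xattr_handler(unsigned int idx)
    {
        static const struct xattr_handler *map[] = {
            [1] = &user_handler,     /* e.g. EROFS_XATTR_INDEX_USER */
            [4] = &trusted_handler,  /* e.g. EROFS_XATTR_INDEX_TRUSTED */
        };

        /* idx 0 is reserved; unset slots yield NULL */
        return idx && idx < ARRAY_SIZE(map) ? map[idx] : NULL;
    }

    int main(void)
    {
        const struct xattr_handler *h = xattr_handler(1);

        printf("%s\n", h ? h->prefix : "(none)");
        return 0;
    }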
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index ca99425a4536..80e47f07d946 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -345,9 +345,8 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
struct z_erofs_pcluster *pcl;
struct z_erofs_collection *cl;
unsigned int length;
- bool tag;
- grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
+ grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
if (!grp)
return -ENOENT;
@@ -438,7 +437,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
*/
mutex_trylock(&cl->lock);
- err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
+ err = erofs_register_workgroup(inode->i_sb, &pcl->obj);
if (err) {
mutex_unlock(&cl->lock);
kmem_cache_free(pcluster_cachep, pcl);
@@ -1149,21 +1148,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
qtail[JQ_BYPASS] = &pcl->next;
}
-static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
- unsigned int nr_bios, bool force_fg)
-{
- /*
- * although background is preferred, no one is pending for submission.
- * don't issue workqueue for decompression but drop it directly instead.
- */
- if (force_fg || nr_bios)
- return false;
-
- kvfree(q[JQ_SUBMIT]);
- return true;
-}
-
-static bool z_erofs_submit_queue(struct super_block *sb,
+static void z_erofs_submit_queue(struct super_block *sb,
z_erofs_next_pcluster_t owned_head,
struct list_head *pagepool,
struct z_erofs_decompressqueue *fgq,
@@ -1172,19 +1157,12 @@ static bool z_erofs_submit_queue(struct super_block *sb,
struct erofs_sb_info *const sbi = EROFS_SB(sb);
z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
- struct bio *bio;
void *bi_private;
/* since bio will be NULL, no need to initialize last_index */
pgoff_t uninitialized_var(last_index);
- bool force_submit = false;
- unsigned int nr_bios;
-
- if (owned_head == Z_EROFS_PCLUSTER_TAIL)
- return false;
+ unsigned int nr_bios = 0;
+ struct bio *bio = NULL;
- force_submit = false;
- bio = NULL;
- nr_bios = 0;
bi_private = jobqueueset_init(sb, q, fgq, force_fg);
qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
@@ -1194,11 +1172,9 @@ static bool z_erofs_submit_queue(struct super_block *sb,
do {
struct z_erofs_pcluster *pcl;
- unsigned int clusterpages;
- pgoff_t first_index;
- struct page *page;
- unsigned int i = 0, bypass = 0;
- int err;
+ pgoff_t cur, end;
+ unsigned int i = 0;
+ bool bypass = true;
/* no possible 'owned_head' equals the following */
DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
@@ -1206,55 +1182,50 @@ static bool z_erofs_submit_queue(struct super_block *sb,
pcl = container_of(owned_head, struct z_erofs_pcluster, next);
- clusterpages = BIT(pcl->clusterbits);
+ cur = pcl->obj.index;
+ end = cur + BIT(pcl->clusterbits);
/* close the main owned chain at first */
owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
Z_EROFS_PCLUSTER_TAIL_CLOSED);
- first_index = pcl->obj.index;
- force_submit |= (first_index != last_index + 1);
+ do {
+ struct page *page;
+ int err;
-repeat:
- page = pickup_page_for_submission(pcl, i, pagepool,
- MNGD_MAPPING(sbi),
- GFP_NOFS);
- if (!page) {
- force_submit = true;
- ++bypass;
- goto skippage;
- }
+ page = pickup_page_for_submission(pcl, i++, pagepool,
+ MNGD_MAPPING(sbi),
+ GFP_NOFS);
+ if (!page)
+ continue;
- if (bio && force_submit) {
+ if (bio && cur != last_index + 1) {
submit_bio_retry:
- submit_bio(bio);
- bio = NULL;
- }
-
- if (!bio) {
- bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+ submit_bio(bio);
+ bio = NULL;
+ }
- bio->bi_end_io = z_erofs_decompressqueue_endio;
- bio_set_dev(bio, sb->s_bdev);
- bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
- LOG_SECTORS_PER_BLOCK;
- bio->bi_private = bi_private;
- bio->bi_opf = REQ_OP_READ;
+ if (!bio) {
+ bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
- ++nr_bios;
- }
+ bio->bi_end_io = z_erofs_decompressqueue_endio;
+ bio_set_dev(bio, sb->s_bdev);
+ bio->bi_iter.bi_sector = (sector_t)cur <<
+ LOG_SECTORS_PER_BLOCK;
+ bio->bi_private = bi_private;
+ bio->bi_opf = REQ_OP_READ;
+ ++nr_bios;
+ }
- err = bio_add_page(bio, page, PAGE_SIZE, 0);
- if (err < PAGE_SIZE)
- goto submit_bio_retry;
+ err = bio_add_page(bio, page, PAGE_SIZE, 0);
+ if (err < PAGE_SIZE)
+ goto submit_bio_retry;
- force_submit = false;
- last_index = first_index + i;
-skippage:
- if (++i < clusterpages)
- goto repeat;
+ last_index = cur;
+ bypass = false;
+ } while (++cur < end);
- if (bypass < clusterpages)
+ if (!bypass)
qtail[JQ_SUBMIT] = &pcl->next;
else
move_to_bypass_jobqueue(pcl, qtail, owned_head);
@@ -1263,11 +1234,15 @@ skippage:
if (bio)
submit_bio(bio);
- if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
- return true;
-
+ /*
+ * although background is preferred, no one is pending for submission.
+ * don't issue workqueue for decompression but drop it directly instead.
+ */
+ if (!*force_fg && !nr_bios) {
+ kvfree(q[JQ_SUBMIT]);
+ return;
+ }
z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
- return true;
}
static void z_erofs_runqueue(struct super_block *sb,
@@ -1276,9 +1251,9 @@ static void z_erofs_runqueue(struct super_block *sb,
{
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
- if (!z_erofs_submit_queue(sb, clt->owned_head,
- pagepool, io, &force_fg))
+ if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
return;
+ z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
/* handle bypass queue (no i/o pclusters) immediately */
z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
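
Note on the zdata.c submission rewrite: the early Z_EROFS_PCLUSTER_TAIL check moves to the caller so z_erofs_submit_queue() can return void, and the loop now walks physical page indices cur..end per pcluster, extending the current bio while indices stay contiguous; a gap (cur != last_index + 1) or a full bio forces a submit_bio() plus a fresh bio_alloc(). A standalone sketch of that flush-on-discontinuity batching, with queue_block()/submit() as hypothetical stand-ins for the bio plumbing:

    /*
     * Flush-on-discontinuity batching: consecutive block indices share
     * one batch ("bio"); a gap submits the batch and starts a new one.
     */
    #include <stdio.h>

    struct batch { unsigned long start, n; };

    static struct batch cur_batch;
    static int have_batch;

    static void submit(void)
    {
        printf("submit [%lu, %lu)\n", cur_batch.start,
               cur_batch.start + cur_batch.n);
        have_batch = 0;
    }

    static void queue_block(unsigned long idx)
    {
        static unsigned long last_index;

        if (have_batch && idx != last_index + 1)
            submit();                   /* gap: flush current batch */
        if (!have_batch) {
            cur_batch.start = idx;      /* open a batch at this index */
            cur_batch.n = 0;
            have_batch = 1;
        }
        cur_batch.n++;
        last_index = idx;
    }

    int main(void)
    {
        unsigned long idx[] = { 10, 11, 12, 20, 21, 40 };

        for (unsigned int i = 0; i < 6; i++)
            queue_block(idx[i]);
        if (have_batch)
            submit();                   /* final flush */
        return 0;                       /* [10,13) [20,22) [40,41) */
    }

The per-pcluster `bypass` flag plays the same role as the old page counter: if no page in the pcluster made it into a bio, the whole pcluster is moved to the bypass jobqueue and decompressed without I/O.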