Diffstat (limited to 'drivers/staging/erofs')
-rw-r--r--  drivers/staging/erofs/Kconfig                        |   9
-rw-r--r--  drivers/staging/erofs/data.c                         | 105
-rw-r--r--  drivers/staging/erofs/dir.c                          |  15
-rw-r--r--  drivers/staging/erofs/erofs_fs.h                     |  11
-rw-r--r--  drivers/staging/erofs/include/trace/events/erofs.h   |  20
-rw-r--r--  drivers/staging/erofs/inode.c                        |  50
-rw-r--r--  drivers/staging/erofs/internal.h                     | 111
-rw-r--r--  drivers/staging/erofs/namei.c                        |  47
-rw-r--r--  drivers/staging/erofs/super.c                        |  96
-rw-r--r--  drivers/staging/erofs/unzip_vle.c                    | 447
-rw-r--r--  drivers/staging/erofs/unzip_vle.h                    |  12
-rw-r--r--  drivers/staging/erofs/unzip_vle_lz4.c                |  69
-rw-r--r--  drivers/staging/erofs/utils.c                        |  20
-rw-r--r--  drivers/staging/erofs/xattr.c                        | 196
14 files changed, 720 insertions, 488 deletions
diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
index 663b755bf2fb..c8521d71039b 100644
--- a/drivers/staging/erofs/Kconfig
+++ b/drivers/staging/erofs/Kconfig
@@ -78,6 +78,15 @@ config EROFS_FAULT_INJECTION
Test EROFS to inject faults such as ENOMEM, EIO, and so on.
If unsure, say N.
+config EROFS_FS_IO_MAX_RETRIES
+ int "EROFS IO Maximum Retries"
+ depends on EROFS_FS
+ default "5"
+ help
+ Maximum retry count of IO Errors.
+
+ If unsure, leave the default value (5 retries, 6 IOs at most).
+
config EROFS_FS_ZIP
bool "EROFS Data Compresssion Support"
depends on EROFS_FS
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index ac263a180253..6384f73e5418 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -25,7 +25,7 @@ static inline void read_endio(struct bio *bio)
struct page *page = bvec->bv_page;
/* page is already locked */
- BUG_ON(PageUptodate(page));
+ DBG_BUGON(PageUptodate(page));
if (unlikely(err))
SetPageError(page);
@@ -39,38 +39,50 @@ static inline void read_endio(struct bio *bio)
}
/* prio -- true is used for dir */
-struct page *erofs_get_meta_page(struct super_block *sb,
- erofs_blk_t blkaddr, bool prio)
+struct page *__erofs_get_meta_page(struct super_block *sb,
+ erofs_blk_t blkaddr, bool prio, bool nofail)
{
- struct inode *bd_inode = sb->s_bdev->bd_inode;
- struct address_space *mapping = bd_inode->i_mapping;
+ struct inode *const bd_inode = sb->s_bdev->bd_inode;
+ struct address_space *const mapping = bd_inode->i_mapping;
+ /* prefer retrying in the allocator to blindly looping below */
+ const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
+ (nofail ? __GFP_NOFAIL : 0);
+ unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
struct page *page;
+ int err;
repeat:
- page = find_or_create_page(mapping, blkaddr,
- /*
- * Prefer looping in the allocator rather than here,
- * at least that code knows what it's doing.
- */
- mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);
-
- BUG_ON(!page || !PageLocked(page));
+ page = find_or_create_page(mapping, blkaddr, gfp);
+ if (unlikely(page == NULL)) {
+ DBG_BUGON(nofail);
+ return ERR_PTR(-ENOMEM);
+ }
+ DBG_BUGON(!PageLocked(page));
if (!PageUptodate(page)) {
struct bio *bio;
- int err;
- bio = prepare_bio(sb, blkaddr, 1, read_endio);
+ bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
+ if (IS_ERR(bio)) {
+ DBG_BUGON(nofail);
+ err = PTR_ERR(bio);
+ goto err_out;
+ }
+
err = bio_add_page(bio, page, PAGE_SIZE, 0);
- BUG_ON(err != PAGE_SIZE);
+ if (unlikely(err != PAGE_SIZE)) {
+ err = -EFAULT;
+ goto err_out;
+ }
__submit_bio(bio, REQ_OP_READ,
REQ_META | (prio ? REQ_PRIO : 0));
lock_page(page);
- /* the page has been truncated by others? */
+ /* this page has been truncated by others */
if (unlikely(page->mapping != mapping)) {
+unlock_repeat:
unlock_page(page);
put_page(page);
goto repeat;
@@ -78,25 +90,32 @@ repeat:
/* more likely a read error */
if (unlikely(!PageUptodate(page))) {
- unlock_page(page);
- put_page(page);
-
- page = ERR_PTR(-EIO);
+ if (io_retries) {
+ --io_retries;
+ goto unlock_repeat;
+ }
+ err = -EIO;
+ goto err_out;
}
}
return page;
+
+err_out:
+ unlock_page(page);
+ put_page(page);
+ return ERR_PTR(err);
}
static int erofs_map_blocks_flatmode(struct inode *inode,
struct erofs_map_blocks *map,
int flags)
{
+ int err = 0;
erofs_blk_t nblocks, lastblk;
u64 offset = map->m_la;
struct erofs_vnode *vi = EROFS_V(inode);
trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
- BUG_ON(is_inode_layout_compression(inode));
nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
lastblk = nblocks - is_inode_layout_inline(inode);
@@ -123,18 +142,27 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
map->m_plen = inode->i_size - offset;
/* inline data should locate in one meta block */
- BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE);
+ if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
+ DBG_BUGON(1);
+ err = -EIO;
+ goto err_out;
+ }
+
map->m_flags |= EROFS_MAP_META;
} else {
errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
vi->nid, inode->i_size, map->m_la);
- BUG();
+ DBG_BUGON(1);
+ err = -EIO;
+ goto err_out;
}
out:
map->m_llen = map->m_plen;
+
+err_out:
trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
- return 0;
+ return err;
}
#ifdef CONFIG_EROFS_FS_ZIP
@@ -183,14 +211,14 @@ static inline struct bio *erofs_read_raw_page(
struct address_space *mapping,
struct page *page,
erofs_off_t *last_block,
- unsigned nblocks,
+ unsigned int nblocks,
bool ra)
{
struct inode *inode = mapping->host;
erofs_off_t current_block = (erofs_off_t)page->index;
int err;
- BUG_ON(!nblocks);
+ DBG_BUGON(!nblocks);
if (PageUptodate(page)) {
err = 0;
@@ -217,7 +245,7 @@ submit_bio_retry:
.m_la = blknr_to_addr(current_block),
};
erofs_blk_t blknr;
- unsigned blkoff;
+ unsigned int blkoff;
err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
if (unlikely(err))
@@ -233,7 +261,7 @@ submit_bio_retry:
}
/* for RAW access mode, m_plen must be equal to m_llen */
- BUG_ON(map.m_plen != map.m_llen);
+ DBG_BUGON(map.m_plen != map.m_llen);
blknr = erofs_blknr(map.m_pa);
blkoff = erofs_blkoff(map.m_pa);
@@ -243,7 +271,7 @@ submit_bio_retry:
void *vsrc, *vto;
struct page *ipage;
- BUG_ON(map.m_plen > PAGE_SIZE);
+ DBG_BUGON(map.m_plen > PAGE_SIZE);
ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
@@ -270,7 +298,7 @@ submit_bio_retry:
}
/* pa must be block-aligned for raw reading */
- BUG_ON(erofs_blkoff(map.m_pa) != 0);
+ DBG_BUGON(erofs_blkoff(map.m_pa));
/* max # of continuous pages */
if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
@@ -278,7 +306,14 @@ submit_bio_retry:
if (nblocks > BIO_MAX_PAGES)
nblocks = BIO_MAX_PAGES;
- bio = prepare_bio(inode->i_sb, blknr, nblocks, read_endio);
+ bio = erofs_grab_bio(inode->i_sb,
+ blknr, nblocks, read_endio, false);
+
+ if (IS_ERR(bio)) {
+ err = PTR_ERR(bio);
+ bio = NULL;
+ goto err_out;
+ }
}
err = bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -331,13 +366,13 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page)
if (IS_ERR(bio))
return PTR_ERR(bio);
- BUG_ON(bio != NULL); /* since we have only one bio -- must be NULL */
+ DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
return 0;
}
static int erofs_raw_access_readpages(struct file *filp,
struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
+ struct list_head *pages, unsigned int nr_pages)
{
erofs_off_t last_block;
struct bio *bio = NULL;
@@ -369,7 +404,7 @@ static int erofs_raw_access_readpages(struct file *filp,
/* pages could still be locked */
put_page(page);
}
- BUG_ON(!list_empty(pages));
+ DBG_BUGON(!list_empty(pages));
/* the rare case (end in gaps) */
if (unlikely(bio != NULL))
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index be6ae3b1bdbe..d1cb0d78ab84 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -24,8 +24,8 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
};
static int erofs_fill_dentries(struct dir_context *ctx,
- void *dentry_blk, unsigned *ofs,
- unsigned nameoff, unsigned maxsize)
+ void *dentry_blk, unsigned int *ofs,
+ unsigned int nameoff, unsigned int maxsize)
{
struct erofs_dirent *de = dentry_blk;
const struct erofs_dirent *end = dentry_blk + nameoff;
@@ -36,7 +36,7 @@ static int erofs_fill_dentries(struct dir_context *ctx,
int de_namelen;
unsigned char d_type;
#ifdef CONFIG_EROFS_FS_DEBUG
- unsigned dbg_namelen;
+ unsigned int dbg_namelen;
unsigned char dbg_namebuf[EROFS_NAME_LEN];
#endif
@@ -81,15 +81,15 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
struct inode *dir = file_inode(f);
struct address_space *mapping = dir->i_mapping;
const size_t dirsize = i_size_read(dir);
- unsigned i = ctx->pos / EROFS_BLKSIZ;
- unsigned ofs = ctx->pos % EROFS_BLKSIZ;
+ unsigned int i = ctx->pos / EROFS_BLKSIZ;
+ unsigned int ofs = ctx->pos % EROFS_BLKSIZ;
int err = 0;
bool initial = true;
while (ctx->pos < dirsize) {
struct page *dentry_page;
struct erofs_dirent *de;
- unsigned nameoff, maxsize;
+ unsigned int nameoff, maxsize;
dentry_page = read_mapping_page(mapping, i, NULL);
if (IS_ERR(dentry_page))
@@ -109,7 +109,8 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
goto skip_this;
}
- maxsize = min_t(unsigned, dirsize - ctx->pos + ofs, PAGE_SIZE);
+ maxsize = min_t(unsigned int,
+ dirsize - ctx->pos + ofs, PAGE_SIZE);
/* search dirents at the arbitrary position */
if (unlikely(initial)) {
diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h
index 2f8e2bf70941..d4bffa2852b3 100644
--- a/drivers/staging/erofs/erofs_fs.h
+++ b/drivers/staging/erofs/erofs_fs.h
@@ -202,6 +202,14 @@ struct erofs_extent_header {
* di_u.delta[1] = distance to its corresponding tail cluster
* (di_advise could be 0, 1 or 2)
*/
+enum {
+ Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
+ Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
+ Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
+ Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
+ Z_EROFS_VLE_CLUSTER_TYPE_MAX
+};
+
#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2
#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0
@@ -260,6 +268,9 @@ static inline void erofs_check_ondisk_layout_definitions(void)
BUILD_BUG_ON(sizeof(struct erofs_extent_header) != 16);
BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
+
+ BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) <
+ Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1);
}
#endif
diff --git a/drivers/staging/erofs/include/trace/events/erofs.h b/drivers/staging/erofs/include/trace/events/erofs.h
index 5aead93a762f..660c92fc1803 100644
--- a/drivers/staging/erofs/include/trace/events/erofs.h
+++ b/drivers/staging/erofs/include/trace/events/erofs.h
@@ -162,7 +162,8 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s",
show_dev_nid(__entry),
- __entry->la, __entry->llen, show_map_flags(__entry->flags))
+ __entry->la, __entry->llen,
+ __entry->flags ? show_map_flags(__entry->flags) : "NULL")
);
DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter,
@@ -172,6 +173,13 @@ DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter,
TP_ARGS(inode, map, flags)
);
+DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter,
+ TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
+ unsigned int flags),
+
+ TP_ARGS(inode, map, flags)
+);
+
DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned int flags, int ret),
@@ -204,7 +212,8 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
TP_printk("dev = (%d,%d), nid = %llu, flags %s "
"la %llu pa %llu llen %llu plen %llu mflags %s ret %d",
- show_dev_nid(__entry), show_map_flags(__entry->flags),
+ show_dev_nid(__entry),
+ __entry->flags ? show_map_flags(__entry->flags) : "NULL",
__entry->la, __entry->pa, __entry->llen, __entry->plen,
show_mflags(__entry->mflags), __entry->ret)
);
@@ -216,6 +225,13 @@ DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit,
TP_ARGS(inode, map, flags, ret)
);
+DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit,
+ TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
+ unsigned int flags, int ret),
+
+ TP_ARGS(inode, map, flags, ret)
+);
+
TRACE_EVENT(erofs_destroy_inode,
TP_PROTO(struct inode *inode),
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
index fbf6ff25cd1b..04c61a9d7b76 100644
--- a/drivers/staging/erofs/inode.c
+++ b/drivers/staging/erofs/inode.c
@@ -19,7 +19,7 @@ static int read_inode(struct inode *inode, void *data)
{
struct erofs_vnode *vi = EROFS_V(inode);
struct erofs_inode_v1 *v1 = data;
- const unsigned advise = le16_to_cpu(v1->i_advise);
+ const unsigned int advise = le16_to_cpu(v1->i_advise);
vi->data_mapping_mode = __inode_data_mapping(advise);
@@ -112,7 +112,8 @@ static int read_inode(struct inode *inode, void *data)
* try_lock since it takes no much overhead and
* will success immediately.
*/
-static int fill_inline_data(struct inode *inode, void *data, unsigned m_pofs)
+static int fill_inline_data(struct inode *inode, void *data,
+ unsigned int m_pofs)
{
struct erofs_vnode *vi = EROFS_V(inode);
struct erofs_sb_info *sbi = EROFS_I_SB(inode);
@@ -152,7 +153,7 @@ static int fill_inode(struct inode *inode, int isdir)
void *data;
int err;
erofs_blk_t blkaddr;
- unsigned ofs;
+ unsigned int ofs;
trace_erofs_fill_inode(inode, isdir);
@@ -231,10 +232,45 @@ out_unlock:
return err;
}
+/*
+ * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
+ * we should do more for 32-bit platform to find the right inode.
+ */
+#if BITS_PER_LONG == 32
+static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
+{
+ const erofs_nid_t nid = *(erofs_nid_t *)opaque;
+
+ return EROFS_V(inode)->nid == nid;
+}
+
+static int erofs_iget_set_actor(struct inode *inode, void *opaque)
+{
+ const erofs_nid_t nid = *(erofs_nid_t *)opaque;
+
+ inode->i_ino = erofs_inode_hash(nid);
+ return 0;
+}
+#endif
+
+static inline struct inode *erofs_iget_locked(struct super_block *sb,
+ erofs_nid_t nid)
+{
+ const unsigned long hashval = erofs_inode_hash(nid);
+
+#if BITS_PER_LONG >= 64
+ /* it is safe to use iget_locked for >= 64-bit platform */
+ return iget_locked(sb, hashval);
+#else
+ return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
+ erofs_iget_set_actor, &nid);
+#endif
+}
+
struct inode *erofs_iget(struct super_block *sb,
erofs_nid_t nid, bool isdir)
{
- struct inode *inode = iget_locked(sb, nid);
+ struct inode *inode = erofs_iget_locked(sb, nid);
if (unlikely(inode == NULL))
return ERR_PTR(-ENOMEM);
@@ -259,22 +295,16 @@ struct inode *erofs_iget(struct super_block *sb,
const struct inode_operations erofs_generic_xattr_iops = {
.listxattr = erofs_listxattr,
};
-#endif
-#ifdef CONFIG_EROFS_FS_XATTR
const struct inode_operations erofs_symlink_xattr_iops = {
.get_link = page_get_link,
.listxattr = erofs_listxattr,
};
-#endif
const struct inode_operations erofs_special_inode_operations = {
-#ifdef CONFIG_EROFS_FS_XATTR
.listxattr = erofs_listxattr,
-#endif
};
-#ifdef CONFIG_EROFS_FS_XATTR
const struct inode_operations erofs_fast_symlink_xattr_iops = {
.get_link = simple_get_link,
.listxattr = erofs_listxattr,
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 367b39fe46e5..57575c7f5635 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -42,12 +42,12 @@
#define DBG_BUGON(...) ((void)0)
#endif
-#ifdef CONFIG_EROFS_FAULT_INJECTION
enum {
FAULT_KMALLOC,
FAULT_MAX,
};
+#ifdef CONFIG_EROFS_FAULT_INJECTION
extern char *erofs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
@@ -95,6 +95,9 @@ struct erofs_sb_info {
/* the dedicated workstation for compression */
struct radix_tree_root workstn_tree;
+ /* threshold for decompression synchronously */
+ unsigned int max_sync_decompress_pages;
+
#ifdef EROFS_FS_HAS_MANAGED_CACHE
struct inode *managed_cache;
#endif
@@ -143,17 +146,24 @@ static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
}
return false;
}
+#else
+static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
+{
+ return false;
+}
+
+static inline void erofs_show_injection_info(int type)
+{
+}
#endif
static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
size_t size, gfp_t flags)
{
-#ifdef CONFIG_EROFS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_KMALLOC)) {
erofs_show_injection_info(FAULT_KMALLOC);
return NULL;
}
-#endif
return kmalloc(size, flags);
}
@@ -266,6 +276,20 @@ extern int erofs_try_to_free_cached_page(struct address_space *mapping,
struct page *page);
#endif
+#define DEFAULT_MAX_SYNC_DECOMPRESS_PAGES 3
+
+static inline bool __should_decompress_synchronously(struct erofs_sb_info *sbi,
+ unsigned int nr)
+{
+ return nr <= sbi->max_sync_decompress_pages;
+}
+
+int __init z_erofs_init_zip_subsystem(void);
+void z_erofs_exit_zip_subsystem(void);
+#else
+/* dummy initializer/finalizer for the decompression subsystem */
+static inline int z_erofs_init_zip_subsystem(void) { return 0; }
+static inline void z_erofs_exit_zip_subsystem(void) {}
#endif
/* we strictly follow PAGE_SIZE and no buffer head yet */
@@ -420,30 +444,30 @@ struct erofs_map_blocks {
#define EROFS_GET_BLOCKS_RAW 0x0001
/* data.c */
-static inline struct bio *prepare_bio(
- struct super_block *sb,
- erofs_blk_t blkaddr, unsigned nr_pages,
- bio_end_io_t endio)
+static inline struct bio *
+erofs_grab_bio(struct super_block *sb,
+ erofs_blk_t blkaddr, unsigned int nr_pages,
+ bio_end_io_t endio, bool nofail)
{
- gfp_t gfp = GFP_NOIO;
- struct bio *bio = bio_alloc(gfp, nr_pages);
-
- if (unlikely(bio == NULL) &&
- (current->flags & PF_MEMALLOC)) {
- do {
- nr_pages /= 2;
- if (unlikely(!nr_pages)) {
- bio = bio_alloc(gfp | __GFP_NOFAIL, 1);
- BUG_ON(bio == NULL);
- break;
+ const gfp_t gfp = GFP_NOIO;
+ struct bio *bio;
+
+ do {
+ if (nr_pages == 1) {
+ bio = bio_alloc(gfp | (nofail ? __GFP_NOFAIL : 0), 1);
+ if (unlikely(bio == NULL)) {
+ DBG_BUGON(nofail);
+ return ERR_PTR(-ENOMEM);
}
- bio = bio_alloc(gfp, nr_pages);
- } while (bio == NULL);
- }
+ break;
+ }
+ bio = bio_alloc(gfp, nr_pages);
+ nr_pages /= 2;
+ } while (unlikely(bio == NULL));
bio->bi_end_io = endio;
bio_set_dev(bio, sb->s_bdev);
- bio->bi_iter.bi_sector = blkaddr << LOG_SECTORS_PER_BLOCK;
+ bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
return bio;
}
@@ -453,8 +477,27 @@ static inline void __submit_bio(struct bio *bio, unsigned op, unsigned op_flags)
submit_bio(bio);
}
-extern struct page *erofs_get_meta_page(struct super_block *sb,
- erofs_blk_t blkaddr, bool prio);
+#ifndef CONFIG_EROFS_FS_IO_MAX_RETRIES
+#define EROFS_IO_MAX_RETRIES_NOFAIL 0
+#else
+#define EROFS_IO_MAX_RETRIES_NOFAIL CONFIG_EROFS_FS_IO_MAX_RETRIES
+#endif
+
+extern struct page *__erofs_get_meta_page(struct super_block *sb,
+ erofs_blk_t blkaddr, bool prio, bool nofail);
+
+static inline struct page *erofs_get_meta_page(struct super_block *sb,
+ erofs_blk_t blkaddr, bool prio)
+{
+ return __erofs_get_meta_page(sb, blkaddr, prio, false);
+}
+
+static inline struct page *erofs_get_meta_page_nofail(struct super_block *sb,
+ erofs_blk_t blkaddr, bool prio)
+{
+ return __erofs_get_meta_page(sb, blkaddr, prio, true);
+}
+
extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *,
struct page **, int);
@@ -465,14 +508,24 @@ struct erofs_map_blocks_iter {
};
-static inline struct page *erofs_get_inline_page(struct inode *inode,
- erofs_blk_t blkaddr)
+static inline struct page *
+erofs_get_inline_page(struct inode *inode,
+ erofs_blk_t blkaddr)
{
return erofs_get_meta_page(inode->i_sb,
blkaddr, S_ISDIR(inode->i_mode));
}
/* inode.c */
+static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
+{
+#if BITS_PER_LONG == 32
+ return (nid >> 32) ^ (nid & 0xffffffff);
+#else
+ return nid;
+#endif
+}
+
extern struct inode *erofs_iget(struct super_block *sb,
erofs_nid_t nid, bool dir);
@@ -480,13 +533,11 @@ extern struct inode *erofs_iget(struct super_block *sb,
int erofs_namei(struct inode *dir, struct qstr *name,
erofs_nid_t *nid, unsigned *d_type);
-/* xattr.c */
#ifdef CONFIG_EROFS_FS_XATTR
+/* xattr.c */
extern const struct xattr_handler *erofs_xattr_handlers[];
-#endif
-/* symlink */
-#ifdef CONFIG_EROFS_FS_XATTR
+/* symlink and special inode */
extern const struct inode_operations erofs_symlink_xattr_iops;
extern const struct inode_operations erofs_fast_symlink_xattr_iops;
extern const struct inode_operations erofs_special_inode_operations;
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
index 546a47156101..5596c52e246d 100644
--- a/drivers/staging/erofs/namei.c
+++ b/drivers/staging/erofs/namei.c
@@ -17,9 +17,9 @@
/* based on the value of qn->len is accurate */
static inline int dirnamecmp(struct qstr *qn,
- struct qstr *qd, unsigned *matched)
+ struct qstr *qd, unsigned int *matched)
{
- unsigned i = *matched, len = min(qn->len, qd->len);
+ unsigned int i = *matched, len = min(qn->len, qd->len);
loop:
if (unlikely(i >= len)) {
*matched = i;
@@ -46,8 +46,8 @@ static struct erofs_dirent *find_target_dirent(
struct qstr *name,
u8 *data, int maxsize)
{
- unsigned ndirents, head, back;
- unsigned startprfx, endprfx;
+ unsigned int ndirents, head, back;
+ unsigned int startprfx, endprfx;
struct erofs_dirent *const de = (struct erofs_dirent *)data;
/* make sure that maxsize is valid */
@@ -63,9 +63,9 @@ static struct erofs_dirent *find_target_dirent(
startprfx = endprfx = 0;
while (head <= back) {
- unsigned mid = head + (back - head) / 2;
- unsigned nameoff = le16_to_cpu(de[mid].nameoff);
- unsigned matched = min(startprfx, endprfx);
+ unsigned int mid = head + (back - head) / 2;
+ unsigned int nameoff = le16_to_cpu(de[mid].nameoff);
+ unsigned int matched = min(startprfx, endprfx);
struct qstr dname = QSTR_INIT(data + nameoff,
unlikely(mid >= ndirents - 1) ?
@@ -95,8 +95,8 @@ static struct page *find_target_block_classic(
struct inode *dir,
struct qstr *name, int *_diff)
{
- unsigned startprfx, endprfx;
- unsigned head, back;
+ unsigned int startprfx, endprfx;
+ unsigned int head, back;
struct address_space *const mapping = dir->i_mapping;
struct page *candidate = ERR_PTR(-ENOENT);
@@ -105,7 +105,7 @@ static struct page *find_target_block_classic(
back = inode_datablocks(dir) - 1;
while (head <= back) {
- unsigned mid = head + (back - head) / 2;
+ unsigned int mid = head + (back - head) / 2;
struct page *page = read_mapping_page(mapping, mid, NULL);
if (IS_ERR(page)) {
@@ -115,10 +115,10 @@ exact_out:
return page;
} else {
int diff;
- unsigned ndirents, matched;
+ unsigned int ndirents, matched;
struct qstr dname;
struct erofs_dirent *de = kmap_atomic(page);
- unsigned nameoff = le16_to_cpu(de->nameoff);
+ unsigned int nameoff = le16_to_cpu(de->nameoff);
ndirents = nameoff / sizeof(*de);
@@ -164,7 +164,7 @@ exact_out:
int erofs_namei(struct inode *dir,
struct qstr *name,
- erofs_nid_t *nid, unsigned *d_type)
+ erofs_nid_t *nid, unsigned int *d_type)
{
int diff;
struct page *page;
@@ -204,7 +204,7 @@ static struct dentry *erofs_lookup(struct inode *dir,
{
int err;
erofs_nid_t nid;
- unsigned d_type;
+ unsigned int d_type;
struct inode *inode;
DBG_BUGON(!d_really_is_negative(dentry));
@@ -223,18 +223,13 @@ static struct dentry *erofs_lookup(struct inode *dir,
if (err == -ENOENT) {
/* negative dentry */
inode = NULL;
- goto negative_out;
- } else if (unlikely(err))
- return ERR_PTR(err);
-
- debugln("%s, %s (nid %llu) found, d_type %u", __func__,
- dentry->d_name.name, nid, d_type);
-
- inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
-
-negative_out:
+ } else if (unlikely(err)) {
+ inode = ERR_PTR(err);
+ } else {
+ debugln("%s, %s (nid %llu) found, d_type %u", __func__,
+ dentry->d_name.name, nid, d_type);
+ inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
+ }
return d_splice_alias(inode, dentry);
}
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 2df9768edac9..f69e619807a1 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -29,7 +29,7 @@ static void init_once(void *ptr)
inode_init_once(&vi->vfs_inode);
}
-static int erofs_init_inode_cache(void)
+static int __init erofs_init_inode_cache(void)
{
erofs_inode_cachep = kmem_cache_create("erofs_inode",
sizeof(struct erofs_vnode), 0,
@@ -81,7 +81,7 @@ static int superblock_read(struct super_block *sb)
struct erofs_sb_info *sbi;
struct buffer_head *bh;
struct erofs_super_block *layout;
- unsigned blkszbits;
+ unsigned int blkszbits;
int ret;
bh = sb_bread(sb, 0);
@@ -116,9 +116,10 @@ static int superblock_read(struct super_block *sb)
#endif
sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1;
#ifdef CONFIG_EROFS_FS_ZIP
- sbi->clusterbits = 12;
+ /* TODO: clusterbits should be related to inode */
+ sbi->clusterbits = blkszbits;
- if (1 << (sbi->clusterbits - 12) > Z_EROFS_CLUSTER_MAX_PAGES)
+ if (1 << (sbi->clusterbits - PAGE_SHIFT) > Z_EROFS_CLUSTER_MAX_PAGES)
errln("clusterbits %u is not supported on this kernel",
sbi->clusterbits);
#endif
@@ -144,8 +145,8 @@ char *erofs_fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
};
-static void erofs_build_fault_attr(struct erofs_sb_info *sbi,
- unsigned int rate)
+static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
+ unsigned int rate)
{
struct erofs_fault_info *ffi = &sbi->fault_info;
@@ -156,11 +157,52 @@ static void erofs_build_fault_attr(struct erofs_sb_info *sbi,
} else {
memset(ffi, 0, sizeof(struct erofs_fault_info));
}
+
+ set_opt(sbi, FAULT_INJECTION);
+}
+
+static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
+ substring_t *args)
+{
+ int rate = 0;
+
+ if (args->from && match_int(args, &rate))
+ return -EINVAL;
+
+ __erofs_build_fault_attr(sbi, rate);
+ return 0;
+}
+
+static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
+{
+ return sbi->fault_info.inject_rate;
+}
+#else
+static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
+ unsigned int rate)
+{
+}
+
+static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
+ substring_t *args)
+{
+ infoln("fault_injection options not supported");
+ return 0;
+}
+
+static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
+{
+ return 0;
}
#endif
static void default_options(struct erofs_sb_info *sbi)
{
+ /* set up some FS parameters */
+#ifdef CONFIG_EROFS_FS_ZIP
+ sbi->max_sync_decompress_pages = DEFAULT_MAX_SYNC_DECOMPRESS_PAGES;
+#endif
+
#ifdef CONFIG_EROFS_FS_XATTR
set_opt(sbi, XATTR_USER);
#endif
@@ -192,7 +234,7 @@ static int parse_options(struct super_block *sb, char *options)
{
substring_t args[MAX_OPT_ARGS];
char *p;
- int arg = 0;
+ int err;
if (!options)
return 0;
@@ -238,15 +280,11 @@ static int parse_options(struct super_block *sb, char *options)
break;
#endif
case Opt_fault_injection:
- if (args->from && match_int(args, &arg))
- return -EINVAL;
-#ifdef CONFIG_EROFS_FAULT_INJECTION
- erofs_build_fault_attr(EROFS_SB(sb), arg);
- set_opt(EROFS_SB(sb), FAULT_INJECTION);
-#else
- infoln("FAULT_INJECTION was not selected");
-#endif
+ err = erofs_build_fault_attr(EROFS_SB(sb), args);
+ if (err)
+ return err;
break;
+
default:
errln("Unrecognized mount option \"%s\" "
"or missing value", p);
@@ -521,11 +559,6 @@ static struct file_system_type erofs_fs_type = {
};
MODULE_ALIAS_FS("erofs");
-#ifdef CONFIG_EROFS_FS_ZIP
-extern int z_erofs_init_zip_subsystem(void);
-extern void z_erofs_exit_zip_subsystem(void);
-#endif
-
static int __init erofs_module_init(void)
{
int err;
@@ -541,11 +574,9 @@ static int __init erofs_module_init(void)
if (err)
goto shrinker_err;
-#ifdef CONFIG_EROFS_FS_ZIP
err = z_erofs_init_zip_subsystem();
if (err)
goto zip_err;
-#endif
err = register_filesystem(&erofs_fs_type);
if (err)
@@ -555,10 +586,8 @@ static int __init erofs_module_init(void)
return 0;
fs_err:
-#ifdef CONFIG_EROFS_FS_ZIP
z_erofs_exit_zip_subsystem();
zip_err:
-#endif
unregister_shrinker(&erofs_shrinker_info);
shrinker_err:
erofs_exit_inode_cache();
@@ -569,9 +598,7 @@ icache_err:
static void __exit erofs_module_exit(void)
{
unregister_filesystem(&erofs_fs_type);
-#ifdef CONFIG_EROFS_FS_ZIP
z_erofs_exit_zip_subsystem();
-#endif
unregister_shrinker(&erofs_shrinker_info);
erofs_exit_inode_cache();
infoln("successfully finalize erofs");
@@ -615,20 +642,31 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
else
seq_puts(seq, ",noacl");
#endif
-#ifdef CONFIG_EROFS_FAULT_INJECTION
if (test_opt(sbi, FAULT_INJECTION))
seq_printf(seq, ",fault_injection=%u",
- sbi->fault_info.inject_rate);
-#endif
+ erofs_get_fault_rate(sbi));
return 0;
}
static int erofs_remount(struct super_block *sb, int *flags, char *data)
{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ unsigned int org_mnt_opt = sbi->mount_opt;
+ unsigned int org_inject_rate = erofs_get_fault_rate(sbi);
+ int err;
+
BUG_ON(!sb_rdonly(sb));
+ err = parse_options(sb, data);
+ if (err)
+ goto out;
*flags |= SB_RDONLY;
return 0;
+out:
+ __erofs_build_fault_attr(sbi, org_inject_rate);
+ sbi->mount_opt = org_mnt_opt;
+
+ return err;
}
const struct super_operations erofs_sops = {
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 8721f0a41d15..79d3ba62b298 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -13,6 +13,8 @@
#include "unzip_vle.h"
#include <linux/prefetch.h>
+#include <trace/events/erofs.h>
+
static struct workqueue_struct *z_erofs_workqueue __read_mostly;
static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
@@ -27,7 +29,7 @@ void z_erofs_exit_zip_subsystem(void)
static inline int init_unzip_workqueue(void)
{
- const unsigned onlinecpus = num_possible_cpus();
+ const unsigned int onlinecpus = num_possible_cpus();
/*
* we don't need too many threads, limiting threads
@@ -40,7 +42,7 @@ static inline int init_unzip_workqueue(void)
return z_erofs_workqueue != NULL ? 0 : -ENOMEM;
}
-int z_erofs_init_zip_subsystem(void)
+int __init z_erofs_init_zip_subsystem(void)
{
z_erofs_workgroup_cachep =
kmem_cache_create("erofs_compress",
@@ -89,7 +91,7 @@ struct z_erofs_vle_work_builder {
/* pages used for reading the compressed data */
struct page **compressed_pages;
- unsigned compressed_deficit;
+ unsigned int compressed_deficit;
};
#define VLE_WORK_BUILDER_INIT() \
@@ -232,7 +234,7 @@ static int z_erofs_vle_work_add_page(
ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
page, type, &occupied);
- builder->work->vcnt += (unsigned)ret;
+ builder->work->vcnt += (unsigned int)ret;
return ret ? 0 : -EAGAIN;
}
@@ -271,36 +273,39 @@ retry:
return true; /* lucky, I am the followee :) */
}
+struct z_erofs_vle_work_finder {
+ struct super_block *sb;
+ pgoff_t idx;
+ unsigned int pageofs;
+
+ struct z_erofs_vle_workgroup **grp_ret;
+ enum z_erofs_vle_work_role *role;
+ z_erofs_vle_owned_workgrp_t *owned_head;
+ bool *hosted;
+};
+
static struct z_erofs_vle_work *
-z_erofs_vle_work_lookup(struct super_block *sb,
- pgoff_t idx, unsigned pageofs,
- struct z_erofs_vle_workgroup **grp_ret,
- enum z_erofs_vle_work_role *role,
- z_erofs_vle_owned_workgrp_t *owned_head,
- bool *hosted)
+z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
{
bool tag, primary;
struct erofs_workgroup *egrp;
struct z_erofs_vle_workgroup *grp;
struct z_erofs_vle_work *work;
- egrp = erofs_find_workgroup(sb, idx, &tag);
+ egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
if (egrp == NULL) {
- *grp_ret = NULL;
+ *f->grp_ret = NULL;
return NULL;
}
- *grp_ret = grp = container_of(egrp,
- struct z_erofs_vle_workgroup, obj);
+ grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
+ *f->grp_ret = grp;
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
- work = z_erofs_vle_grab_work(grp, pageofs);
+ work = z_erofs_vle_grab_work(grp, f->pageofs);
+ /* if multiref is disabled, `primary' is always true */
primary = true;
-#else
- BUG();
-#endif
- DBG_BUGON(work->pageofs != pageofs);
+ DBG_BUGON(work->pageofs != f->pageofs);
/*
* lock must be taken first to avoid grp->next == NIL between
@@ -340,43 +345,35 @@ z_erofs_vle_work_lookup(struct super_block *sb,
*/
mutex_lock(&work->lock);
- *hosted = false;
+ *f->hosted = false;
if (!primary)
- *role = Z_EROFS_VLE_WORK_SECONDARY;
+ *f->role = Z_EROFS_VLE_WORK_SECONDARY;
/* claim the workgroup if possible */
- else if (try_to_claim_workgroup(grp, owned_head, hosted))
- *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+ else if (try_to_claim_workgroup(grp, f->owned_head, f->hosted))
+ *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
else
- *role = Z_EROFS_VLE_WORK_PRIMARY;
+ *f->role = Z_EROFS_VLE_WORK_PRIMARY;
return work;
}
static struct z_erofs_vle_work *
-z_erofs_vle_work_register(struct super_block *sb,
- struct z_erofs_vle_workgroup **grp_ret,
- struct erofs_map_blocks *map,
- pgoff_t index, unsigned pageofs,
- enum z_erofs_vle_work_role *role,
- z_erofs_vle_owned_workgrp_t *owned_head,
- bool *hosted)
+z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
+ struct erofs_map_blocks *map)
{
- bool newgrp = false;
- struct z_erofs_vle_workgroup *grp = *grp_ret;
+ bool gnew = false;
+ struct z_erofs_vle_workgroup *grp = *f->grp_ret;
struct z_erofs_vle_work *work;
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+ /* if multiref is disabled, grp should never be nullptr */
BUG_ON(grp != NULL);
-#else
- if (grp != NULL)
- goto skip;
-#endif
+
/* no available workgroup, let's allocate one */
grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
if (unlikely(grp == NULL))
return ERR_PTR(-ENOMEM);
- grp->obj.index = index;
+ grp->obj.index = f->idx;
grp->llen = map->m_llen;
z_erofs_vle_set_workgrp_fmt(grp,
@@ -386,26 +383,20 @@ z_erofs_vle_work_register(struct super_block *sb,
atomic_set(&grp->obj.refcount, 1);
/* new workgrps have been claimed as type 1 */
- WRITE_ONCE(grp->next, *owned_head);
+ WRITE_ONCE(grp->next, *f->owned_head);
/* primary and followed work for all new workgrps */
- *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+ *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
/* it should be submitted by ourselves */
- *hosted = true;
+ *f->hosted = true;
- newgrp = true;
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
-skip:
- /* currently unimplemented */
- BUG();
-#else
+ gnew = true;
work = z_erofs_vle_grab_primary_work(grp);
-#endif
- work->pageofs = pageofs;
+ work->pageofs = f->pageofs;
mutex_init(&work->lock);
- if (newgrp) {
- int err = erofs_register_workgroup(sb, &grp->obj, 0);
+ if (gnew) {
+ int err = erofs_register_workgroup(f->sb, &grp->obj, 0);
if (err) {
kmem_cache_free(z_erofs_workgroup_cachep, grp);
@@ -413,24 +404,12 @@ skip:
}
}
- *owned_head = *grp_ret = grp;
+ *f->owned_head = *f->grp_ret = grp;
mutex_lock(&work->lock);
return work;
}
-static inline void __update_workgrp_llen(struct z_erofs_vle_workgroup *grp,
- unsigned int llen)
-{
- while (1) {
- unsigned int orig_llen = grp->llen;
-
- if (orig_llen >= llen || orig_llen ==
- cmpxchg(&grp->llen, orig_llen, llen))
- break;
- }
-}
-
#define builder_is_followed(builder) \
((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
@@ -439,10 +418,17 @@ static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
struct erofs_map_blocks *map,
z_erofs_vle_owned_workgrp_t *owned_head)
{
- const unsigned clusterpages = erofs_clusterpages(EROFS_SB(sb));
- const erofs_blk_t index = erofs_blknr(map->m_pa);
- const unsigned pageofs = map->m_la & ~PAGE_MASK;
+ const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
struct z_erofs_vle_workgroup *grp;
+ const struct z_erofs_vle_work_finder finder = {
+ .sb = sb,
+ .idx = erofs_blknr(map->m_pa),
+ .pageofs = map->m_la & ~PAGE_MASK,
+ .grp_ret = &grp,
+ .role = &builder->role,
+ .owned_head = owned_head,
+ .hosted = &builder->hosted
+ };
struct z_erofs_vle_work *work;
DBG_BUGON(builder->work != NULL);
@@ -454,16 +440,19 @@ static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
DBG_BUGON(erofs_blkoff(map->m_pa));
repeat:
- work = z_erofs_vle_work_lookup(sb, index,
- pageofs, &grp, &builder->role, owned_head, &builder->hosted);
+ work = z_erofs_vle_work_lookup(&finder);
if (work != NULL) {
- __update_workgrp_llen(grp, map->m_llen);
+ unsigned int orig_llen;
+
+ /* increase workgroup `llen' if needed */
+ while ((orig_llen = READ_ONCE(grp->llen)) < map->m_llen &&
+ orig_llen != cmpxchg_relaxed(&grp->llen,
+ orig_llen, map->m_llen))
+ cpu_relax();
goto got_it;
}
- work = z_erofs_vle_work_register(sb, &grp, map, index, pageofs,
- &builder->role, owned_head, &builder->hosted);
-
+ work = z_erofs_vle_work_register(&finder, map);
if (unlikely(work == ERR_PTR(-EAGAIN)))
goto repeat;
@@ -605,8 +594,10 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
#endif
enum z_erofs_page_type page_type;
- unsigned cur, end, spiltted, index;
- int err;
+ unsigned int cur, end, spiltted, index;
+ int err = 0;
+
+ trace_erofs_readpage(page, false);
/* register locked file pages as online pages in pack */
z_erofs_onlinepage_init(page);
@@ -624,7 +615,7 @@ repeat:
/* go ahead the next map_blocks */
debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
- if (!z_erofs_vle_work_iter_end(builder))
+ if (z_erofs_vle_work_iter_end(builder))
fe->initial = false;
map->m_la = offset + cur;
@@ -633,12 +624,11 @@ repeat:
if (unlikely(err))
goto err_out;
- /* deal with hole (FIXME! broken now) */
if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
goto hitted;
DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
- BUG_ON(erofs_blkoff(map->m_pa));
+ DBG_BUGON(erofs_blkoff(map->m_pa));
err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
if (unlikely(err))
@@ -662,7 +652,7 @@ repeat:
tight &= builder_is_followed(builder);
work = builder->work;
hitted:
- cur = end - min_t(unsigned, offset + end - map->m_la, end);
+ cur = end - min_t(unsigned int, offset + end - map->m_la, end);
if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
zero_user_segment(page, cur, end);
goto next_part;
@@ -683,7 +673,7 @@ retry:
err = z_erofs_vle_work_add_page(builder,
newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
- if (!err)
+ if (likely(!err))
goto retry;
}
@@ -694,9 +684,10 @@ retry:
/* FIXME! avoid the last relundant fixup & endio */
z_erofs_onlinepage_fixup(page, index, true);
- ++spiltted;
- /* also update nr_pages and increase queued_pages */
+ /* bump up the number of spiltted parts of a page */
+ ++spiltted;
+ /* also update nr_pages */
work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
next_part:
/* can be used for verification */
@@ -706,16 +697,18 @@ next_part:
if (end > 0)
goto repeat;
+out:
/* FIXME! avoid the last relundant fixup & endio */
z_erofs_onlinepage_endio(page);
debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
__func__, page, spiltted, map->m_llen);
- return 0;
+ return err;
+ /* if some error occurred while processing this page */
err_out:
- /* TODO: the missing error handing cases */
- return err;
+ SetPageError(page);
+ goto out;
}
static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
@@ -736,7 +729,7 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
static inline void z_erofs_vle_read_endio(struct bio *bio)
{
const blk_status_t err = bio->bi_status;
- unsigned i;
+ unsigned int i;
struct bio_vec *bvec;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
struct address_space *mngda = NULL;
@@ -788,16 +781,14 @@ static int z_erofs_vle_unzip(struct super_block *sb,
#ifdef EROFS_FS_HAS_MANAGED_CACHE
struct address_space *const mngda = sbi->managed_cache->i_mapping;
#endif
- const unsigned clusterpages = erofs_clusterpages(sbi);
+ const unsigned int clusterpages = erofs_clusterpages(sbi);
struct z_erofs_pagevec_ctor ctor;
- unsigned nr_pages;
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
- unsigned sparsemem_pages = 0;
-#endif
+ unsigned int nr_pages;
+ unsigned int sparsemem_pages = 0;
struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
struct page **pages, **compressed_pages, *page;
- unsigned i, llen;
+ unsigned int i, llen;
enum z_erofs_page_type page_type;
bool overlapped;
@@ -806,11 +797,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
int err;
might_sleep();
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
work = z_erofs_vle_grab_primary_work(grp);
-#else
- BUG();
-#endif
BUG_ON(!READ_ONCE(work->nr_pages));
mutex_lock(&work->lock);
@@ -844,7 +831,7 @@ repeat:
Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
for (i = 0; i < work->vcnt; ++i) {
- unsigned pagenr;
+ unsigned int pagenr;
page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
@@ -861,13 +848,11 @@ repeat:
pagenr = z_erofs_onlinepage_index(page);
BUG_ON(pagenr >= nr_pages);
-
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
BUG_ON(pages[pagenr] != NULL);
- ++sparsemem_pages;
-#endif
+
pages[pagenr] = page;
}
+ sparsemem_pages = i;
z_erofs_pagevec_ctor_exit(&ctor, true);
@@ -875,7 +860,7 @@ repeat:
compressed_pages = grp->compressed_pages;
for (i = 0; i < clusterpages; ++i) {
- unsigned pagenr;
+ unsigned int pagenr;
page = compressed_pages[i];
@@ -897,10 +882,8 @@ repeat:
pagenr = z_erofs_onlinepage_index(page);
BUG_ON(pagenr >= nr_pages);
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
BUG_ON(pages[pagenr] != NULL);
++sparsemem_pages;
-#endif
pages[pagenr] = page;
overlapped = true;
@@ -926,12 +909,10 @@ repeat:
if (err != -ENOTSUPP)
goto out_percpu;
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
if (sparsemem_pages >= nr_pages) {
BUG_ON(sparsemem_pages > nr_pages);
goto skip_allocpage;
}
-#endif
for (i = 0; i < nr_pages; ++i) {
if (pages[i] != NULL)
@@ -940,9 +921,7 @@ repeat:
pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
}
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
skip_allocpage:
-#endif
vout = erofs_vmap(pages, nr_pages);
err = z_erofs_vle_unzip_vmap(compressed_pages,
@@ -1100,7 +1079,7 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
bool force_fg)
{
struct erofs_sb_info *const sbi = EROFS_SB(sb);
- const unsigned clusterpages = erofs_clusterpages(sbi);
+ const unsigned int clusterpages = erofs_clusterpages(sbi);
const gfp_t gfp = GFP_NOFS;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
struct address_space *const mngda = sbi->managed_cache->i_mapping;
@@ -1112,7 +1091,7 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
/* since bio will be NULL, no need to initialize last_index */
pgoff_t uninitialized_var(last_index);
bool force_submit = false;
- unsigned nr_bios;
+ unsigned int nr_bios;
if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
return false;
@@ -1144,7 +1123,7 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
struct z_erofs_vle_workgroup *grp;
struct page **compressed_pages, *oldpage, *page;
pgoff_t first_index;
- unsigned i = 0;
+ unsigned int i = 0;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
unsigned int noio = 0;
bool cachemngd;
@@ -1213,8 +1192,8 @@ submit_bio_retry:
}
if (bio == NULL) {
- bio = prepare_bio(sb, first_index + i,
- BIO_MAX_PAGES, z_erofs_vle_read_endio);
+ bio = erofs_grab_bio(sb, first_index + i,
+ BIO_MAX_PAGES, z_erofs_vle_read_endio, true);
bio->bi_private = tagptr_cast_ptr(bi_private);
++nr_bios;
@@ -1309,7 +1288,7 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
LIST_HEAD(pagepool);
#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
- f.cachedzone_la = page->index << PAGE_SHIFT;
+ f.cachedzone_la = (erofs_off_t)page->index << PAGE_SHIFT;
#endif
err = z_erofs_do_read_page(&f, page, &pagepool);
(void)z_erofs_vle_work_iter_end(&f.builder);
@@ -1329,20 +1308,25 @@ out:
return 0;
}
-static inline int __z_erofs_vle_normalaccess_readpages(
- struct file *filp,
- struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages, bool sync)
+static int z_erofs_vle_normalaccess_readpages(struct file *filp,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned int nr_pages)
{
struct inode *const inode = mapping->host;
+ struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+ const bool sync = __should_decompress_synchronously(sbi, nr_pages);
struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
struct page *head = NULL;
LIST_HEAD(pagepool);
+ trace_erofs_readpages(mapping->host, lru_to_page(pages),
+ nr_pages, false);
+
#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
- f.cachedzone_la = lru_to_page(pages)->index << PAGE_SHIFT;
+ f.cachedzone_la = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
#endif
for (; nr_pages; --nr_pages) {
struct page *page = lru_to_page(pages);
@@ -1390,56 +1374,45 @@ static inline int __z_erofs_vle_normalaccess_readpages(
return 0;
}
-static int z_erofs_vle_normalaccess_readpages(
- struct file *filp,
- struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
-{
- return __z_erofs_vle_normalaccess_readpages(filp,
- mapping, pages, nr_pages,
- nr_pages < 4 /* sync */);
-}
-
const struct address_space_operations z_erofs_vle_normalaccess_aops = {
.readpage = z_erofs_vle_normalaccess_readpage,
.readpages = z_erofs_vle_normalaccess_readpages,
};
+/*
+ * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
+ * ---
+ * VLE compression mode attempts to compress a number of logical data into
+ * a physical cluster with a fixed size.
+ * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
+ */
#define __vle_cluster_advise(x, bit, bits) \
((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))
#define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
-enum {
- Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
- Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
- Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
- Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
- Z_EROFS_VLE_CLUSTER_TYPE_MAX
-};
-
#define vle_cluster_type(di) \
__vle_cluster_type((di)->di_advise)
-static inline unsigned
-vle_compressed_index_clusterofs(unsigned clustersize,
- struct z_erofs_vle_decompressed_index *di)
+static int
+vle_decompressed_index_clusterofs(unsigned int *clusterofs,
+ unsigned int clustersize,
+ struct z_erofs_vle_decompressed_index *di)
{
- debugln("%s, vle=%pK, advise=%x (type %u), clusterofs=%x blkaddr=%x",
- __func__, di, di->di_advise, vle_cluster_type(di),
- di->di_clusterofs, di->di_u.blkaddr);
-
switch (vle_cluster_type(di)) {
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+ *clusterofs = clustersize;
break;
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
- return di->di_clusterofs;
+ *clusterofs = le16_to_cpu(di->di_clusterofs);
+ break;
default:
- BUG_ON(1);
+ DBG_BUGON(1);
+ return -EIO;
}
- return clustersize;
+ return 0;
}
static inline erofs_blk_t
@@ -1448,7 +1421,7 @@ vle_extent_blkaddr(struct inode *inode, pgoff_t index)
struct erofs_sb_info *sbi = EROFS_I_SB(inode);
struct erofs_vnode *vi = EROFS_V(inode);
- unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
+ unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
vi->xattr_isize) + sizeof(struct erofs_extent_header) +
index * sizeof(struct z_erofs_vle_decompressed_index);
@@ -1461,95 +1434,117 @@ vle_extent_blkoff(struct inode *inode, pgoff_t index)
struct erofs_sb_info *sbi = EROFS_I_SB(inode);
struct erofs_vnode *vi = EROFS_V(inode);
- unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
+ unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
vi->xattr_isize) + sizeof(struct erofs_extent_header) +
index * sizeof(struct z_erofs_vle_decompressed_index);
return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
}
-/*
- * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
- * ---
- * VLE compression mode attempts to compress a number of logical data into
- * a physical cluster with a fixed size.
- * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
- */
-static erofs_off_t vle_get_logical_extent_head(
- struct inode *inode,
- struct page **page_iter,
- void **kaddr_iter,
- unsigned lcn, /* logical cluster number */
- erofs_blk_t *pcn,
- unsigned *flags)
+struct vle_map_blocks_iter_ctx {
+ struct inode *inode;
+ struct super_block *sb;
+ unsigned int clusterbits;
+
+ struct page **mpage_ret;
+ void **kaddr_ret;
+};
+
+static int
+vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
+ unsigned int lcn, /* logical cluster number */
+ unsigned long long *ofs,
+ erofs_blk_t *pblk,
+ unsigned int *flags)
{
- /* for extent meta */
- struct page *page = *page_iter;
- erofs_blk_t blkaddr = vle_extent_blkaddr(inode, lcn);
+ const unsigned int clustersize = 1 << ctx->clusterbits;
+ const erofs_blk_t mblk = vle_extent_blkaddr(ctx->inode, lcn);
+ struct page *mpage = *ctx->mpage_ret; /* extent metapage */
+
struct z_erofs_vle_decompressed_index *di;
- unsigned long long ofs;
- const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
- const unsigned int clustersize = 1 << clusterbits;
+ unsigned int cluster_type, delta0;
- if (page->index != blkaddr) {
- kunmap_atomic(*kaddr_iter);
- unlock_page(page);
- put_page(page);
+ if (mpage->index != mblk) {
+ kunmap_atomic(*ctx->kaddr_ret);
+ unlock_page(mpage);
+ put_page(mpage);
- *page_iter = page = erofs_get_meta_page(inode->i_sb,
- blkaddr, false);
- *kaddr_iter = kmap_atomic(page);
+ mpage = erofs_get_meta_page(ctx->sb, mblk, false);
+ if (IS_ERR(mpage)) {
+ *ctx->mpage_ret = NULL;
+ return PTR_ERR(mpage);
+ }
+ *ctx->mpage_ret = mpage;
+ *ctx->kaddr_ret = kmap_atomic(mpage);
}
- di = *kaddr_iter + vle_extent_blkoff(inode, lcn);
- switch (vle_cluster_type(di)) {
- case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
- BUG_ON(!di->di_u.delta[0]);
- BUG_ON(lcn < di->di_u.delta[0]);
+ di = *ctx->kaddr_ret + vle_extent_blkoff(ctx->inode, lcn);
- ofs = vle_get_logical_extent_head(inode,
- page_iter, kaddr_iter,
- lcn - di->di_u.delta[0], pcn, flags);
- break;
+ cluster_type = vle_cluster_type(di);
+ switch (cluster_type) {
+ case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+ delta0 = le16_to_cpu(di->di_u.delta[0]);
+ if (unlikely(!delta0 || delta0 > lcn)) {
+ errln("invalid NONHEAD dl0 %u at lcn %u of nid %llu",
+ delta0, lcn, EROFS_V(ctx->inode)->nid);
+ DBG_BUGON(1);
+ return -EIO;
+ }
+ return vle_get_logical_extent_head(ctx,
+ lcn - delta0, ofs, pblk, flags);
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
*flags ^= EROFS_MAP_ZIPPED;
+ /* fallthrough */
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
/* clustersize should be a power of two */
- ofs = ((unsigned long long)lcn << clusterbits) +
+ *ofs = ((u64)lcn << ctx->clusterbits) +
(le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
- *pcn = le32_to_cpu(di->di_u.blkaddr);
+ *pblk = le32_to_cpu(di->di_u.blkaddr);
break;
default:
- BUG_ON(1);
+ errln("unknown cluster type %u at lcn %u of nid %llu",
+ cluster_type, lcn, EROFS_V(ctx->inode)->nid);
+ DBG_BUGON(1);
+ return -EIO;
}
- return ofs;
+ return 0;
}
int z_erofs_map_blocks_iter(struct inode *inode,
struct erofs_map_blocks *map,
struct page **mpage_ret, int flags)
{
+ void *kaddr;
+ const struct vle_map_blocks_iter_ctx ctx = {
+ .inode = inode,
+ .sb = inode->i_sb,
+ .clusterbits = EROFS_I_SB(inode)->clusterbits,
+ .mpage_ret = mpage_ret,
+ .kaddr_ret = &kaddr
+ };
+ const unsigned int clustersize = 1 << ctx.clusterbits;
+ /* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
+ const bool initial = !map->m_llen;
+
/* logicial extent (start, end) offset */
unsigned long long ofs, end;
- struct z_erofs_vle_decompressed_index *di;
- erofs_blk_t e_blkaddr, pcn;
- unsigned lcn, logical_cluster_ofs, cluster_type;
+ unsigned int lcn;
u32 ofs_rem;
+
+ /* initialize `pblk' to keep gcc from printing foolish warnings */
+ erofs_blk_t mblk, pblk = 0;
struct page *mpage = *mpage_ret;
- void *kaddr;
- bool initial;
- const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
- const unsigned int clustersize = 1 << clusterbits;
+ struct z_erofs_vle_decompressed_index *di;
+ unsigned int cluster_type, logical_cluster_ofs;
int err = 0;
- /* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
- initial = !map->m_llen;
+ trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
/* when trying to read beyond EOF, leave it unmapped */
if (unlikely(map->m_la >= inode->i_size)) {
- BUG_ON(!initial);
+ DBG_BUGON(!initial);
map->m_llen = map->m_la + 1 - inode->i_size;
- map->m_la = inode->i_size - 1;
+ map->m_la = inode->i_size;
map->m_flags = 0;
goto out;
}
@@ -1560,16 +1555,20 @@ int z_erofs_map_blocks_iter(struct inode *inode,
ofs = map->m_la + map->m_llen;
/* clustersize should be power of two */
- lcn = ofs >> clusterbits;
+ lcn = ofs >> ctx.clusterbits;
ofs_rem = ofs & (clustersize - 1);
- e_blkaddr = vle_extent_blkaddr(inode, lcn);
+ mblk = vle_extent_blkaddr(inode, lcn);
- if (mpage == NULL || mpage->index != e_blkaddr) {
+ if (!mpage || mpage->index != mblk) {
if (mpage != NULL)
put_page(mpage);
- mpage = erofs_get_meta_page(inode->i_sb, e_blkaddr, false);
+ mpage = erofs_get_meta_page(ctx.sb, mblk, false);
+ if (IS_ERR(mpage)) {
+ err = PTR_ERR(mpage);
+ goto out;
+ }
*mpage_ret = mpage;
} else {
lock_page(mpage);
@@ -1579,10 +1578,14 @@ int z_erofs_map_blocks_iter(struct inode *inode,
kaddr = kmap_atomic(mpage);
di = kaddr + vle_extent_blkoff(inode, lcn);
- debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
- e_blkaddr, vle_extent_blkoff(inode, lcn));
+ debugln("%s, lcn %u mblk %u e_blkoff %u", __func__, lcn,
+ mblk, vle_extent_blkoff(inode, lcn));
+
+ err = vle_decompressed_index_clusterofs(&logical_cluster_ofs,
+ clustersize, di);
+ if (unlikely(err))
+ goto unmap_out;
- logical_cluster_ofs = vle_compressed_index_clusterofs(clustersize, di);
if (!initial) {
/* [walking mode] 'map' has been already initialized */
map->m_llen += logical_cluster_ofs;
@@ -1592,7 +1595,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
/* by default, compressed */
map->m_flags |= EROFS_MAP_ZIPPED;
- end = (u64)(lcn + 1) * clustersize;
+ end = ((u64)lcn + 1) * clustersize;
cluster_type = vle_cluster_type(di);
@@ -1603,13 +1606,13 @@ int z_erofs_map_blocks_iter(struct inode *inode,
/* fallthrough */
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
if (ofs_rem == logical_cluster_ofs) {
- pcn = le32_to_cpu(di->di_u.blkaddr);
+ pblk = le32_to_cpu(di->di_u.blkaddr);
goto exact_hitted;
}
if (ofs_rem > logical_cluster_ofs) {
- ofs = lcn * clustersize | logical_cluster_ofs;
- pcn = le32_to_cpu(di->di_u.blkaddr);
+ ofs = (u64)lcn * clustersize | logical_cluster_ofs;
+ pblk = le32_to_cpu(di->di_u.blkaddr);
break;
}
@@ -1620,13 +1623,19 @@ int z_erofs_map_blocks_iter(struct inode *inode,
err = -EIO;
goto unmap_out;
}
- end = (lcn-- * clustersize) | logical_cluster_ofs;
+ end = ((u64)lcn-- * clustersize) | logical_cluster_ofs;
/* fallthrough */
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
/* get the correspoinding first chunk */
- ofs = vle_get_logical_extent_head(inode, mpage_ret,
- &kaddr, lcn, &pcn, &map->m_flags);
+ err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
+ &pblk, &map->m_flags);
mpage = *mpage_ret;
+
+ if (unlikely(err)) {
+ if (mpage)
+ goto unmap_out;
+ goto out;
+ }
break;
default:
errln("unknown cluster type %u at offset %llu of nid %llu",
@@ -1639,7 +1648,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
exact_hitted:
map->m_llen = end - ofs;
map->m_plen = clustersize;
- map->m_pa = blknr_to_addr(pcn);
+ map->m_pa = blknr_to_addr(pblk);
map->m_flags |= EROFS_MAP_MAPPED;
unmap_out:
kunmap_atomic(kaddr);
@@ -1649,8 +1658,10 @@ out:
__func__, map->m_la, map->m_pa,
map->m_llen, map->m_plen, map->m_flags);
+ trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
+
/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
- DBG_BUGON(err < 0);
+ DBG_BUGON(err < 0 && err != -ENOMEM);
return err;
}
diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
index 393998500865..3316bc36965d 100644
--- a/drivers/staging/erofs/unzip_vle.h
+++ b/drivers/staging/erofs/unzip_vle.h
@@ -47,13 +47,6 @@ static inline bool z_erofs_gather_if_stagingpage(struct list_head *page_pool,
#define Z_EROFS_VLE_INLINE_PAGEVECS 3
struct z_erofs_vle_work {
- /* struct z_erofs_vle_work *left, *right; */
-
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
- struct list_head list;
-
- atomic_t refcount;
-#endif
struct mutex lock;
/* I: decompression offset in page */
@@ -107,10 +100,8 @@ static inline void z_erofs_vle_set_workgrp_fmt(
grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
}
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
-#error multiref decompression is unimplemented yet
-#else
+/* definitions if multiref is disabled */
#define z_erofs_vle_grab_primary_work(grp) (&(grp)->work)
#define z_erofs_vle_grab_work(grp, pageofs) (&(grp)->work)
#define z_erofs_vle_work_workgroup(wrk, primary) \
@@ -118,7 +109,6 @@ static inline void z_erofs_vle_set_workgrp_fmt(
struct z_erofs_vle_workgroup, work) : \
({ BUG(); (void *)NULL; }))
-#endif
#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_vle_workgroup)
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index f5b665f15be5..1a428658cbea 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -23,14 +23,14 @@ static struct {
} erofs_pcpubuf[NR_CPUS];
int z_erofs_vle_plain_copy(struct page **compressed_pages,
- unsigned clusterpages,
+ unsigned int clusterpages,
struct page **pages,
- unsigned nr_pages,
+ unsigned int nr_pages,
unsigned short pageofs)
{
- unsigned i, j;
+ unsigned int i, j;
void *src = NULL;
- const unsigned righthalf = PAGE_SIZE - pageofs;
+ const unsigned int righthalf = PAGE_SIZE - pageofs;
char *percpu_data;
bool mirrored[Z_EROFS_CLUSTER_MAX_PAGES] = { 0 };
@@ -42,8 +42,8 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
struct page *page = pages[i];
void *dst;
- if (page == NULL) {
- if (src != NULL) {
+ if (!page) {
+ if (src) {
if (!mirrored[j])
kunmap_atomic(src);
src = NULL;
@@ -64,14 +64,14 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
}
if (i) {
- if (src == NULL)
- src = mirrored[i-1] ?
- percpu_data + (i-1) * PAGE_SIZE :
- kmap_atomic(compressed_pages[i-1]);
+ if (!src)
+ src = mirrored[i - 1] ?
+ percpu_data + (i - 1) * PAGE_SIZE :
+ kmap_atomic(compressed_pages[i - 1]);
memcpy(dst, src + righthalf, pageofs);
- if (!mirrored[i-1])
+ if (!mirrored[i - 1])
kunmap_atomic(src);
if (unlikely(i >= clusterpages)) {
@@ -80,9 +80,9 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
}
}
- if (!righthalf)
+ if (!righthalf) {
src = NULL;
- else {
+ } else {
src = mirrored[i] ? percpu_data + i * PAGE_SIZE :
kmap_atomic(compressed_pages[i]);
@@ -92,7 +92,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
kunmap_atomic(dst);
}
- if (src != NULL && !mirrored[j])
+ if (src && !mirrored[j])
kunmap_atomic(src);
preempt_enable();
@@ -102,14 +102,14 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
extern int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen);
int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
- unsigned clusterpages,
+ unsigned int clusterpages,
struct page **pages,
- unsigned outlen,
+ unsigned int outlen,
unsigned short pageofs,
void (*endio)(struct page *))
{
void *vin, *vout;
- unsigned nr_pages, i, j;
+ unsigned int nr_pages, i, j;
int ret;
if (outlen + pageofs > EROFS_PERCPU_NR_PAGES * PAGE_SIZE)
@@ -126,7 +126,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
vout = erofs_pcpubuf[smp_processor_id()].data;
ret = z_erofs_unzip_lz4(vin, vout + pageofs,
- clusterpages * PAGE_SIZE, outlen);
+ clusterpages * PAGE_SIZE, outlen);
if (ret >= 0) {
outlen = ret;
@@ -134,14 +134,15 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
}
for (i = 0; i < nr_pages; ++i) {
- j = min((unsigned)PAGE_SIZE - pageofs, outlen);
+ j = min((unsigned int)PAGE_SIZE - pageofs, outlen);
- if (pages[i] != NULL) {
- if (ret < 0)
+ if (pages[i]) {
+ if (ret < 0) {
SetPageError(pages[i]);
- else if (clusterpages == 1 && pages[i] == compressed_pages[0])
+ } else if (clusterpages == 1 &&
+ pages[i] == compressed_pages[0]) {
memcpy(vin + pageofs, vout + pageofs, j);
- else {
+ } else {
void *dst = kmap_atomic(pages[i]);
memcpy(dst + pageofs, vout + pageofs, j);
@@ -164,14 +165,14 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
}
int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
- unsigned clusterpages,
+ unsigned int clusterpages,
void *vout,
- unsigned llen,
+ unsigned int llen,
unsigned short pageofs,
bool overlapped)
{
void *vin;
- unsigned i;
+ unsigned int i;
int ret;
if (overlapped) {
@@ -181,29 +182,27 @@ int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
for (i = 0; i < clusterpages; ++i) {
void *t = kmap_atomic(compressed_pages[i]);
- memcpy(vin + PAGE_SIZE *i, t, PAGE_SIZE);
+ memcpy(vin + PAGE_SIZE * i, t, PAGE_SIZE);
kunmap_atomic(t);
}
- } else if (clusterpages == 1)
+ } else if (clusterpages == 1) {
vin = kmap_atomic(compressed_pages[0]);
- else {
+ } else {
vin = erofs_vmap(compressed_pages, clusterpages);
}
ret = z_erofs_unzip_lz4(vin, vout + pageofs,
- clusterpages * PAGE_SIZE, llen);
+ clusterpages * PAGE_SIZE, llen);
if (ret > 0)
ret = 0;
if (!overlapped) {
if (clusterpages == 1)
kunmap_atomic(vin);
- else {
+ else
erofs_vunmap(vin, clusterpages);
- }
- } else
+ } else {
preempt_enable();
-
+ }
return ret;
}
-
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index 595cf90af9bb..ea8a962e5c95 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -35,7 +35,6 @@ static atomic_long_t erofs_global_shrink_cnt;
#ifdef CONFIG_EROFS_FS_ZIP
-/* radix_tree and the future XArray both don't use tagptr_t yet */
struct erofs_workgroup *erofs_find_workgroup(
struct super_block *sb, pgoff_t index, bool *tag)
{
@@ -47,9 +46,8 @@ repeat:
rcu_read_lock();
grp = radix_tree_lookup(&sbi->workstn_tree, index);
if (grp != NULL) {
- *tag = radix_tree_exceptional_entry(grp);
- grp = (void *)((unsigned long)grp &
- ~RADIX_TREE_EXCEPTIONAL_ENTRY);
+ *tag = xa_pointer_tag(grp);
+ grp = xa_untag_pointer(grp);
if (erofs_workgroup_get(grp, &oldcount)) {
/* prefer to relax rcu read side */
@@ -83,9 +81,7 @@ int erofs_register_workgroup(struct super_block *sb,
sbi = EROFS_SB(sb);
erofs_workstn_lock(sbi);
- if (tag)
- grp = (void *)((unsigned long)grp |
- 1UL << RADIX_TREE_EXCEPTIONAL_SHIFT);
+ grp = xa_tag_pointer(grp, tag);
err = radix_tree_insert(&sbi->workstn_tree,
grp->index, grp);
@@ -120,7 +116,7 @@ unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
{
pgoff_t first_index = 0;
void *batch[PAGEVEC_SIZE];
- unsigned freed = 0;
+ unsigned int freed = 0;
int i, found;
repeat:
@@ -131,9 +127,7 @@ repeat:
for (i = 0; i < found; ++i) {
int cnt;
- struct erofs_workgroup *grp = (void *)
- ((unsigned long)batch[i] &
- ~RADIX_TREE_EXCEPTIONAL_ENTRY);
+ struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
first_index = grp->index + 1;
@@ -150,8 +144,8 @@ repeat:
#endif
continue;
- if (radix_tree_delete(&sbi->workstn_tree,
- grp->index) != grp) {
+ if (xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
+ grp->index)) != grp) {
#ifdef EROFS_FS_HAS_MANAGED_CACHE
skip:
erofs_workgroup_unfreeze(grp, 1);
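
The utils.c hunks above replace the open-coded RADIX_TREE_EXCEPTIONAL_ENTRY bit manipulation with the xa_tag_pointer()/xa_untag_pointer()/xa_pointer_tag() helpers from <linux/xarray.h>. A userspace stand-in (a re-implementation of the idea for illustration only, not the kernel API) showing how a small tag rides in the unused low bits of an aligned pointer:

/* Userspace sketch only: mimics the low-bit pointer tagging that the real
 * xa_tag_pointer()/xa_untag_pointer()/xa_pointer_tag() helpers provide.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_MASK 3UL	/* 4-byte-aligned objects leave two low bits free */

static void *tag_pointer(void *p, unsigned long tag)
{
	assert(((uintptr_t)p & TAG_MASK) == 0 && tag <= TAG_MASK);
	return (void *)((uintptr_t)p | tag);
}

static void *untag_pointer(void *entry)
{
	return (void *)((uintptr_t)entry & ~TAG_MASK);
}

static unsigned long pointer_tag(void *entry)
{
	return (uintptr_t)entry & TAG_MASK;
}

int main(void)
{
	static int obj;		/* stands in for a struct erofs_workgroup */
	void *entry = tag_pointer(&obj, 1);

	printf("tag=%lu untagged_ok=%d\n", pointer_tag(entry),
	       untag_pointer(entry) == (void *)&obj);
	return 0;
}
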
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 0e9cfeccdf99..80dca6a4adbe 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -19,41 +19,53 @@ struct xattr_iter {
void *kaddr;
erofs_blk_t blkaddr;
- unsigned ofs;
+ unsigned int ofs;
};
static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
{
- /* only init_inode_xattrs use non-atomic once */
+ /* the only user of kunmap() is 'init_inode_xattrs' */
if (unlikely(!atomic))
kunmap(it->page);
else
kunmap_atomic(it->kaddr);
+
unlock_page(it->page);
put_page(it->page);
}
-static void init_inode_xattrs(struct inode *inode)
+static inline void xattr_iter_end_final(struct xattr_iter *it)
+{
+ if (it->page == NULL)
+ return;
+
+ xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
{
struct xattr_iter it;
- unsigned i;
+ unsigned int i;
struct erofs_xattr_ibody_header *ih;
+ struct super_block *sb;
struct erofs_sb_info *sbi;
struct erofs_vnode *vi;
bool atomic_map;
if (likely(inode_has_inited_xattr(inode)))
- return;
+ return 0;
vi = EROFS_V(inode);
BUG_ON(!vi->xattr_isize);
- sbi = EROFS_I_SB(inode);
+ sb = inode->i_sb;
+ sbi = EROFS_SB(sb);
it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
it.page = erofs_get_inline_page(inode, it.blkaddr);
- BUG_ON(IS_ERR(it.page));
+ if (IS_ERR(it.page))
+ return PTR_ERR(it.page);
/* read in shared xattr array (non-atomic, see kmalloc below) */
it.kaddr = kmap(it.page);
@@ -62,9 +74,12 @@ static void init_inode_xattrs(struct inode *inode)
ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
vi->xattr_shared_count = ih->h_shared_count;
- vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
- vi->xattr_shared_count, sizeof(unsigned),
- GFP_KERNEL | __GFP_NOFAIL);
+ vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
+ sizeof(uint), GFP_KERNEL);
+ if (vi->xattr_shared_xattrs == NULL) {
+ xattr_iter_end(&it, atomic_map);
+ return -ENOMEM;
+ }
/* let's skip ibody header */
it.ofs += sizeof(struct erofs_xattr_ibody_header);
@@ -75,9 +90,10 @@ static void init_inode_xattrs(struct inode *inode)
BUG_ON(it.ofs != EROFS_BLKSIZ);
xattr_iter_end(&it, atomic_map);
- it.page = erofs_get_meta_page(inode->i_sb,
+ it.page = erofs_get_meta_page(sb,
++it.blkaddr, S_ISDIR(inode->i_mode));
- BUG_ON(IS_ERR(it.page));
+ if (IS_ERR(it.page))
+ return PTR_ERR(it.page);
it.kaddr = kmap_atomic(it.page);
atomic_map = true;
@@ -90,27 +106,43 @@ static void init_inode_xattrs(struct inode *inode)
xattr_iter_end(&it, atomic_map);
inode_set_inited_xattr(inode);
+ return 0;
}
+/*
+ * the general idea of these return values is:
+ * if 0 is returned, go on processing the current xattr;
+ * if 1 (> 0) is returned, skip this round and process the next xattr;
+ * if -err (< 0) is returned, an error (e.g. ENOATTR) occurred
+ * and needs to be handled
+ */
struct xattr_iter_handlers {
int (*entry)(struct xattr_iter *, struct erofs_xattr_entry *);
- int (*name)(struct xattr_iter *, unsigned, char *, unsigned);
- int (*alloc_buffer)(struct xattr_iter *, unsigned);
- void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
+ int (*name)(struct xattr_iter *, unsigned int, char *, unsigned int);
+ int (*alloc_buffer)(struct xattr_iter *, unsigned int);
+ void (*value)(struct xattr_iter *, unsigned int, char *, unsigned int);
};
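
To illustrate the return-value contract documented above (a standalone sketch with made-up names, not a handler added by this patch): 0 keeps processing the current xattr, a positive value skips it, and a negative errno aborts the walk.

/* Standalone sketch of the 0 / > 0 / < 0 handler contract described above;
 * handle_entry() and its name_len argument are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

static int handle_entry(int name_len)
{
	if (name_len < 0)
		return -EIO;	/* malformed entry: abort the walk */
	if (name_len == 0)
		return 1;	/* nothing to match: skip this entry */
	return 0;		/* keep processing name and value */
}

int main(void)
{
	const int entries[] = { 3, 0, -1 };
	unsigned int i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); ++i) {
		int ret = handle_entry(entries[i]);

		if (ret < 0) {
			printf("entry %u: error %d\n", i, ret);
			break;
		} else if (ret > 0) {
			printf("entry %u: skipped\n", i);
		} else {
			printf("entry %u: processed\n", i);
		}
	}
	return 0;
}
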
-static void xattr_iter_fixup(struct xattr_iter *it)
+static inline int xattr_iter_fixup(struct xattr_iter *it)
{
- if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
- xattr_iter_end(it, true);
+ if (it->ofs < EROFS_BLKSIZ)
+ return 0;
+
+ xattr_iter_end(it, true);
+
+ it->blkaddr += erofs_blknr(it->ofs);
- it->blkaddr += erofs_blknr(it->ofs);
- it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
- BUG_ON(IS_ERR(it->page));
+ it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
+ if (IS_ERR(it->page)) {
+ int err = PTR_ERR(it->page);
- it->kaddr = kmap_atomic(it->page);
- it->ofs = erofs_blkoff(it->ofs);
+ it->page = NULL;
+ return err;
}
+
+ it->kaddr = kmap_atomic(it->page);
+ it->ofs = erofs_blkoff(it->ofs);
+ return 0;
}
static int inline_xattr_iter_begin(struct xattr_iter *it,
@@ -118,7 +150,7 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
{
struct erofs_vnode *const vi = EROFS_V(inode);
struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
- unsigned xattr_header_sz, inline_xattr_ofs;
+ unsigned int xattr_header_sz, inline_xattr_ofs;
xattr_header_sz = inlinexattr_header_size(inode);
if (unlikely(xattr_header_sz >= vi->xattr_isize)) {
@@ -132,21 +164,28 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
it->page = erofs_get_inline_page(inode, it->blkaddr);
- BUG_ON(IS_ERR(it->page));
- it->kaddr = kmap_atomic(it->page);
+ if (IS_ERR(it->page))
+ return PTR_ERR(it->page);
+ it->kaddr = kmap_atomic(it->page);
return vi->xattr_isize - xattr_header_sz;
}
+/*
+ * Regardless of success or failure, `xattr_foreach' will end up with
+ * `ofs' pointing to the next xattr item rather than an arbitrary position.
+ */
static int xattr_foreach(struct xattr_iter *it,
- struct xattr_iter_handlers *op, unsigned *tlimit)
+ const struct xattr_iter_handlers *op, unsigned int *tlimit)
{
struct erofs_xattr_entry entry;
- unsigned value_sz, processed, slice;
+ unsigned int value_sz, processed, slice;
int err;
/* 0. fixup blkaddr, ofs, ipage */
- xattr_iter_fixup(it);
+ err = xattr_iter_fixup(it);
+ if (err)
+ return err;
/*
* 1. read xattr entry to the memory,
@@ -155,7 +194,7 @@ static int xattr_foreach(struct xattr_iter *it,
*/
entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
if (tlimit != NULL) {
- unsigned entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);
+ unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);
BUG_ON(*tlimit < entry_sz);
*tlimit -= entry_sz;
@@ -178,12 +217,14 @@ static int xattr_foreach(struct xattr_iter *it,
if (it->ofs >= EROFS_BLKSIZ) {
BUG_ON(it->ofs > EROFS_BLKSIZ);
- xattr_iter_fixup(it);
+ err = xattr_iter_fixup(it);
+ if (err)
+ goto out;
it->ofs = 0;
}
- slice = min_t(unsigned, PAGE_SIZE - it->ofs,
- entry.e_name_len - processed);
+ slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
+ entry.e_name_len - processed);
/* handle name */
err = op->name(it, processed, it->kaddr + it->ofs, slice);
@@ -210,21 +251,24 @@ static int xattr_foreach(struct xattr_iter *it,
while (processed < value_sz) {
if (it->ofs >= EROFS_BLKSIZ) {
BUG_ON(it->ofs > EROFS_BLKSIZ);
- xattr_iter_fixup(it);
+
+ err = xattr_iter_fixup(it);
+ if (err)
+ goto out;
it->ofs = 0;
}
- slice = min_t(unsigned, PAGE_SIZE - it->ofs,
- value_sz - processed);
+ slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
+ value_sz - processed);
op->value(it, processed, it->kaddr + it->ofs, slice);
it->ofs += slice;
processed += slice;
}
out:
- /* we assume that ofs is aligned with 4 bytes */
+ /* xattrs should be 4-byte aligned (on-disk constraint) */
it->ofs = EROFS_XATTR_ALIGN(it->ofs);
- return err;
+ return err < 0 ? err : 0;
}
struct getxattr_iter {
@@ -245,7 +289,7 @@ static int xattr_entrymatch(struct xattr_iter *_it,
}
static int xattr_namematch(struct xattr_iter *_it,
- unsigned processed, char *buf, unsigned len)
+ unsigned int processed, char *buf, unsigned int len)
{
struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
@@ -253,7 +297,7 @@ static int xattr_namematch(struct xattr_iter *_it,
}
static int xattr_checkbuffer(struct xattr_iter *_it,
- unsigned value_sz)
+ unsigned int value_sz)
{
struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
int err = it->buffer_size < value_sz ? -ERANGE : 0;
@@ -263,14 +307,14 @@ static int xattr_checkbuffer(struct xattr_iter *_it,
}
static void xattr_copyvalue(struct xattr_iter *_it,
- unsigned processed, char *buf, unsigned len)
+ unsigned int processed, char *buf, unsigned int len)
{
struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
memcpy(it->buffer + processed, buf, len);
}
-static struct xattr_iter_handlers find_xattr_handlers = {
+static const struct xattr_iter_handlers find_xattr_handlers = {
.entry = xattr_entrymatch,
.name = xattr_namematch,
.alloc_buffer = xattr_checkbuffer,
@@ -280,7 +324,7 @@ static struct xattr_iter_handlers find_xattr_handlers = {
static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
{
int ret;
- unsigned remaining;
+ unsigned int remaining;
ret = inline_xattr_iter_begin(&it->it, inode);
if (ret < 0)
@@ -289,19 +333,20 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
remaining = ret;
while (remaining) {
ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
- if (ret >= 0)
+ if (ret != -ENOATTR)
break;
}
- xattr_iter_end(&it->it, true);
+ xattr_iter_end_final(&it->it);
- return ret < 0 ? ret : it->buffer_size;
+ return ret ? ret : it->buffer_size;
}
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
struct erofs_vnode *const vi = EROFS_V(inode);
- struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
- unsigned i;
+ struct super_block *const sb = inode->i_sb;
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ unsigned int i;
int ret = -ENOATTR;
for (i = 0; i < vi->xattr_shared_count; ++i) {
@@ -314,21 +359,22 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
if (i)
xattr_iter_end(&it->it, true);
- it->it.page = erofs_get_meta_page(inode->i_sb,
- blkaddr, false);
- BUG_ON(IS_ERR(it->it.page));
+ it->it.page = erofs_get_meta_page(sb, blkaddr, false);
+ if (IS_ERR(it->it.page))
+ return PTR_ERR(it->it.page);
+
it->it.kaddr = kmap_atomic(it->it.page);
it->it.blkaddr = blkaddr;
}
ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
- if (ret >= 0)
+ if (ret != -ENOATTR)
break;
}
if (vi->xattr_shared_count)
- xattr_iter_end(&it->it, true);
+ xattr_iter_end_final(&it->it);
- return ret < 0 ? ret : it->buffer_size;
+ return ret ? ret : it->buffer_size;
}
static bool erofs_xattr_user_list(struct dentry *dentry)
@@ -351,7 +397,9 @@ int erofs_getxattr(struct inode *inode, int index,
if (unlikely(name == NULL))
return -EINVAL;
- init_inode_xattrs(inode);
+ ret = init_inode_xattrs(inode);
+ if (ret)
+ return ret;
it.index = index;
@@ -446,7 +494,7 @@ static int xattr_entrylist(struct xattr_iter *_it,
{
struct listxattr_iter *it =
container_of(_it, struct listxattr_iter, it);
- unsigned prefix_len;
+ unsigned int prefix_len;
const char *prefix;
const struct xattr_handler *h =
@@ -474,7 +522,7 @@ static int xattr_entrylist(struct xattr_iter *_it,
}
static int xattr_namelist(struct xattr_iter *_it,
- unsigned processed, char *buf, unsigned len)
+ unsigned int processed, char *buf, unsigned int len)
{
struct listxattr_iter *it =
container_of(_it, struct listxattr_iter, it);
@@ -485,7 +533,7 @@ static int xattr_namelist(struct xattr_iter *_it,
}
static int xattr_skipvalue(struct xattr_iter *_it,
- unsigned value_sz)
+ unsigned int value_sz)
{
struct listxattr_iter *it =
container_of(_it, struct listxattr_iter, it);
@@ -494,7 +542,7 @@ static int xattr_skipvalue(struct xattr_iter *_it,
return 1;
}
-static struct xattr_iter_handlers list_xattr_handlers = {
+static const struct xattr_iter_handlers list_xattr_handlers = {
.entry = xattr_entrylist,
.name = xattr_namelist,
.alloc_buffer = xattr_skipvalue,
@@ -504,7 +552,7 @@ static struct xattr_iter_handlers list_xattr_handlers = {
static int inline_listxattr(struct listxattr_iter *it)
{
int ret;
- unsigned remaining;
+ unsigned int remaining;
ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
if (ret < 0)
@@ -513,19 +561,20 @@ static int inline_listxattr(struct listxattr_iter *it)
remaining = ret;
while (remaining) {
ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
- if (ret < 0)
+ if (ret)
break;
}
- xattr_iter_end(&it->it, true);
- return ret < 0 ? ret : it->buffer_ofs;
+ xattr_iter_end_final(&it->it);
+ return ret ? ret : it->buffer_ofs;
}
static int shared_listxattr(struct listxattr_iter *it)
{
struct inode *const inode = d_inode(it->dentry);
struct erofs_vnode *const vi = EROFS_V(inode);
- struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
- unsigned i;
+ struct super_block *const sb = inode->i_sb;
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ unsigned int i;
int ret = 0;
for (i = 0; i < vi->xattr_shared_count; ++i) {
@@ -537,21 +586,22 @@ static int shared_listxattr(struct listxattr_iter *it)
if (i)
xattr_iter_end(&it->it, true);
- it->it.page = erofs_get_meta_page(inode->i_sb,
- blkaddr, false);
- BUG_ON(IS_ERR(it->it.page));
+ it->it.page = erofs_get_meta_page(sb, blkaddr, false);
+ if (IS_ERR(it->it.page))
+ return PTR_ERR(it->it.page);
+
it->it.kaddr = kmap_atomic(it->it.page);
it->it.blkaddr = blkaddr;
}
ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
- if (ret < 0)
+ if (ret)
break;
}
if (vi->xattr_shared_count)
- xattr_iter_end(&it->it, true);
+ xattr_iter_end_final(&it->it);
- return ret < 0 ? ret : it->buffer_ofs;
+ return ret ? ret : it->buffer_ofs;
}
ssize_t erofs_listxattr(struct dentry *dentry,
@@ -560,7 +610,9 @@ ssize_t erofs_listxattr(struct dentry *dentry,
int ret;
struct listxattr_iter it;
- init_inode_xattrs(d_inode(dentry));
+ ret = init_inode_xattrs(d_inode(dentry));
+ if (ret)
+ return ret;
it.dentry = dentry;
it.buffer = buffer;