Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--  fs/f2fs/data.c | 176
1 file changed, 107 insertions(+), 69 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index a82abe921b89..972eab7ac071 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -275,7 +275,8 @@ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
return f2fs_reserve_block(dn, index);
}
-struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
+struct page *get_read_data_page(struct inode *inode, pgoff_t index,
+ int rw, bool for_write)
{
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
@@ -292,7 +293,7 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return read_mapping_page(mapping, index, NULL);
- page = grab_cache_page(mapping, index);
+ page = f2fs_grab_cache_page(mapping, index, for_write);
if (!page)
return ERR_PTR(-ENOMEM);
@@ -352,7 +353,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
return page;
f2fs_put_page(page, 0);
- page = get_read_data_page(inode, index, READ_SYNC);
+ page = get_read_data_page(inode, index, READ_SYNC, false);
if (IS_ERR(page))
return page;
@@ -372,12 +373,13 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
* Because, the callers, functions in dir.c and GC, should be able to know
* whether this page exists or not.
*/
-struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
+struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
+ bool for_write)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
repeat:
- page = get_read_data_page(inode, index, READ_SYNC);
+ page = get_read_data_page(inode, index, READ_SYNC, for_write);
if (IS_ERR(page))
return page;
@@ -411,7 +413,7 @@ struct page *get_new_data_page(struct inode *inode,
struct dnode_of_data dn;
int err;
repeat:
- page = grab_cache_page(mapping, index);
+ page = f2fs_grab_cache_page(mapping, index, true);
if (!page) {
/*
* before exiting, we should make sure ipage will be released
@@ -439,7 +441,7 @@ repeat:
} else {
f2fs_put_page(page, 1);
- page = get_read_data_page(inode, index, READ_SYNC);
+ page = get_read_data_page(inode, index, READ_SYNC, true);
if (IS_ERR(page))
goto repeat;
@@ -447,9 +449,9 @@ repeat:
lock_page(page);
}
got_it:
- if (new_i_size &&
- i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
- i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
+ if (new_i_size && i_size_read(inode) <
+ ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
+ i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
/* Only the directory inode sets new_i_size */
set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
}
@@ -489,8 +491,9 @@ alloc:
/* update i_size */
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
dn->ofs_in_node;
- if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
- i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
+ if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
+ i_size_write(dn->inode,
+ ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
/* direct IO doesn't use extent cache to maximize the performance */
f2fs_drop_largest_extent(dn->inode, fofs);
@@ -523,6 +526,9 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
while (dn.ofs_in_node < end_offset && len) {
block_t blkaddr;
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto sync_out;
+
blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
if (__allocate_data_block(&dn))
@@ -565,6 +571,7 @@ static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
{
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
pgoff_t pgofs, end_offset;
int err = 0, ofs = 1;
@@ -595,40 +602,40 @@ static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
err = 0;
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR) {
- if (flag == F2FS_GET_BLOCK_BMAP) {
- err = -ENOENT;
- goto put_out;
- } else if (flag == F2FS_GET_BLOCK_READ ||
- flag == F2FS_GET_BLOCK_DIO) {
- goto put_out;
+
+ if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
+ if (create) {
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
+ goto put_out;
+ }
+ err = __allocate_data_block(&dn);
+ if (err)
+ goto put_out;
+ allocated = true;
+ map->m_flags = F2FS_MAP_NEW;
+ } else {
+ if (flag != F2FS_GET_BLOCK_FIEMAP ||
+ dn.data_blkaddr != NEW_ADDR) {
+ if (flag == F2FS_GET_BLOCK_BMAP)
+ err = -ENOENT;
+ goto put_out;
+ }
+
+ /*
+ * preallocated unwritten block should be mapped
+ * for fiemap.
+ */
+ if (dn.data_blkaddr == NEW_ADDR)
+ map->m_flags = F2FS_MAP_UNWRITTEN;
}
- /*
- * if it is in fiemap call path (flag = F2FS_GET_BLOCK_FIEMAP),
- * mark it as mapped and unwritten block.
- */
}
- if (dn.data_blkaddr != NULL_ADDR) {
- map->m_flags = F2FS_MAP_MAPPED;
- map->m_pblk = dn.data_blkaddr;
- if (dn.data_blkaddr == NEW_ADDR)
- map->m_flags |= F2FS_MAP_UNWRITTEN;
- } else if (create) {
- err = __allocate_data_block(&dn);
- if (err)
- goto put_out;
- allocated = true;
- map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
- map->m_pblk = dn.data_blkaddr;
- } else {
- if (flag == F2FS_GET_BLOCK_BMAP)
- err = -ENOENT;
- goto put_out;
- }
+ map->m_flags |= F2FS_MAP_MAPPED;
+ map->m_pblk = dn.data_blkaddr;
+ map->m_len = 1;
end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
- map->m_len = 1;
dn.ofs_in_node++;
pgofs++;
@@ -647,23 +654,35 @@ get_next:
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR &&
- flag != F2FS_GET_BLOCK_FIEMAP)
- goto put_out;
-
end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
}
if (maxblocks > map->m_len) {
block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
- if (blkaddr == NULL_ADDR && create) {
- err = __allocate_data_block(&dn);
- if (err)
- goto sync_out;
- allocated = true;
- map->m_flags |= F2FS_MAP_NEW;
- blkaddr = dn.data_blkaddr;
+
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
+ if (create) {
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
+ goto sync_out;
+ }
+ err = __allocate_data_block(&dn);
+ if (err)
+ goto sync_out;
+ allocated = true;
+ map->m_flags |= F2FS_MAP_NEW;
+ blkaddr = dn.data_blkaddr;
+ } else {
+ /*
+ * we only merge preallocated unwritten blocks
+ * for fiemap.
+ */
+ if (flag != F2FS_GET_BLOCK_FIEMAP ||
+ blkaddr != NEW_ADDR)
+ goto sync_out;
+ }
}
+
/* Give more consecutive addresses for the readahead */
if ((map->m_pblk != NEW_ADDR &&
blkaddr == (map->m_pblk + ofs)) ||
@@ -752,6 +771,12 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (ret)
return ret;
+ if (f2fs_has_inline_data(inode)) {
+ ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
mutex_lock(&inode->i_mutex);
if (len >= isize) {
@@ -903,7 +928,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
map.m_lblk = block_in_file;
map.m_len = last_block - block_in_file;
- if (f2fs_map_blocks(inode, &map, 0, false))
+ if (f2fs_map_blocks(inode, &map, 0,
+ F2FS_GET_BLOCK_READ))
goto set_error_page;
}
got_it:
@@ -936,21 +962,14 @@ submit_and_realloc:
if (f2fs_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
- struct page *cpage;
ctx = f2fs_get_crypto_ctx(inode);
if (IS_ERR(ctx))
goto set_error_page;
/* wait the page to be moved by cleaning */
- cpage = find_lock_page(
- META_MAPPING(F2FS_I_SB(inode)),
- block_nr);
- if (cpage) {
- f2fs_wait_on_page_writeback(cpage,
- DATA);
- f2fs_put_page(cpage, 1);
- }
+ f2fs_wait_on_encrypted_page_writeback(
+ F2FS_I_SB(inode), block_nr);
}
bio = bio_alloc(GFP_KERNEL,
@@ -1012,6 +1031,9 @@ static int f2fs_read_data_pages(struct file *file,
struct list_head *pages, unsigned nr_pages)
{
struct inode *inode = file->f_mapping->host;
+ struct page *page = list_entry(pages->prev, struct page, lru);
+
+ trace_f2fs_readpages(inode, page, nr_pages);
/* If the file has inline data, skip readpages */
if (f2fs_has_inline_data(inode))
@@ -1041,6 +1063,11 @@ int do_write_data_page(struct f2fs_io_info *fio)
}
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+
+ /* wait for GCed encrypted page writeback */
+ f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
+ fio->blk_addr);
+
fio->encrypted_page = f2fs_encrypt(inode, fio->page);
if (IS_ERR(fio->encrypted_page)) {
err = PTR_ERR(fio->encrypted_page);
@@ -1429,6 +1456,10 @@ put_next:
f2fs_wait_on_page_writeback(page, DATA);
+ /* wait for GCed encrypted page writeback */
+ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+ f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
+
if (len == PAGE_CACHE_SIZE)
goto out_update;
if (PageUptodate(page))
@@ -1551,10 +1582,16 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
- if (iov_iter_rw(iter) == WRITE)
+ if (iov_iter_rw(iter) == WRITE) {
__allocate_data_blocks(inode, offset, count);
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+ err = -EIO;
+ goto out;
+ }
+ }
err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
+out:
if (err < 0 && iov_iter_rw(iter) == WRITE)
f2fs_write_failed(mapping, offset + count);
@@ -1636,12 +1673,13 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
- /* we don't need to use inline_data strictly */
- if (f2fs_has_inline_data(inode)) {
- int err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
+ if (f2fs_has_inline_data(inode))
+ return 0;
+
+ /* make sure allocating whole blocks */
+ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+ filemap_write_and_wait(mapping);
+
return generic_block_bmap(mapping, block, get_data_block_bmap);
}
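
Note on the (loff_t) casts added in get_new_data_page() and __allocate_data_block(): on a 32-bit build, pgoff_t is an unsigned long, so (index + 1) << PAGE_CACHE_SHIFT is evaluated in 32-bit arithmetic and wraps for offsets at or past 4 GiB before the result ever reaches the 64-bit i_size. The minimal userspace sketch below reproduces that wraparound; the 4 KiB page size and the sample index are illustrative assumptions, not values taken from the kernel.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                        /* assume 4 KiB pages */

int main(void)
{
	uint32_t index = 0x100000;           /* page index right at the 4 GiB boundary */

	/* Shift happens in 32-bit arithmetic and wraps before the widening assignment. */
	uint64_t no_cast   = (index + 1) << PAGE_SHIFT;
	/* Casting first forces a 64-bit shift, analogous to the (loff_t) cast in the patch. */
	uint64_t with_cast = (uint64_t)(index + 1) << PAGE_SHIFT;

	printf("without cast: 0x%llx\n", (unsigned long long)no_cast);    /* 0x1000 */
	printf("with cast:    0x%llx\n", (unsigned long long)with_cast);  /* 0x100001000 */
	return 0;
}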