From 8b895ce652dc73bcf42a0c24acfc0708a06ea7c3 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 5 Jul 2015 15:12:06 -0400 Subject: NFSv4.1: Handle SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED status bit correctly. If the server tells us that only some state has been revoked, then we need to run the full TEST_STATEID dog and pony show in order to discover which locks and delegations are still OK. Currently we blow away all state, which means that we lose all locks! Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 605840dc89cf..f76916169b5f 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2191,12 +2191,20 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp) } } -static void nfs41_handle_state_revoked(struct nfs_client *clp) +static void nfs41_handle_all_state_revoked(struct nfs_client *clp) { nfs4_reset_all_state(clp); dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); } +static void nfs41_handle_some_state_revoked(struct nfs_client *clp) +{ + nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce); + nfs4_schedule_state_manager(clp); + + dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); +} + static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) { /* This will need to handle layouts too */ @@ -2231,10 +2239,11 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) nfs41_handle_server_reboot(clp); - if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | - SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | + if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED)) + nfs41_handle_all_state_revoked(clp); + if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | SEQ4_STATUS_ADMIN_STATE_REVOKED)) - nfs41_handle_state_revoked(clp); + nfs41_handle_some_state_revoked(clp); if (flags & SEQ4_STATUS_LEASE_MOVED) nfs4_schedule_lease_moved_recovery(clp); if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) -- cgit v1.2.3-59-g8ed1b From 4099287feb5833c24b5a91e1fe55207cf7559350 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 5 Jul 2015 15:20:53 -0400 Subject: NFSv4.1: Handle SEQ4_STATUS_RECALLABLE_STATE_REVOKED status bit correctly Try to handle this for now by invalidating all outstanding layouts for this server and then testing all the open+lock+delegation stateids. At some later stage, we may want to optimise by separating out the testing of delegation stateids only, and adding testing of layout stateids. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index f76916169b5f..469452996154 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2207,8 +2207,10 @@ static void nfs41_handle_some_state_revoked(struct nfs_client *clp) static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) { - /* This will need to handle layouts too */ - nfs_expire_all_delegations(clp); + /* FIXME: For now, we destroy all layouts. */ + pnfs_destroy_all_layouts(clp); + /* FIXME: For now, we test all delegations+open state+locks. 
*/ + nfs41_handle_some_state_revoked(clp); dprintk("%s: Recallable state revoked on server %s!\n", __func__, clp->cl_hostname); } -- cgit v1.2.3-59-g8ed1b From b13529059cf782f9b4e4ac0ca9d524bd922163da Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 5 Jul 2015 15:26:58 -0400 Subject: NFSv4.1: Handle SEQ4_STATUS_BACKCHANNEL_FAULT correctly RFC5661 states: The server has encountered an unrecoverable fault with the backchannel (e.g., it has lost track of the sequence ID for a slot in the backchannel). The client MUST stop sending more requests on the session's fore channel, wait for all outstanding requests to complete on the fore and back channel, and then destroy the session. Ensure we do so... Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 469452996154..f2e2ad894461 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2217,9 +2217,9 @@ static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) static void nfs41_handle_backchannel_fault(struct nfs_client *clp) { - nfs_expire_all_delegations(clp); - if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) - nfs4_schedule_state_manager(clp); + set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); + nfs4_schedule_state_manager(clp); + dprintk("%s: server %s declared a backchannel fault\n", __func__, clp->cl_hostname); } -- cgit v1.2.3-59-g8ed1b From b15c7cdde4991be5058f442c6d08d404d56f662c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 5 Jul 2015 15:01:36 -0400 Subject: NFSv4.1: nfs41_sequence_done should handle sequence flag errors Instead of just kicking off lease recovery, we should look into the sequence flag errors and handle them. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6f228b5af819..1607b41a6d84 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -616,8 +616,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) clp = session->clp; do_renew_lease(clp, res->sr_timestamp); /* Check sequence flags */ - if (res->sr_status_flags != 0) - nfs4_schedule_lease_recovery(clp); + nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); nfs41_update_target_slotid(slot->table, slot, res); break; case 1: -- cgit v1.2.3-59-g8ed1b From be824167e33a8b747423c90f72479deb03255d54 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 5 Jul 2015 14:50:46 -0400 Subject: NFSv4: Leases are renewed in sequence_done when we have sessions Ensure that the calls to renew_lease() in open_done() etc. only apply to session-less versions of NFSv4.x (i.e. NFSv4.0). 
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 1607b41a6d84..9f24238032f8 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -467,7 +467,10 @@ static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) static void renew_lease(const struct nfs_server *server, unsigned long timestamp) { - do_renew_lease(server->nfs_client, timestamp); + struct nfs_client *clp = server->nfs_client; + + if (!nfs4_has_session(clp)) + do_renew_lease(clp, timestamp); } struct nfs4_call_sync_data { @@ -7572,13 +7575,8 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) goto out; } ret = rpc_wait_for_completion_task(task); - if (!ret) { - struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; - - if (task->tk_status == 0) - nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); + if (!ret) ret = task->tk_status; - } rpc_put_task(task); out: dprintk("<-- %s status=%d\n", __func__, ret); -- cgit v1.2.3-59-g8ed1b From 690edcfad0e570a9a2a42d9b98cd04efaf21489d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 8 Jul 2015 20:19:23 +0200 Subject: NFSv4.2/flexfiles: Fix a typo in the flexfiles layoutstats code Signed-off-by: Trond Myklebust --- fs/nfs/flexfilelayout/flexfilelayout.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index c12951b9551e..b3289d701eea 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1852,7 +1852,7 @@ ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args, struct nfs42_layoutstat_devinfo *devinfo; int i; - for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) { + for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) { if (*dev_count >= dev_limit) break; mirror = FF_LAYOUT_COMP(pls, i); -- cgit v1.2.3-59-g8ed1b From 70f19f5869c5accfd9f371c099f21c71516591b2 Mon Sep 17 00:00:00 2001 From: "Steven J. Magnani" Date: Tue, 7 Jul 2015 13:06:05 -0500 Subject: udf: Don't corrupt unalloc spacetable when writing it For a UDF filesystem configured with an Unallocated Space Table, a filesystem operation that triggers an update to the table results in on-disk corruption that prevents remounting: udf_read_tagged: tag version 0x0000 != 0x0002 || 0x0003, block 274 For example: 1. Create a filesystem $ mkudffs --media-type=hd --blocksize=512 --lvid=BUGTEST \ --vid=BUGTEST --fsid=BUGTEST --space=unalloctable \ /dev/mmcblk0 2. Mount it # mount /dev/mmcblk0 /mnt 3. Create a file $ echo "No corruption, please" > /mnt/new.file 4. Umount # umount /mnt 5. Attempt remount # mount /dev/mmcblk0 /mnt This appears to be a longstanding bug caused by zero-initialization of the Unallocated Space Entry block buffer and only partial repopulation of required fields before writing to disk. Commit 0adfb339fd64 ("udf: Fix unalloc space handling in udf_update_inode") addressed one such field, but several others are required. Signed-off-by: Steven J. 
Magnani Signed-off-by: Jan Kara --- fs/udf/inode.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 6afac3d561ac..8d0b3ade0ff0 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1652,17 +1652,9 @@ static int udf_update_inode(struct inode *inode, int do_sync) iinfo->i_ext.i_data, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); - use->descTag.tagLocation = - cpu_to_le32(iinfo->i_location.logicalBlockNum); - crclen = sizeof(struct unallocSpaceEntry) + - iinfo->i_lenAlloc - sizeof(struct tag); - use->descTag.descCRCLength = cpu_to_le16(crclen); - use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use + - sizeof(struct tag), - crclen)); - use->descTag.tagChecksum = udf_tag_checksum(&use->descTag); + crclen = sizeof(struct unallocSpaceEntry); - goto out; + goto finish; } if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) @@ -1782,6 +1774,8 @@ static int udf_update_inode(struct inode *inode, int do_sync) efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); crclen = sizeof(struct extendedFileEntry); } + +finish: if (iinfo->i_strat4096) { fe->icbTag.strategyType = cpu_to_le16(4096); fe->icbTag.strategyParameter = cpu_to_le16(1); @@ -1791,7 +1785,9 @@ static int udf_update_inode(struct inode *inode, int do_sync) fe->icbTag.numEntries = cpu_to_le16(1); } - if (S_ISDIR(inode->i_mode)) + if (iinfo->i_use) + fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE; + else if (S_ISDIR(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; else if (S_ISREG(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; @@ -1828,7 +1824,6 @@ static int udf_update_inode(struct inode *inode, int do_sync) crclen)); fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); -out: set_buffer_uptodate(bh); unlock_buffer(bh); -- cgit v1.2.3-59-g8ed1b From c5d73716e95d510c0655272ce44f318932309cb1 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 9 Jul 2015 17:58:39 +0200 Subject: pNFS: Layoutreturn must invalidate all existing layout segments. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 9f24238032f8..671498ca36d7 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7964,16 +7964,19 @@ static void nfs4_layoutreturn_release(void *calldata) { struct nfs4_layoutreturn *lrp = calldata; struct pnfs_layout_hdr *lo = lrp->args.layout; + LIST_HEAD(freeme); dprintk("--> %s\n", __func__); spin_lock(&lo->plh_inode->i_lock); if (lrp->res.lrs_present) pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); + pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range); pnfs_clear_layoutreturn_waitbit(lo); clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq); lo->plh_block_lgets--; spin_unlock(&lo->plh_inode->i_lock); + pnfs_free_lseg_list(&freeme); pnfs_put_layout_hdr(lrp->args.layout); nfs_iput_and_deactive(lrp->inode); kfree(calldata); -- cgit v1.2.3-59-g8ed1b From df9cecc1a36d70ec09d34e83bad452064754fdc4 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 9 Jul 2015 23:38:11 +0200 Subject: pNFS: pnfs_roc_drain should return 'true' when sleeping Also clean up the case where we don't find a return-on-close layout segment. 
Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 0ba9a02c9566..8e9f467e409c 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1146,14 +1146,14 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task) struct pnfs_layout_segment *lseg; nfs4_stateid stateid; u32 current_seqid; - bool found = false, layoutreturn = false; + bool layoutreturn = false; spin_lock(&ino->i_lock); list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); - found = true; - goto out; + spin_unlock(&ino->i_lock); + return true; } lo = nfsi->layout; current_seqid = be32_to_cpu(lo->plh_stateid.seqid); @@ -1162,23 +1162,21 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task) * a barrier, we choose the worst-case barrier. */ *barrier = current_seqid + atomic_read(&lo->plh_outstanding); -out: - if (!found) { - stateid = lo->plh_stateid; - layoutreturn = - test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + stateid = lo->plh_stateid; + layoutreturn = test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); - if (layoutreturn) { - lo->plh_block_lgets++; - pnfs_get_layout_hdr(lo); - } + if (layoutreturn) { + lo->plh_block_lgets++; + pnfs_get_layout_hdr(lo); } + spin_unlock(&ino->i_lock); if (layoutreturn) { rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false); + return true; } - return found; + return false; } /* -- cgit v1.2.3-59-g8ed1b From 7f27392cd4cbcadeeaff9eedebcaec8fae9aec8e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 9 Jul 2015 18:40:01 +0200 Subject: pNFS: Fix races between return-on-close and layoutreturn. If one or more of the layout segments reports an error during I/O, then we may have to send a layoutreturn to report the error back to the NFS metadata server. This patch ensures that the return-on-close code can detect the outstanding layoutreturn, and not preempt it. 
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 2 -- fs/nfs/pnfs.c | 63 ++++++++++++++++++++++++++++++------------------------- 2 files changed, 35 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 671498ca36d7..c5c9e0d070f8 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7972,8 +7972,6 @@ static void nfs4_layoutreturn_release(void *calldata) pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range); pnfs_clear_layoutreturn_waitbit(lo); - clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); - rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq); lo->plh_block_lgets--; spin_unlock(&lo->plh_inode->i_lock); pnfs_free_lseg_list(&freeme); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 8e9f467e409c..27e2bcaa88da 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -352,7 +352,7 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo, { struct pnfs_layout_segment *s; - if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) + if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) return false; list_for_each_entry(s, &lo->plh_segs, pls_list) @@ -362,6 +362,18 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo, return true; } +static bool +pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo) +{ + if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) + return false; + lo->plh_return_iomode = 0; + lo->plh_block_lgets++; + pnfs_get_layout_hdr(lo); + clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); + return true; +} + static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg, struct pnfs_layout_hdr *lo, struct inode *inode) { @@ -372,17 +384,16 @@ static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg, if (pnfs_layout_need_return(lo, lseg)) { nfs4_stateid stateid; enum pnfs_iomode iomode; + bool send; stateid = lo->plh_stateid; iomode = lo->plh_return_iomode; - /* decreased in pnfs_send_layoutreturn() */ - lo->plh_block_lgets++; - lo->plh_return_iomode = 0; + send = pnfs_prepare_layoutreturn(lo); spin_unlock(&inode->i_lock); - pnfs_get_layout_hdr(lo); - - /* Send an async layoutreturn so we dont deadlock */ - pnfs_send_layoutreturn(lo, stateid, iomode, false); + if (send) { + /* Send an async layoutreturn so we dont deadlock */ + pnfs_send_layoutreturn(lo, stateid, iomode, false); + } } else spin_unlock(&inode->i_lock); } @@ -924,6 +935,7 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo) clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags); smp_mb__after_atomic(); wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN); + rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq); } static int @@ -978,6 +990,7 @@ _pnfs_return_layout(struct inode *ino) LIST_HEAD(tmp_list); nfs4_stateid stateid; int status = 0, empty; + bool send; dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino); @@ -1007,17 +1020,18 @@ _pnfs_return_layout(struct inode *ino) /* Don't send a LAYOUTRETURN if list was initially empty */ if (empty) { spin_unlock(&ino->i_lock); - pnfs_put_layout_hdr(lo); dprintk("NFS: %s no layout segments to return\n", __func__); - goto out; + goto out_put_layout_hdr; } set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); - lo->plh_block_lgets++; + send = pnfs_prepare_layoutreturn(lo); spin_unlock(&ino->i_lock); pnfs_free_lseg_list(&tmp_list); - - status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); + if (send) + status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); 
+out_put_layout_hdr: + pnfs_put_layout_hdr(lo); out: dprintk("<-- %s status: %d\n", __func__, status); return status; @@ -1097,13 +1111,9 @@ bool pnfs_roc(struct inode *ino) out_noroc: if (lo) { stateid = lo->plh_stateid; - layoutreturn = - test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, - &lo->plh_flags); - if (layoutreturn) { - lo->plh_block_lgets++; - pnfs_get_layout_hdr(lo); - } + if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + &lo->plh_flags)) + layoutreturn = pnfs_prepare_layoutreturn(lo); } spin_unlock(&ino->i_lock); if (layoutreturn) { @@ -1163,16 +1173,14 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task) */ *barrier = current_seqid + atomic_read(&lo->plh_outstanding); stateid = lo->plh_stateid; - layoutreturn = test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, - &lo->plh_flags); - if (layoutreturn) { - lo->plh_block_lgets++; - pnfs_get_layout_hdr(lo); - } + if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + &lo->plh_flags)) + layoutreturn = pnfs_prepare_layoutreturn(lo); + if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) + rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); spin_unlock(&ino->i_lock); if (layoutreturn) { - rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false); return true; } @@ -1693,7 +1701,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode, spin_lock(&inode->i_lock); /* set failure bit so that pnfs path will be retried later */ pnfs_layout_set_fail_bit(lo, iomode); - set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); if (lo->plh_return_iomode == 0) lo->plh_return_iomode = range.iomode; else if (lo->plh_return_iomode != range.iomode) -- cgit v1.2.3-59-g8ed1b From bdc59cf233433ddd2cd671db02bd6b52323ce63d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 9 Jul 2015 23:54:09 +0200 Subject: pNFS: pnfs_roc_drain() fix a race with open If a process reopens the file before we can send off the CLOSE/DELEGRETURN, then pnfs_roc_drain() may end up waiting for a new set of layout segments that are marked as return-on-close, but haven't yet been returned. Fix this by only waiting for those layout segments that were invalidated in pnfs_roc(). 
Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 27e2bcaa88da..b02e32e2abeb 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1159,12 +1159,15 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task) bool layoutreturn = false; spin_lock(&ino->i_lock); - list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) - if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { - rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); - spin_unlock(&ino->i_lock); - return true; - } + list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) { + if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) + continue; + if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) + continue; + rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); + spin_unlock(&ino->i_lock); + return true; + } lo = nfsi->layout; current_seqid = be32_to_cpu(lo->plh_stateid.seqid); -- cgit v1.2.3-59-g8ed1b From faa4a54f0be15b5d81b574fb5a40db24345d1a6c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 9 Jul 2015 18:24:07 +0200 Subject: pNFS: Don't throw out valid layout segments It is OK for layout segments to remain hashed even if no-one holds any references to them, provided that the segments are still valid. Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index b02e32e2abeb..18aa3b7962eb 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -422,6 +422,10 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg) pnfs_layoutreturn_before_put_lseg(lseg, lo, inode); if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { + if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) { + spin_unlock(&inode->i_lock); + return; + } pnfs_get_layout_hdr(lo); pnfs_layout_remove_lseg(lo, lseg); spin_unlock(&inode->i_lock); @@ -462,6 +466,8 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg) test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); if (atomic_dec_and_test(&lseg->pls_refcount)) { struct pnfs_layout_hdr *lo = lseg->pls_layout; + if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) + return; pnfs_get_layout_hdr(lo); pnfs_layout_remove_lseg(lo, lseg); pnfs_free_lseg_async(lseg); -- cgit v1.2.3-59-g8ed1b From d725e66c06ab440032f49ef17e960896d0ec6d49 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 21 Jul 2015 16:06:53 -0700 Subject: Revert "fsnotify: fix oops in fsnotify_clear_marks_by_group_flags()" This reverts commit a2673b6e040663bf16a552f8619e6bde9f4b9acf. Kinglong Mee reports a memory leak with that patch, and Jan Kara confirms: "Thanks for report! You are right that my patch introduces a race between fsnotify kthread and fsnotify_destroy_group() which can result in leaking inotify event on group destruction. I haven't yet decided whether the right fix is not to queue events for dying notification group (as that is pointless anyway) or whether we should just fix the original problem differently... Whenever I look at fsnotify code mark handling I get lost in the maze of locks, lists, and subtle differences between how different notification systems handle notification marks :( I'll think about it over night" and after thinking about it, Jan says: "OK, I have looked into the code some more and I found another relatively simple way of fixing the original oops. It will be IMHO better than trying to fixup this issue which has more potential for breakage. 
I'll ask Linus to revert the fsnotify fix he already merged and send a new fix" Reported-by: Kinglong Mee Requested-by: Jan Kara Cc: Andrew Morton Signed-off-by: Linus Torvalds --- fs/notify/mark.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 3e594ce41010..92e48c70f0f0 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -152,15 +152,31 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, BUG(); list_del_init(&mark->g_list); + spin_unlock(&mark->lock); if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) iput(inode); + /* release lock temporarily */ + mutex_unlock(&group->mark_mutex); spin_lock(&destroy_lock); list_add(&mark->g_list, &destroy_list); spin_unlock(&destroy_lock); wake_up(&destroy_waitq); + /* + * We don't necessarily have a ref on mark from caller so the above destroy + * may have actually freed it, unless this group provides a 'freeing_mark' + * function which must be holding a reference. + */ + + /* + * Some groups like to know that marks are being freed. This is a + * callback to the group function to let it know that this mark + * is being freed. + */ + if (group->ops->freeing_mark) + group->ops->freeing_mark(mark, group); /* * __fsnotify_update_child_dentry_flags(inode); @@ -175,6 +191,8 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, */ atomic_dec(&group->num_marks); + + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); } void fsnotify_destroy_mark(struct fsnotify_mark *mark, @@ -187,10 +205,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark, /* * Destroy all marks in the given list. The marks must be already detached from - * the original inode / vfsmount. Note that we can race with - * fsnotify_clear_marks_by_group_flags(). However we hold a reference to each - * mark so they won't get freed from under us and nobody else touches our - * free_list list_head. + * the original inode / vfsmount. */ void fsnotify_destroy_marks(struct list_head *to_free) { @@ -391,7 +406,7 @@ struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head, } /* - * Clear any marks in a group in which mark->flags & flags is true. + * clear any marks in a group in which mark->flags & flags is true */ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags) @@ -445,7 +460,6 @@ static int fsnotify_mark_destroy(void *ignored) { struct fsnotify_mark *mark, *next; struct list_head private_destroy_list; - struct fsnotify_group *group; for (;;) { spin_lock(&destroy_lock); @@ -457,14 +471,6 @@ static int fsnotify_mark_destroy(void *ignored) list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) { list_del_init(&mark->g_list); - group = mark->group; - /* - * Some groups like to know that marks are being freed. - * This is a callback to the group function to let it - * know that this mark is being freed. - */ - if (group && group->ops->freeing_mark) - group->ops->freeing_mark(mark, group); fsnotify_put_mark(mark); } -- cgit v1.2.3-59-g8ed1b From 3c38cbe2ade88240fabb585b408f779ad3b9a31b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 22 Jul 2015 13:46:13 -0400 Subject: NFSv4: We must set NFS_OPEN_STATE flag in nfs_resync_open_stateid_locked Otherwise, nfs4_select_rw_stateid() will always return the zero stateid instead of the correct open stateid. 
Fixes: f95549cf24660 ("NFSv4: More CLOSE/OPEN races") Cc: stable@vger.kernel.org # 4.0+ Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index c5c9e0d070f8..9264994ec9d3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1200,12 +1200,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state, static void nfs_resync_open_stateid_locked(struct nfs4_state *state) { + if (!(state->n_wronly || state->n_rdonly || state->n_rdwr)) + return; if (state->n_wronly) set_bit(NFS_O_WRONLY_STATE, &state->flags); if (state->n_rdonly) set_bit(NFS_O_RDONLY_STATE, &state->flags); if (state->n_rdwr) set_bit(NFS_O_RDWR_STATE, &state->flags); + set_bit(NFS_OPEN_STATE, &state->flags); } static void nfs_clear_open_stateid_locked(struct nfs4_state *state, -- cgit v1.2.3-59-g8ed1b From 2b83d3de4c18af49800e0b26ae013db4fcf43a4a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 5 Jul 2015 20:06:38 -0400 Subject: NFSv4/pnfs: Ensure we don't miss a file extension pNFS writes don't return attributes, however that doesn't mean that we should ignore the fact that they may be extending the file. This patch ensures that if a write is seen to extend the file, then we always set an attribute barrier, and update the cached file size. Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 359e9ad596c9..0e6a2b8786b4 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1378,24 +1378,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr, { struct nfs_pgio_args *argp = &hdr->args; struct nfs_pgio_res *resp = &hdr->res; + u64 size = argp->offset + resp->count; if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) + fattr->size = size; + if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) { + fattr->valid &= ~NFS_ATTR_FATTR_SIZE; return; - if (argp->offset + resp->count != fattr->size) - return; - if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) + } + if (size != fattr->size) return; /* Set attribute barrier */ nfs_fattr_set_barrier(fattr); + /* ...and update size */ + fattr->valid |= NFS_ATTR_FATTR_SIZE; } void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) { - struct nfs_fattr *fattr = hdr->res.fattr; + struct nfs_fattr *fattr = &hdr->fattr; struct inode *inode = hdr->inode; - if (fattr == NULL) - return; spin_lock(&inode->i_lock); nfs_writeback_check_extend(hdr, fattr); nfs_post_op_update_inode_force_wcc_locked(inode, fattr); -- cgit v1.2.3-59-g8ed1b From 85a23cee3f2c928475f31777ead5a71340a12fc3 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 5 Jul 2015 11:02:53 -0400 Subject: NFS: Don't revalidate the mapping if both size and change attr are up to date If we've ensured that the size and the change attribute are both correct, then there is no point in marking those attributes as needing revalidation again. Only do so if we know the size is incorrect and was not updated. 
Fixes: f2467b6f64da ("NFS: Clear NFS_INO_REVAL_PAGECACHE when...") Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index b77b328a06d7..d654661defb3 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1244,9 +1244,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat if (fattr->valid & NFS_ATTR_FATTR_SIZE) { cur_size = i_size_read(inode); new_isize = nfs_size_to_loff_t(fattr->size); - if (cur_size != new_isize && nfsi->nrequests == 0) + if (cur_size != new_isize) invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; } + if (nfsi->nrequests != 0) + invalid &= ~NFS_INO_REVAL_PAGECACHE; /* Have any file permissions changed? */ if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) @@ -1684,8 +1686,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_DATA | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL - | NFS_INO_REVAL_PAGECACHE; + | NFS_INO_INVALID_ACL; if (S_ISDIR(inode->i_mode)) nfs_force_lookup_revalidate(inode); inode->i_version = fattr->change_attr; @@ -1717,7 +1718,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if ((nfsi->nrequests == 0) || new_isize > cur_isize) { i_size_write(inode, new_isize); invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; - invalid &= ~NFS_INO_REVAL_PAGECACHE; } dprintk("NFS: isize change on server for file %s/%ld " "(%Ld to %Ld)\n", -- cgit v1.2.3-59-g8ed1b From 5c675d6420511e035c150e420ab26d0306bbb736 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 5 Jul 2015 16:07:04 -0400 Subject: NFS: Set NFS_INO_REVAL_PAGECACHE if the change attribute is uninitialised We can't allow caching of data until the change attribute has been initialised correctly. Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index d654661defb3..426e4f8207ef 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -443,7 +443,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st if (fattr->valid & NFS_ATTR_FATTR_CHANGE) inode->i_version = fattr->change_attr; else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR)) - nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); + nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR + | NFS_INO_REVAL_PAGECACHE); if (fattr->valid & NFS_ATTR_FATTR_SIZE) inode->i_size = nfs_size_to_loff_t(fattr->size); else -- cgit v1.2.3-59-g8ed1b From cd812599796f500b042f5464b6665755eca21137 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 5 Jul 2015 11:12:07 -0400 Subject: NFS: Remove the "NFS_CAP_CHANGE_ATTR" capability Setting the change attribute has been mandatory for all NFS versions, since commit 3a1556e8662c ("NFSv2/v3: Simulate the change attribute"). We should therefore not have anything be conditional on it being set/unset. 
Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 2 +- fs/nfs/inode.c | 4 ++-- fs/nfs/nfs4proc.c | 3 --- include/linux/nfs_fs_sb.h | 2 +- 4 files changed, 4 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index ecebb406cc1a..4a90c9bb3135 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server, server->options = data->options; server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID| NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP| - NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR; + NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME; if (data->rsize) server->rsize = nfs_block_size(data->rsize, NULL); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 426e4f8207ef..0adc7d245b3d 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -442,7 +442,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); if (fattr->valid & NFS_ATTR_FATTR_CHANGE) inode->i_version = fattr->change_attr; - else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR)) + else nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR | NFS_INO_REVAL_PAGECACHE); if (fattr->valid & NFS_ATTR_FATTR_SIZE) @@ -1692,7 +1692,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfs_force_lookup_revalidate(inode); inode->i_version = fattr->change_attr; } - } else if (server->caps & NFS_CAP_CHANGE_ATTR) + } else nfsi->cache_validity |= save_cache_validity; if (fattr->valid & NFS_ATTR_FATTR_MTIME) { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 9264994ec9d3..c85ffe67b5f3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -8591,7 +8591,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { .minor_version = 0, .init_caps = NFS_CAP_READDIRPLUS | NFS_CAP_ATOMIC_OPEN - | NFS_CAP_CHANGE_ATTR | NFS_CAP_POSIX_LOCK, .init_client = nfs40_init_client, .shutdown_client = nfs40_shutdown_client, @@ -8617,7 +8616,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { .minor_version = 1, .init_caps = NFS_CAP_READDIRPLUS | NFS_CAP_ATOMIC_OPEN - | NFS_CAP_CHANGE_ATTR | NFS_CAP_POSIX_LOCK | NFS_CAP_STATEID_NFSV41 | NFS_CAP_ATOMIC_OPEN_V1, @@ -8640,7 +8638,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { .minor_version = 2, .init_caps = NFS_CAP_READDIRPLUS | NFS_CAP_ATOMIC_OPEN - | NFS_CAP_CHANGE_ATTR | NFS_CAP_POSIX_LOCK | NFS_CAP_STATEID_NFSV41 | NFS_CAP_ATOMIC_OPEN_V1 diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index a2ea1491d3df..20bc8e51b161 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -220,7 +220,7 @@ struct nfs_server { #define NFS_CAP_SYMLINKS (1U << 2) #define NFS_CAP_ACLS (1U << 3) #define NFS_CAP_ATOMIC_OPEN (1U << 4) -#define NFS_CAP_CHANGE_ATTR (1U << 5) +/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */ #define NFS_CAP_FILEID (1U << 6) #define NFS_CAP_MODE (1U << 7) #define NFS_CAP_NLINK (1U << 8) -- cgit v1.2.3-59-g8ed1b From 8a7330130470e173832385861c1aa6e37a4e50d2 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Wed, 15 Jul 2015 11:48:14 +0800 Subject: btrfs: Fix lockdep warning of btrfs_run_delayed_iputs() Liu Bo reported a lockdep warning of delayed_iput_sem in xfstests generic/241: [ 2061.345955] ============================================= [ 2061.346027] [ INFO: possible recursive locking detected ] [ 2061.346027] 4.1.0+ #268 Tainted: G W [ 2061.346027] 
--------------------------------------------- [ 2061.346027] btrfs-cleaner/3045 is trying to acquire lock: [ 2061.346027] (&fs_info->delayed_iput_sem){++++..}, at: [] btrfs_run_delayed_iputs+0x6b/0x100 [ 2061.346027] but task is already holding lock: [ 2061.346027] (&fs_info->delayed_iput_sem){++++..}, at: [] btrfs_run_delayed_iputs+0x6b/0x100 [ 2061.346027] other info that might help us debug this: [ 2061.346027] Possible unsafe locking scenario: [ 2061.346027] CPU0 [ 2061.346027] ---- [ 2061.346027] lock(&fs_info->delayed_iput_sem); [ 2061.346027] lock(&fs_info->delayed_iput_sem); [ 2061.346027] *** DEADLOCK *** It is rarely happened, about 1/400 in my test env. The reason is recursion of btrfs_run_delayed_iputs(): cleaner_kthread -> btrfs_run_delayed_iputs() *1 -> get delayed_iput_sem lock *2 -> iput() -> ... -> btrfs_commit_transaction() -> btrfs_run_delayed_iputs() *1 -> get delayed_iput_sem lock (dead lock) *2 *1: recursion of btrfs_run_delayed_iputs() *2: warning of lockdep about delayed_iput_sem When fs is in high stress, new iputs may added into fs_info->delayed_iputs list when btrfs_run_delayed_iputs() is running, which cause second btrfs_run_delayed_iputs() run into down_read(&fs_info->delayed_iput_sem) again, and cause above lockdep warning. Actually, it will not cause real problem because both locks are read lock, but to avoid lockdep warning, we can do a fix. Fix: Don't do btrfs_run_delayed_iputs() in btrfs_commit_transaction() for cleaner_kthread thread to break above recursion path. cleaner_kthread is calling btrfs_run_delayed_iputs() explicitly in code, and don't need to call btrfs_run_delayed_iputs() again in btrfs_commit_transaction(), it also give us a bonus to avoid stack overflow. Test: No above lockdep warning after patch in 1200 generic/241 tests. Reported-by: Liu Bo Signed-off-by: Zhao Lei Reviewed-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/transaction.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 51e0f0d0053e..f5021fcb154e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -2152,7 +2152,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, kmem_cache_free(btrfs_trans_handle_cachep, trans); - if (current != root->fs_info->transaction_kthread) + if (current != root->fs_info->transaction_kthread && + current != root->fs_info->cleaner_kthread) btrfs_run_delayed_iputs(root); return ret; -- cgit v1.2.3-59-g8ed1b From 95ab1f64908795a2edd6b847eca94a0c63a44be4 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Wed, 15 Jul 2015 21:02:09 +0800 Subject: btrfs: Avoid NULL pointer dereference of free_extent_buffer when read_tree_block() fail When read_tree_block() failed, we can see following dmesg: [ 134.371389] BUG: unable to handle kernel NULL pointer dereference at 0000000000000063 [ 134.372236] IP: [] free_extent_buffer+0x21/0x90 [ 134.372236] PGD 0 [ 134.372236] Oops: 0000 [#1] SMP [ 134.372236] Modules linked in: [ 134.372236] CPU: 0 PID: 2289 Comm: mount Not tainted 4.2.0-rc1_HEAD_c65b99f046843d2455aa231747b5a07a999a9f3d_+ #115 [ 134.372236] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5.1-0-g8936dbb-20141113_115728-nilsson.home.kraxel.org 04/01/2014 [ 134.372236] task: ffff88003b6e1a00 ti: ffff880011e60000 task.ti: ffff880011e60000 [ 134.372236] RIP: 0010:[] [] free_extent_buffer+0x21/0x90 ... 
[ 134.372236] Call Trace: [ 134.372236] [] free_root_extent_buffers+0x91/0xb0 [ 134.372236] [] free_root_pointers+0x17d/0x190 [ 134.372236] [] open_ctree+0x1ca0/0x25b0 [ 134.372236] [] ? disk_name+0x97/0xb0 [ 134.372236] [] btrfs_mount+0x8fa/0xab0 ... Reason: read_tree_block() changed to return error number on fail, and this value(not NULL) is set to tree_root->node, then subsequent code will run to: free_root_pointers() ->free_root_extent_buffers() ->free_extent_buffer() ->atomic_read((extent_buffer *)(-E_XXX)->refs); and trigger above error. Fix: Set tree_root->node to NULL on fail to make error_handle code happy. Signed-off-by: Zhao Lei Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e5aad7f535aa..84cbbb2d562e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2842,6 +2842,7 @@ int open_ctree(struct super_block *sb, !extent_buffer_uptodate(chunk_root->node)) { printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", sb->s_id); + chunk_root->node = NULL; goto fail_tree_roots; } btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); @@ -2879,7 +2880,7 @@ retry_root_backup: !extent_buffer_uptodate(tree_root->node)) { printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", sb->s_id); - + tree_root->node = NULL; goto recovery_tree_root; } -- cgit v1.2.3-59-g8ed1b From 3e303ea60db3f0222c25a13f39a7cca7bf860df0 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Fri, 17 Jul 2015 23:45:23 +0800 Subject: btrfs: its btrfs_err() instead of btrfs_error() sorry I indented to use btrfs_err() and I have no idea how btrfs_error() got there. infact I was thinking about these kind of oversights since these two func are too closely named. Signed-off-by: Anand Jain Reviewed-by: Liu Bo Reviewed-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/dev-replace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 862fbc206755..564a7de17d99 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -378,7 +378,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device); if (ret) - btrfs_error(root->fs_info, ret, "kobj add dev failed"); + btrfs_err(root->fs_info, "kobj add dev failed %d\n", ret); printk_in_rcu(KERN_INFO "BTRFS: dev_replace from %s (devid %llu) to %s started\n", -- cgit v1.2.3-59-g8ed1b From 00d80e342c0f4f1990ab69f594ee1e2348e51dd9 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 20 Jul 2015 14:56:20 +0100 Subject: Btrfs: fix quick exhaustion of the system array in the superblock Omar reported that after commit 4fbcdf669454 ("Btrfs: fix -ENOSPC when finishing block group creation"), introduced in 4.2-rc1, the following test was failing due to exhaustion of the system array in the superblock: #!/bin/bash truncate -s 100T big.img mkfs.btrfs big.img mount -o loop big.img /mnt/loop num=5 sz=10T for ((i = 0; i < $num; i++)); do echo fallocate $i $sz fallocate -l $sz /mnt/loop/testfile$i done btrfs filesystem sync /mnt/loop for ((i = 0; i < $num; i++)); do echo rm $i rm /mnt/loop/testfile$i btrfs filesystem sync /mnt/loop done umount /mnt/loop This made btrfs_add_system_chunk() fail with -EFBIG due to excessive allocation of system block groups. 
This happened because the test creates a large number of data block groups per transaction and when committing the transaction we start the writeout of the block group caches for all the new new (dirty) block groups, which results in pre-allocating space for each block group's free space cache using the same transaction handle. That in turn often leads to creation of more block groups, and all get attached to the new_bgs list of the same transaction handle to the point of getting a list with over 1500 elements, and creation of new block groups leads to the need of reserving space in the chunk block reserve and often creating a new system block group too. So that made us quickly exhaust the chunk block reserve/system space info, because as of the commit mentioned before, we do reserve space for each new block group in the chunk block reserve, unlike before where we would not and would at most allocate one new system block group and therefore would only ensure that there was enough space in the system space info to allocate 1 new block group even if we ended up allocating thousands of new block groups using the same transaction handle. That worked most of the time because the computed required space at check_system_chunk() is very pessimistic (assumes a chunk tree height of BTRFS_MAX_LEVEL/8 and that all nodes/leafs in a path will be COWed and split) and since the updates to the chunk tree all happen at btrfs_create_pending_block_groups it is unlikely that a path needs to be COWed more than once (unless writepages() for the btree inode is called by mm in between) and that compensated for the need of creating any new nodes/leads in the chunk tree. So fix this by ensuring we don't accumulate a too large list of new block groups in a transaction's handles new_bgs list, inserting/updating the chunk tree for all accumulated new block groups and releasing the unused space from the chunk block reserve whenever the list becomes sufficiently large. This is a generic solution even though the problem currently can only happen when starting the writeout of the free space caches for all dirty block groups (btrfs_start_dirty_block_groups()). Reported-by: Omar Sandoval Signed-off-by: Filipe Manana Tested-by: Omar Sandoval Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 171312d51799..07204bf601ed 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4227,6 +4227,24 @@ out: space_info->chunk_alloc = 0; spin_unlock(&space_info->lock); mutex_unlock(&fs_info->chunk_mutex); + /* + * When we allocate a new chunk we reserve space in the chunk block + * reserve to make sure we can COW nodes/leafs in the chunk tree or + * add new nodes/leafs to it if we end up needing to do it when + * inserting the chunk item and updating device items as part of the + * second phase of chunk allocation, performed by + * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a + * large number of new block groups to create in our transaction + * handle's new_bgs list to avoid exhausting the chunk block reserve + * in extreme cases - like having a single transaction create many new + * block groups when starting to write out the free space caches of all + * the block groups that were made dirty during the lifetime of the + * transaction. 
+ */ + if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { + btrfs_create_pending_block_groups(trans, trans->root); + btrfs_trans_release_chunk_metadata(trans); + } return ret; } -- cgit v1.2.3-59-g8ed1b From f2d0a123bcf16d1a9cf7942ddc98e0ef77862c2b Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 17 Jul 2015 14:15:30 -0500 Subject: mnt: Clarify and correct the disconnect logic in umount_tree rmdir mntpoint will result in an infinite loop when there is a mount locked on the mountpoint in another mount namespace. This is because the logic to test to see if a mount should be disconnected in umount_tree is buggy. Move the logic to decide if a mount should remain connected to it's mountpoint into it's own function disconnect_mount so that clarity of expression instead of terseness of expression becomes a virtue. When the conditions where it is invalid to leave a mount connected are first ruled out, the logic for deciding if a mount should be disconnected becomes much clearer and simpler. Fixes: e0c9c0afd2fc958ffa34b697972721d81df8a56f mnt: Update detach_mounts to leave mounts connected Fixes: ce07d891a0891d3c0d0c2d73d577490486b809e1 mnt: Honor MNT_LOCKED when detaching mounts Cc: stable@vger.kernel.org Signed-off-by: "Eric W. Biederman" --- fs/namespace.c | 35 +++++++++++++++++++++++++++++++---- fs/pnode.h | 2 -- 2 files changed, 31 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/namespace.c b/fs/namespace.c index c7cb8a526c05..7829eab0b706 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1361,6 +1361,36 @@ enum umount_tree_flags { UMOUNT_PROPAGATE = 2, UMOUNT_CONNECTED = 4, }; + +static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how) +{ + /* Leaving mounts connected is only valid for lazy umounts */ + if (how & UMOUNT_SYNC) + return true; + + /* A mount without a parent has nothing to be connected to */ + if (!mnt_has_parent(mnt)) + return true; + + /* Because the reference counting rules change when mounts are + * unmounted and connected, umounted mounts may not be + * connected to mounted mounts. + */ + if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) + return true; + + /* Has it been requested that the mount remain connected? */ + if (how & UMOUNT_CONNECTED) + return false; + + /* Is the mount locked such that it needs to remain connected? */ + if (IS_MNT_LOCKED(mnt)) + return false; + + /* By default disconnect the mount */ + return true; +} + /* * mount_lock must be held * namespace_sem must be held for write @@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how) if (how & UMOUNT_SYNC) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; - disconnect = !(((how & UMOUNT_CONNECTED) && - mnt_has_parent(p) && - (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) || - IS_MNT_LOCKED_AND_LAZY(p)); + disconnect = disconnect_mount(p, how); pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, disconnect ? &unmounted : NULL); diff --git a/fs/pnode.h b/fs/pnode.h index 7114ce6e6b9e..0fcdbe7ca648 100644 --- a/fs/pnode.h +++ b/fs/pnode.h @@ -20,8 +20,6 @@ #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) -#define IS_MNT_LOCKED_AND_LAZY(m) \ - (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED) #define CL_EXPIRE 0x01 #define CL_SLAVE 0x02 -- cgit v1.2.3-59-g8ed1b From fe78fcc85a2046c51f1535710996860557eeec20 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Fri, 17 Jul 2015 14:54:27 -0500 Subject: mnt: In detach_mounts detach the appropriate unmounted mount The handling of in detach_mounts of unmounted but connected mounts is buggy and can lead to an infinite loop. Correct the handling of unmounted mounts in detach_mount. When the mountpoint of an unmounted but connected mount is connected to a dentry, and that dentry is deleted we need to disconnect that mount from the parent mount and the deleted dentry. Nothing changes for the unmounted and connected children. They can be safely ignored. Cc: stable@vger.kernel.org Fixes: ce07d891a0891d3c0d0c2d73d577490486b809e1 mnt: Honor MNT_LOCKED when detaching mounts Signed-off-by: "Eric W. Biederman" --- fs/namespace.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/namespace.c b/fs/namespace.c index 7829eab0b706..2b8aa15fd6df 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1565,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry) while (!hlist_empty(&mp->m_list)) { mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); if (mnt->mnt.mnt_flags & MNT_UMOUNT) { - struct mount *p, *tmp; - list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { - hlist_add_head(&p->mnt_umount.s_list, &unmounted); - umount_mnt(p); - } + hlist_add_head(&mnt->mnt_umount.s_list, &unmounted); + umount_mnt(mnt); } else umount_tree(mnt, UMOUNT_CONNECTED); } -- cgit v1.2.3-59-g8ed1b From 5aa2a96b34073fb775917fbc7d5578a04164b9f7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 23 Jul 2015 14:27:09 -0400 Subject: block: export bio_associate_*() and wbc_account_io() bio_associate_blkcg(), bio_associate_current() and wbc_account_io() are used to implement cgroup writeback support for filesystems and thus need to be exported. Export them. Signed-off-by: Tejun Heo Reported-by: Stephen Rothwell Signed-off-by: Jens Axboe --- block/bio.c | 2 ++ fs/fs-writeback.c | 1 + 2 files changed, 3 insertions(+) (limited to 'fs') diff --git a/block/bio.c b/block/bio.c index 2a00d349cd68..b671a0ff731c 100644 --- a/block/bio.c +++ b/block/bio.c @@ -2009,6 +2009,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css) bio->bi_css = blkcg_css; return 0; } +EXPORT_SYMBOL_GPL(bio_associate_blkcg); /** * bio_associate_current - associate a bio with %current @@ -2039,6 +2040,7 @@ int bio_associate_current(struct bio *bio) bio->bi_css = task_get_css(current, blkio_cgrp_id); return 0; } +EXPORT_SYMBOL_GPL(bio_associate_current); /** * bio_disassociate_task - undo bio_associate_current() diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index f0520bcf2094..518c6294bf6c 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page, else wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); } +EXPORT_SYMBOL_GPL(wbc_account_io); /** * inode_congested - test whether an inode is congested -- cgit v1.2.3-59-g8ed1b From 548aedac5128e4bb256bab5c8e1db88e626697fc Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Mon, 13 Jul 2015 17:44:14 -0700 Subject: f2fs: handle error cases in move_encrypted_block This patch fixes some missing error handlers. 
Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/gc.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index e1e73617d13b..883a841dfc6d 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -556,27 +556,34 @@ static void move_encrypted_block(struct inode *inode, block_t bidx) if (!fio.encrypted_page) goto put_out; - f2fs_submit_page_bio(&fio); + err = f2fs_submit_page_bio(&fio); + if (err) + goto put_page_out; + + /* write page */ + lock_page(fio.encrypted_page); + + if (unlikely(!PageUptodate(fio.encrypted_page))) + goto put_page_out; + if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) + goto put_page_out; + + set_page_writeback(fio.encrypted_page); /* allocate block address */ f2fs_wait_on_page_writeback(dn.node_page, NODE); - allocate_data_block(fio.sbi, NULL, fio.blk_addr, &fio.blk_addr, &sum, CURSEG_COLD_DATA); - dn.data_blkaddr = fio.blk_addr; - - /* write page */ - lock_page(fio.encrypted_page); - set_page_writeback(fio.encrypted_page); fio.rw = WRITE_SYNC; f2fs_submit_page_mbio(&fio); + dn.data_blkaddr = fio.blk_addr; set_data_blkaddr(&dn); f2fs_update_extent_cache(&dn); set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); if (page->index == 0) set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); - +put_page_out: f2fs_put_page(fio.encrypted_page, 1); put_out: f2fs_put_dnode(&dn); -- cgit v1.2.3-59-g8ed1b From 6282adbf932c226f76e1b83e074448c79976fe75 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Sat, 25 Jul 2015 00:29:17 -0700 Subject: f2fs: call set_page_dirty to attach i_wb for cgroup The cgroup attaches inode->i_wb via mark_inode_dirty and when set_page_writeback is called, __inc_wb_stat() updates i_wb's stat. So, we need to explicitly call set_page_dirty->__mark_inode_dirty in prior to any writebacking pages. This patch should resolve the following kernel panic reported by Andreas Reis. https://bugzilla.kernel.org/show_bug.cgi?id=101801 --- Comment #2 from Andreas Reis --- BUG: unable to handle kernel NULL pointer dereference at 00000000000000a8 IP: [] __percpu_counter_add+0x1a/0x90 PGD 2951ff067 PUD 2df43f067 PMD 0 Oops: 0000 [#1] PREEMPT SMP Modules linked in: CPU: 7 PID: 10356 Comm: gcc Tainted: G W 4.2.0-1-cu #1 Hardware name: Gigabyte Technology Co., Ltd. 
G1.Sniper M5/G1.Sniper M5, BIOS T01 02/03/2015 task: ffff880295044f80 ti: ffff880295140000 task.ti: ffff880295140000 RIP: 0010:[] [] __percpu_counter_add+0x1a/0x90 RSP: 0018:ffff880295143ac8 EFLAGS: 00010082 RAX: 0000000000000003 RBX: ffffea000a526d40 RCX: 0000000000000001 RDX: 0000000000000020 RSI: 0000000000000001 RDI: 0000000000000088 RBP: ffff880295143ae8 R08: 0000000000000000 R09: ffff88008f69bb30 R10: 00000000fffffffa R11: 0000000000000000 R12: 0000000000000088 R13: 0000000000000001 R14: ffff88041d099000 R15: ffff880084a205d0 FS: 00007f8549374700(0000) GS:ffff88042f3c0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000000000a8 CR3: 000000033e1d5000 CR4: 00000000001406e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Stack: 0000000000000000 ffffea000a526d40 ffff880084a20738 ffff880084a20750 ffff880295143b48 ffffffff811cc91e ffff880000000000 0000000000000296 0000000000000000 ffff880417090198 0000000000000000 ffffea000a526d40 Call Trace: [] __test_set_page_writeback+0xde/0x1d0 [] do_write_data_page+0xe7/0x3a0 [] gc_data_segment+0x5aa/0x640 [] do_garbage_collect+0x138/0x150 [] f2fs_gc+0x1be/0x3e0 [] f2fs_balance_fs+0x81/0x90 [] f2fs_unlink+0x47/0x1d0 [] vfs_unlink+0x109/0x1b0 [] do_unlinkat+0x287/0x2c0 [] SyS_unlink+0x16/0x20 [] entry_SYSCALL_64_fastpath+0x12/0x71 Code: 41 5e 5d c3 0f 1f 00 66 2e 0f 1f 84 00 00 00 00 00 55 48 89 e5 41 55 49 89 f5 41 54 49 89 fc 53 48 83 ec 08 65 ff 05 e6 d9 b6 7e <48> 8b 47 20 48 63 ca 65 8b 18 48 63 db 48 01 f3 48 39 cb 7d 0a RIP [] __percpu_counter_add+0x1a/0x90 RSP CR2: 00000000000000a8 ---[ end trace 5132449a58ed93a3 ]--- note: gcc[10356] exited with preempt_count 2 Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 2 -- fs/f2fs/file.c | 7 ++++--- fs/f2fs/gc.c | 7 ++++++- fs/f2fs/inline.c | 2 ++ fs/f2fs/segment.c | 1 + 5 files changed, 13 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 9bedfa8dd3a5..f71e19a9dd3c 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2072,8 +2072,6 @@ static int f2fs_set_data_page_dirty(struct page *page) return 1; } - mark_inode_dirty(inode); - if (!PageDirty(page)) { __set_page_dirty_nobuffers(page); update_dirty_page(inode, page); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index ada2a3dd701a..b0f38c3b37f4 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1331,12 +1331,13 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) if (ret) return ret; - if (f2fs_is_atomic_file(inode)) + if (f2fs_is_atomic_file(inode)) { + clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); commit_inmem_pages(inode, false); + } ret = f2fs_sync_file(filp, 0, LONG_MAX, 0); mnt_drop_write_file(filp); - clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); return ret; } @@ -1387,8 +1388,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp) f2fs_balance_fs(F2FS_I_SB(inode)); if (f2fs_is_atomic_file(inode)) { - commit_inmem_pages(inode, false); clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); + commit_inmem_pages(inode, false); } if (f2fs_is_volatile_file(inode)) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 883a841dfc6d..22fb5ef37966 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -568,6 +568,11 @@ static void move_encrypted_block(struct inode *inode, block_t bidx) if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) goto put_page_out; + set_page_dirty(fio.encrypted_page); + f2fs_wait_on_page_writeback(fio.encrypted_page, META); + if 
(clear_page_dirty_for_io(fio.encrypted_page)) + dec_page_count(fio.sbi, F2FS_DIRTY_META); + set_page_writeback(fio.encrypted_page); /* allocate block address */ @@ -612,8 +617,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type) .page = page, .encrypted_page = NULL, }; + set_page_dirty(page); f2fs_wait_on_page_writeback(page, DATA); - if (clear_page_dirty_for_io(page)) inode_dec_dirty_pages(inode); set_cold_data(page); diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 38e75fb1e488..a13ffcc32992 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -141,6 +141,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) kunmap_atomic(dst_addr); SetPageUptodate(page); no_update: + set_page_dirty(page); + /* clear dirty state */ dirty = clear_page_dirty_for_io(page); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 1eb343768781..61b97f9cb9f6 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -257,6 +257,7 @@ void commit_inmem_pages(struct inode *inode, bool abort) if (!abort) { lock_page(cur->page); if (cur->page->mapping == inode->i_mapping) { + set_page_dirty(cur->page); f2fs_wait_on_page_writeback(cur->page, DATA); if (clear_page_dirty_for_io(cur->page)) inode_dec_dirty_pages(inode); -- cgit v1.2.3-59-g8ed1b From 03d5eb65b53889fe98a5ecddfe205c16e3093190 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 27 Jul 2015 10:23:19 -0400 Subject: NFS: Fix a memory leak in nfs_do_recoalesce If the function exits early, then we must put those requests that were not processed back onto the &mirror->pg_list so they can be cleaned up by nfs_pgio_error(). Fixes: a7d42ddb30997 ("nfs: add mirroring support to pgio layer") Cc: stable@vger.kernel.org # v4.0+ Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 1da68d3b1eda..8ea5920fb777 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -1109,8 +1109,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) nfs_list_remove_request(req); if (__nfs_pageio_add_request(desc, req)) continue; - if (desc->pg_error < 0) + if (desc->pg_error < 0) { + list_splice_tail(&head, &mirror->pg_list); + mirror->pg_recoalesce = 1; return 0; + } break; } } while (mirror->pg_recoalesce); -- cgit v1.2.3-59-g8ed1b From d4c30454db732d5a377abfc309c7a629f3bbaeb0 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 24 Jul 2015 12:31:33 -0400 Subject: NFS: Don't clear desc->pg_moreio in nfs_do_recoalesce() Recoalescing does not affect whether or not we've already sent off I/O, and doing so means that we end up sending a bunch of synchronous writes for cases where we actually need to be using unstable writes. Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 8ea5920fb777..4984bbe55ff1 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -1100,8 +1100,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) mirror->pg_base = 0; mirror->pg_recoalesce = 0; - desc->pg_moreio = 0; - while (!list_empty(&head)) { struct nfs_page *req; -- cgit v1.2.3-59-g8ed1b From bdcc2cd14e4e86f1fb19c78e191a1b8da3d6dea0 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 23 Jul 2015 11:08:43 -0400 Subject: NFSv4.2: handle NFS-specific llseek errors Handle NFS-specific llseek errors instead of letting them leak out to userspace.
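For illustration only (this snippet is not part of the patch below, and the helper name is made up): once the NFS-level errors are translated, a userspace caller probing for SEEK_HOLE/SEEK_DATA support on an NFS file should simply see EOPNOTSUPP when the server lacks SEEK, e.g.:

	#define _GNU_SOURCE
	#include <errno.h>
	#include <unistd.h>

	/* Hypothetical capability probe: returns 1 if the server behind fd
	 * supports SEEK_HOLE, 0 if it reports EOPNOTSUPP, -1 on other errors. */
	static int seek_hole_supported(int fd)
	{
		off_t hole = lseek(fd, 0, SEEK_HOLE);

		if (hole != (off_t)-1)
			return 1;
		return errno == EOPNOTSUPP ? 0 : -1;
	}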
Reported-by: Benjamin Coddington Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs42proc.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index f486b80f927a..d731bbf974aa 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -135,7 +135,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len) return err; } -loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) +static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) { struct inode *inode = file_inode(filep); struct nfs42_seek_args args = { @@ -171,6 +171,23 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); } +loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) +{ + struct nfs_server *server = NFS_SERVER(file_inode(filep)); + struct nfs4_exception exception = { }; + int err; + + do { + err = _nfs42_proc_llseek(filep, offset, whence); + if (err == -ENOTSUPP) + return -EOPNOTSUPP; + err = nfs4_handle_exception(server, err, &exception); + } while (exception.retry); + + return err; +} + + static void nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata) { -- cgit v1.2.3-59-g8ed1b From 3471648a7569512e10f154cdfe5076c341a5c099 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 10 Jul 2015 15:58:42 -0400 Subject: nfs: plug memory leak when ->prepare_layoutcommit fails "data" is currently leaked when the prepare_layoutcommit operation returns an error. Put the cred before taking the spinlock in that case, take the lock and then goto out_unlock which will drop the lock and then free "data". Signed-off-by: Jeff Layton Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 18aa3b7962eb..70bf706b1090 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -2221,13 +2221,12 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync) if (ld->prepare_layoutcommit) { status = ld->prepare_layoutcommit(&data->args); if (status) { + put_rpccred(data->cred); spin_lock(&inode->i_lock); set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); if (end_pos > nfsi->layout->plh_lwb) nfsi->layout->plh_lwb = end_pos; - spin_unlock(&inode->i_lock); - put_rpccred(data->cred); - goto clear_layoutcommitting; + goto out_unlock; } } -- cgit v1.2.3-59-g8ed1b From a49c269111a5b3c1fd2a98f36fa27423b94549f8 Mon Sep 17 00:00:00 2001 From: Kinglong Mee Date: Mon, 27 Jul 2015 15:31:38 +0800 Subject: nfs: Fix an oops caused by using other thread's stack space in ASYNC mode An oops caused by using other thread's stack space in sunrpc ASYNC sending thread. [ 9839.007187] ------------[ cut here ]------------ [ 9839.007923] kernel BUG at fs/nfs/nfs4xdr.c:910! 
[ 9839.008069] invalid opcode: 0000 [#1] SMP [ 9839.008069] Modules linked in: blocklayoutdriver rpcsec_gss_krb5 nfsv4 dns_resolver nfs fscache snd_hda_codec_generic snd_hda_intel snd_hda_controller snd_hda_codec snd_hwdep snd_seq snd_seq_device snd_pcm joydev iosf_mbi crct10dif_pclmul snd_timer crc32_pclmul crc32c_intel ghash_clmulni_intel snd soundcore ppdev pvpanic parport_pc i2c_piix4 serio_raw virtio_balloon parport acpi_cpufreq nfsd nfs_acl lockd grace auth_rpcgss sunrpc qxl drm_kms_helper virtio_net virtio_console virtio_blk ttm drm virtio_pci virtio_ring virtio ata_generic pata_acpi [ 9839.008069] CPU: 0 PID: 308 Comm: kworker/0:1H Not tainted 4.0.0-0.rc4.git1.3.fc23.x86_64 #1 [ 9839.008069] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 [ 9839.008069] Workqueue: rpciod rpc_async_schedule [sunrpc] [ 9839.008069] task: ffff8800d8b4d8e0 ti: ffff880036678000 task.ti: ffff880036678000 [ 9839.008069] RIP: 0010:[] [] reserve_space.part.73+0x9/0x10 [nfsv4] [ 9839.008069] RSP: 0018:ffff88003667ba58 EFLAGS: 00010246 [ 9839.008069] RAX: 0000000000000000 RBX: 000000001fc15e18 RCX: ffff8800c0193800 [ 9839.008069] RDX: ffff8800e4ae3f24 RSI: 000000001fc15e2c RDI: ffff88003667bcd0 [ 9839.008069] RBP: ffff88003667ba58 R08: ffff8800d9173008 R09: 0000000000000003 [ 9839.008069] R10: ffff88003667bcd0 R11: 000000000000000c R12: 0000000000010000 [ 9839.008069] R13: ffff8800d9173350 R14: 0000000000000000 R15: ffff8800c0067b98 [ 9839.008069] FS: 0000000000000000(0000) GS:ffff88011fc00000(0000) knlGS:0000000000000000 [ 9839.008069] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 9839.008069] CR2: 00007f988c9c8bb0 CR3: 00000000d99b6000 CR4: 00000000000407f0 [ 9839.008069] Stack: [ 9839.008069] ffff88003667bbc8 ffffffffa03412c5 00000000c6c55680 ffff880000000003 [ 9839.008069] 0000000000000088 00000010c6c55680 0001000000000002 ffffffff816e87e9 [ 9839.008069] 0000000000000000 00000000477290e2 ffff88003667bab8 ffffffff81327ba3 [ 9839.008069] Call Trace: [ 9839.008069] [] encode_attrs+0x435/0x530 [nfsv4] [ 9839.008069] [] ? inet_sendmsg+0x69/0xb0 [ 9839.008069] [] ? selinux_socket_sendmsg+0x23/0x30 [ 9839.008069] [] ? do_sock_sendmsg+0x9f/0xc0 [ 9839.008069] [] ? kernel_sendmsg+0x58/0x70 [ 9839.008069] [] ? xdr_reserve_space+0x20/0x170 [sunrpc] [ 9839.008069] [] ? xdr_reserve_space+0x20/0x170 [sunrpc] [ 9839.008069] [] ? nfs4_xdr_enc_open_noattr+0x130/0x130 [nfsv4] [ 9839.008069] [] encode_open+0x2d5/0x340 [nfsv4] [ 9839.008069] [] ? nfs4_xdr_enc_open_noattr+0x130/0x130 [nfsv4] [ 9839.008069] [] ? xdr_encode_opaque+0x19/0x20 [sunrpc] [ 9839.008069] [] ? encode_string+0x2b/0x40 [nfsv4] [ 9839.008069] [] nfs4_xdr_enc_open+0xb3/0x140 [nfsv4] [ 9839.008069] [] rpcauth_wrap_req+0xac/0xf0 [sunrpc] [ 9839.008069] [] call_transmit+0x18b/0x2d0 [sunrpc] [ 9839.008069] [] ? call_decode+0x860/0x860 [sunrpc] [ 9839.008069] [] ? call_decode+0x860/0x860 [sunrpc] [ 9839.008069] [] __rpc_execute+0x90/0x460 [sunrpc] [ 9839.008069] [] rpc_async_schedule+0x15/0x20 [sunrpc] [ 9839.008069] [] process_one_work+0x1bb/0x410 [ 9839.008069] [] worker_thread+0x53/0x470 [ 9839.008069] [] ? process_one_work+0x410/0x410 [ 9839.008069] [] ? process_one_work+0x410/0x410 [ 9839.008069] [] kthread+0xd8/0xf0 [ 9839.008069] [] ? kthread_worker_fn+0x180/0x180 [ 9839.008069] [] ret_from_fork+0x58/0x90 [ 9839.008069] [] ? 
kthread_worker_fn+0x180/0x180 [ 9839.008069] Code: 00 00 48 c7 c7 21 fa 37 a0 e8 94 1c d6 e0 c6 05 d2 17 05 00 01 8b 03 eb d7 66 0f 1f 84 00 00 00 00 00 66 66 66 66 90 55 48 89 e5 <0f> 0b 0f 1f 44 00 00 66 66 66 66 90 55 48 89 e5 41 54 53 89 f3 [ 9839.008069] RIP [] reserve_space.part.73+0x9/0x10 [nfsv4] [ 9839.008069] RSP [ 9839.071114] ---[ end trace cc14c03adb522e94 ]--- Signed-off-by: Kinglong Mee Signed-off-by: Trond Myklebust --- fs/nfs/internal.h | 21 +++++++++++++++++++++ fs/nfs/nfs4proc.c | 10 +++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 9e6475bc5ba2..797013822765 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -296,6 +296,22 @@ extern struct rpc_procinfo nfs4_procedures[]; #ifdef CONFIG_NFS_V4_SECURITY_LABEL extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags); +static inline struct nfs4_label * +nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src) +{ + if (!dst || !src) + return NULL; + + if (src->len > NFS4_MAXLABELLEN) + return NULL; + + dst->lfs = src->lfs; + dst->pi = src->pi; + dst->len = src->len; + memcpy(dst->label, src->label, src->len); + + return dst; +} static inline void nfs4_label_free(struct nfs4_label *label) { if (label) { @@ -316,6 +332,11 @@ static inline void nfs4_label_free(void *label) {} static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi) { } +static inline struct nfs4_label * +nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src) +{ + return NULL; +} #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ /* proc.c */ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index c85ffe67b5f3..e94a964e9b4f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -912,6 +912,7 @@ struct nfs4_opendata { struct nfs_open_confirmres c_res; struct nfs4_string owner_name; struct nfs4_string group_name; + struct nfs4_label *a_label; struct nfs_fattr f_attr; struct nfs4_label *f_label; struct dentry *dir; @@ -1015,6 +1016,10 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, if (IS_ERR(p->f_label)) goto err_free_p; + p->a_label = nfs4_label_alloc(server, gfp_mask); + if (IS_ERR(p->a_label)) + goto err_free_f; + alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); if (IS_ERR(p->o_arg.seqid)) @@ -1043,7 +1048,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, p->o_arg.server = server; p->o_arg.bitmask = nfs4_bitmask(server, label); p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; - p->o_arg.label = label; + p->o_arg.label = nfs4_label_copy(p->a_label, label); p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); switch (p->o_arg.claim) { case NFS4_OPEN_CLAIM_NULL: @@ -1076,6 +1081,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, return p; err_free_label: + nfs4_label_free(p->a_label); +err_free_f: nfs4_label_free(p->f_label); err_free_p: kfree(p); @@ -1095,6 +1102,7 @@ static void nfs4_opendata_free(struct kref *kref) nfs4_put_open_state(p->state); nfs4_put_state_owner(p->owner); + nfs4_label_free(p->a_label); nfs4_label_free(p->f_label); dput(p->dir); -- cgit v1.2.3-59-g8ed1b From b2442c5a7fe92cca08437070c8a45a7aa0d1703e Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 29 Jul 2015 11:48:00 +1000 Subject: xfs: call dax_fault on read page faults for DAX When modifying the patch series to handle the XFS MMAP_LOCK nesting of page faults, I botched the conversion of the read page 
fault path, and so it is only ever calling through the page cache. Re-add the necessary __dax_fault() call for such files. Because the get_blocks callback on read faults may not set up the mapping buffer correctly to allow unwritten extent completion to be run, we need to allow callers of __dax_fault() to pass a null complete_unwritten() callback. The DAX code always zeros the unwritten page when it is read faulted so there are no stale data exposure issues with not doing the conversion. The only downside will be the potential for increased CPU overhead on repeated read faults of the same page. If this proves to be a problem, then the filesystem needs to fix its get_block callback and provide a convert_unwritten() callback to the read fault path. Signed-off-by: Dave Chinner Reviewed-by: Matthew Wilcox Reviewed-by: Brian Foster Signed-off-by: Dave Chinner --- fs/dax.c | 14 ++++++++++++-- fs/xfs/xfs_file.c | 21 +++++++++++++++------ 2 files changed, 27 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/dax.c b/fs/dax.c index c3e21ccfc358..a7f77e1fa18c 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -319,6 +319,12 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, * @vma: The virtual memory area where the fault occurred * @vmf: The description of the fault * @get_block: The filesystem method used to translate file offsets to blocks + * @complete_unwritten: The filesystem method used to convert unwritten blocks + * to written so the data written to them is exposed. This is required for + * required by write faults for filesystems that will return unwritten + * extent mappings from @get_block, but it is optional for reads as + * dax_insert_mapping() will always zero unwritten blocks. If the fs does + * not support unwritten extents, the it should pass NULL. * * When a page fault occurs, filesystems may call this helper in their * fault handler for DAX files. __dax_fault() assumes the caller has done all @@ -437,8 +443,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, * as for normal BH based IO completions. */ error = dax_insert_mapping(inode, &bh, vma, vmf); - if (buffer_unwritten(&bh)) - complete_unwritten(&bh, !error); + if (buffer_unwritten(&bh)) { + if (complete_unwritten) + complete_unwritten(&bh, !error); + else + WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE)); + } out: if (error == -ENOMEM) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index f0e8249722d4..db4acc1c3e73 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1514,18 +1514,27 @@ xfs_filemap_fault( struct vm_area_struct *vma, struct vm_fault *vmf) { - struct xfs_inode *ip = XFS_I(file_inode(vma->vm_file)); + struct inode *inode = file_inode(vma->vm_file); int ret; - trace_xfs_filemap_fault(ip); + trace_xfs_filemap_fault(XFS_I(inode)); /* DAX can shortcut the normal fault path on write faults! */ - if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(VFS_I(ip))) + if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode)) return xfs_filemap_page_mkwrite(vma, vmf); - xfs_ilock(ip, XFS_MMAPLOCK_SHARED); - ret = filemap_fault(vma, vmf); - xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); + xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); + if (IS_DAX(inode)) { + /* + * we do not want to trigger unwritten extent conversion on read + * faults - that is unnecessary overhead and would also require + * changes to xfs_get_blocks_direct() to map unwritten extent + * ioend for conversion on read-only mappings.
+ */ + ret = __dax_fault(vma, vmf, xfs_get_blocks_direct, NULL); + } else + ret = filemap_fault(vma, vmf); xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); return ret; } -- cgit v1.2.3-59-g8ed1b From e3c32ee9e3e747fec01eb38e6610a9157d44c3ea Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 29 Jul 2015 11:48:01 +1000 Subject: xfs: remote attribute headers contain an invalid LSN In recent testing, a system that crashed failed log recovery on restart with a bad symlink buffer magic number: XFS (vda): Starting recovery (logdev: internal) XFS (vda): Bad symlink block magic! XFS: Assertion failed: 0, file: fs/xfs/xfs_log_recover.c, line: 2060 On examination of the log via xfs_logprint, none of the symlink buffers in the log had a bad magic number, nor were any other types of buffer log format headers mis-identified as symlink buffers. Tracing was used to find the buffer the kernel was tripping over, and xfs_db identified its contents as: 000: 5841524d 00000000 00000346 64d82b48 8983e692 d71e4680 a5f49e2c b317576e 020: 00000000 00602038 00000000 006034ce d0020000 00000000 4d4d4d4d 4d4d4d4d 040: 4d4d4d4d 4d4d4d4d 4d4d4d4d 4d4d4d4d 4d4d4d4d 4d4d4d4d 4d4d4d4d 4d4d4d4d 060: 4d4d4d4d 4d4d4d4d 4d4d4d4d 4d4d4d4d 4d4d4d4d 4d4d4d4d 4d4d4d4d 4d4d4d4d ..... This is a remote attribute buffer; remote attribute buffers are notable in that they are not logged but are instead written synchronously by the remote attribute code so that they exist on disk before the attribute transactions are committed to the journal. The above remote attribute block has an invalid LSN in it - cycle 0xd002000, block 0 - which means when log recovery comes along to determine if the transaction that writes to the underlying block should be replayed, it sees a block that has a future LSN and so does not replay the buffer data in the transaction. Instead, it validates the buffer magic number and attaches the buffer verifier to it. It is this buffer magic number check that is failing in the above assert, indicating that we skipped replay due to the LSN of the underlying buffer. The problem here is that the remote attribute buffers cannot have a valid LSN placed into them, because the transaction that contains the attribute tree pointer changes and the block allocation that the attribute data is being written to hasn't yet been committed. Hence the LSN field in the attribute block is completely unwritten, thereby leaving the underlying contents of the block in the LSN field. It could have any value, and hence a future overwrite of the block by log recovery may or may not work correctly. Fix this by always writing an invalid LSN to the remote attribute block, so that any buffer replay in log recovery that needs to write over the remote attribute block will actually occur. We are protected from having old data written over the attribute by the fact that freeing the block before the remote attribute is written will result in the buffer being marked stale in the log and so all changes prior to the buffer stale transaction will be cancelled by log recovery. Hence it is safe to ignore the LSN in the case of synchronously written, unlogged metadata such as remote attribute blocks, and to ensure we do that correctly, we need to write an invalid LSN to all remote attribute blocks to trigger immediate recovery of metadata that is written over the top. As a further protection for filesystems that may already have remote attribute blocks with bad LSNs on disk, change the log recovery code to always trigger immediate recovery of metadata over remote attribute blocks.
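Condensed from the hunks that follow (the helper name here is invented purely for illustration and assumes the usual xfs headers), the invariant the new write verifier enforces is that a remote attribute header always carries the sentinel LSN stamped by xfs_attr3_rmt_hdr_set():

	/* Sketch only: a remote attribute header must never carry a real LSN. */
	static bool xfs_attr3_rmt_lsn_is_sentinel(struct xfs_attr3_rmt_hdr *rmt)
	{
		return rmt->rm_lsn == cpu_to_be64(NULLCOMMITLSN);
	}

Log recovery, for its part, now ignores whatever LSN it finds in such a block and replays over it unconditionally.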
cc: Signed-off-by: Dave Chinner Reviewed-by: Brian Foster Signed-off-by: Dave Chinner --- fs/xfs/libxfs/xfs_attr_remote.c | 29 +++++++++++++++++++++++------ fs/xfs/xfs_log_recover.c | 11 ++++++++--- 2 files changed, 31 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c index 20de88d1bf86..2faec26962e8 100644 --- a/fs/xfs/libxfs/xfs_attr_remote.c +++ b/fs/xfs/libxfs/xfs_attr_remote.c @@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; - struct xfs_buf_log_item *bip = bp->b_fspriv; + int blksize = mp->m_attr_geo->blksize; char *ptr; int len; xfs_daddr_t bno; - int blksize = mp->m_attr_geo->blksize; /* no verification of non-crc buffers */ if (!xfs_sb_version_hascrc(&mp->m_sb)) @@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify( ASSERT(len >= blksize); while (len > 0) { + struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr; + if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_verifier_error(bp); return; } - if (bip) { - struct xfs_attr3_rmt_hdr *rmt; - rmt = (struct xfs_attr3_rmt_hdr *)ptr; - rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn); + /* + * Ensure we aren't writing bogus LSNs to disk. See + * xfs_attr3_rmt_hdr_set() for the explanation. + */ + if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) { + xfs_buf_ioerror(bp, -EFSCORRUPTED); + xfs_verifier_error(bp); + return; } xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF); @@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set( rmt->rm_owner = cpu_to_be64(ino); rmt->rm_blkno = cpu_to_be64(bno); + /* + * Remote attribute blocks are written synchronously, so we don't + * have an LSN that we can stamp in them that makes any sense to log + * recovery. To ensure that log recovery handles overwrites of these + * blocks sanely (i.e. once they've been freed and reallocated as some + * other type of metadata) we need to ensure that the LSN has a value + * that tells log recovery to ignore the LSN and overwrite the buffer + * with whatever is in it's log. To do this, we use the magic + * NULLCOMMITLSN to indicate that the LSN is invalid. + */ + rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN); + return sizeof(struct xfs_attr3_rmt_hdr); } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 01dd228ca05e..480ebba8464f 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -1886,9 +1886,14 @@ xlog_recover_get_buf_lsn( uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; break; case XFS_ATTR3_RMT_MAGIC: - lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); - uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid; - break; + /* + * Remote attr blocks are written synchronously, rather than + * being logged. That means they do not contain a valid LSN + * (i.e. transactionally ordered) in them, and hence any time we + * see a buffer to replay over the top of a remote attribute + * block we should simply do so. + */ + goto recover_immediately; case XFS_SB_MAGIC: lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); uuid = &((struct xfs_dsb *)blk)->sb_uuid; -- cgit v1.2.3-59-g8ed1b From df150ed102baa0e78c06e08e975dfb47147dd677 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 29 Jul 2015 11:48:02 +1000 Subject: xfs: remote attributes need to be considered data We don't log remote attribute contents, and instead write them synchronously before we commit the block allocation and attribute tree update transaction. 
As a result we are writing to the allocated space before the allocation has been made permanent. As a result, we cannot consider this allocation to be a metadata allocation. Metadata allocation can take blocks from the free list and so reuse them before the transaction that freed the block is committed to disk. This behaviour is perfectly fine for journalled metadata changes as log recovery will ensure the free operation is replayed before the overwrite, but for remote attribute writes this is not the case. Hence we have to consider the remote attribute blocks to contain data and allocate accordingly. We do this by dropping the XFS_BMAPI_METADATA flag from the block allocation. This means the allocation will not use blocks that are on the busy list without first ensuring that the freeing transaction has been committed to disk and the blocks removed from the busy list. This ensures we will never overwrite a freed block without first ensuring that it is really free. cc: Signed-off-by: Dave Chinner Reviewed-by: Brian Foster Signed-off-by: Dave Chinner --- fs/xfs/libxfs/xfs_attr_remote.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c index 2faec26962e8..dd714037c322 100644 --- a/fs/xfs/libxfs/xfs_attr_remote.c +++ b/fs/xfs/libxfs/xfs_attr_remote.c @@ -451,14 +451,21 @@ xfs_attr_rmtval_set( /* * Allocate a single extent, up to the size of the value. + * + * Note that we have to consider this a data allocation as we + * write the remote attribute without logging the contents. + * Hence we must ensure that we aren't using blocks that are on + * the busy list so that we don't overwrite blocks which have + * recently been freed but their transactions are not yet + * committed to disk. If we overwrite the contents of a busy + * extent and then crash then the block may not contain the + * correct metadata after log recovery occurs. */ xfs_bmap_init(args->flist, args->firstblock); nmap = 1; error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno, - blkcnt, - XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, - args->firstblock, args->total, &map, &nmap, - args->flist); + blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock, + args->total, &map, &nmap, args->flist); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); -- cgit v1.2.3-59-g8ed1b From 97242f99a013950af63effa0732f8ef7db4e31ec Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 1 Aug 2015 19:59:28 -0400 Subject: link_path_walk(): be careful when failing with ENOTDIR In RCU mode we might end up with dentry evicted just as we check that it's a directory. In such case we should return ECHILD rather than ENOTDIR, so that pathwalk would be retried in non-RCU mode. Breakage had been introduced in commit b18825a - prior to that we were looking at nd->inode, which had been fetched before verifying that ->d_seq was still valid. That form of check would only be satisfied if at some point the pathname prefix would indeed have resolved to a non-directory. The fix consists of checking ->d_seq after we'd run into a non-directory dentry, and failing with ECHILD in case of mismatch. Note that all branches since 3.12 have that problem...
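The reason ECHILD is the right failure mode here is the retry contract of RCU pathwalk: the VFS lookup entry points attempt the walk in RCU mode first and, on ECHILD, simply redo it in ref-walk mode. Roughly (a paraphrase with made-up helper names, not the literal fs/namei.c code):

	/* Sketch of the caller-side contract this fix relies on. */
	err = lookup_rcu_mode(nd);	/* walk with LOOKUP_RCU set */
	if (err == -ECHILD)
		err = lookup_ref_mode(nd);	/* retried without LOOKUP_RCU */

Failing with ENOTDIR in RCU mode, where the dentry may have been evicted under us, would instead turn a transient race into a hard error visible to userspace.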
Signed-off-by: Al Viro --- fs/namei.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index ae4e4c18b2ac..fbbcf0993312 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1954,8 +1954,13 @@ OK: continue; } } - if (unlikely(!d_can_lookup(nd->path.dentry))) + if (unlikely(!d_can_lookup(nd->path.dentry))) { + if (nd->flags & LOOKUP_RCU) { + if (unlazy_walk(nd, NULL, 0)) + return -ECHILD; + } return -ENOTDIR; + } } } -- cgit v1.2.3-59-g8ed1b