Diffstat (limited to 'fs/cifs/cifsfs.c')
-rw-r--r--  fs/cifs/cifsfs.c  173
1 file changed, 122 insertions(+), 51 deletions(-)
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index dca42aa87d30..fe220686bba4 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -26,6 +26,7 @@
#include <linux/random.h>
#include <linux/uuid.h>
#include <linux/xattr.h>
+#include <uapi/linux/magic.h>
#include <net/ipv6.h>
#include "cifsfs.h"
#include "cifspdu.h"
@@ -45,6 +46,7 @@
#include "netlink.h"
#endif
#include "fs_context.h"
+#include "cached_dir.h"
/*
* DOS dates from 1980/1/1 through 2107/12/31
@@ -67,6 +69,34 @@ bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
+
+/*
+ * Global transaction id (XID) information
+ */
+unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
+unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
+unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
+spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
+
+/*
+ * Global counters, updated atomically
+ */
+atomic_t sesInfoAllocCount;
+atomic_t tconInfoAllocCount;
+atomic_t tcpSesNextId;
+atomic_t tcpSesAllocCount;
+atomic_t tcpSesReconnectCount;
+atomic_t tconInfoReconnectCount;
+
+atomic_t mid_count;
+atomic_t buf_alloc_count;
+atomic_t small_buf_alloc_count;
+#ifdef CONFIG_CIFS_STATS2
+atomic_t total_buf_alloc_count;
+atomic_t total_small_buf_alloc_count;
+#endif /* STATS2 */
+struct list_head cifs_tcp_ses_list;
+spinlock_t cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
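The counters above are maintained with the standard atomic helpers. A minimal sketch, not part of this hunk, of how the buffer allocators typically update them (the function names are hypothetical stand-ins): the active count rises and falls with each buffer's lifetime, while the CONFIG_CIFS_STATS2 totals only ever grow.

static void example_buf_get_accounting(void)
{
	atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_buf_alloc_count);	/* lifetime total, never decremented */
#endif
}

static void example_buf_release_accounting(void)
{
	atomic_dec(&buf_alloc_count);		/* only the active count drops on free */
}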
@@ -202,13 +232,16 @@ cifs_read_super(struct super_block *sb)
sb->s_time_max = ts.tv_sec;
}
- sb->s_magic = CIFS_MAGIC_NUMBER;
+ sb->s_magic = CIFS_SUPER_MAGIC;
sb->s_op = &cifs_super_ops;
sb->s_xattr = cifs_xattr_handlers;
rc = super_setup_bdi(sb);
if (rc)
goto out_no_root;
/* tune readahead according to rsize if readahead size not set on mount */
+ if (cifs_sb->ctx->rsize == 0)
+ cifs_sb->ctx->rsize =
+ tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
if (cifs_sb->ctx->rasize)
sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
else
@@ -251,28 +284,18 @@ out_no_root:
static void cifs_kill_sb(struct super_block *sb)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifs_tcon *tcon;
- struct cached_fid *cfid;
/*
* We need to release all dentries for the cached directories
* before we kill the sb.
*/
if (cifs_sb->root) {
+ close_all_cached_dirs(cifs_sb);
+
+ /* finally release root dentry */
dput(cifs_sb->root);
cifs_sb->root = NULL;
}
- tcon = cifs_sb_master_tcon(cifs_sb);
- if (tcon) {
- cfid = &tcon->crfid;
- mutex_lock(&cfid->fid_mutex);
- if (cfid->dentry) {
-
- dput(cfid->dentry);
- cfid->dentry = NULL;
- }
- mutex_unlock(&cfid->fid_mutex);
- }
kill_anon_super(sb);
cifs_umount(cifs_sb);
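For reference, a minimal sketch of what close_all_cached_dirs() has to cover for the master tcon, reconstructed from the lines removed above; the real helper in fs/cifs/cached_dir.c is the authoritative implementation and handles more than this single cached fid.

static void close_all_cached_dirs_sketch(struct cifs_sb_info *cifs_sb)
{
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct cached_fid *cfid;

	if (!tcon)
		return;
	cfid = &tcon->crfid;		/* pre-refactor field name */
	mutex_lock(&cfid->fid_mutex);
	if (cfid->dentry) {
		dput(cfid->dentry);	/* drop the reference pinning the cached dir's dentry */
		cfid->dentry = NULL;
	}
	mutex_unlock(&cfid->fid_mutex);
}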
@@ -353,7 +376,7 @@ static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
struct cifsInodeInfo *cifs_inode;
- cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
+ cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
if (!cifs_inode)
return NULL;
cifs_inode->cifsAttrs = 0x20; /* default */
@@ -366,36 +389,44 @@ cifs_alloc_inode(struct super_block *sb)
cifs_inode->flags = 0;
spin_lock_init(&cifs_inode->writers_lock);
cifs_inode->writers = 0;
- cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
+ cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
cifs_inode->server_eof = 0;
cifs_inode->uniqueid = 0;
cifs_inode->createtime = 0;
cifs_inode->epoch = 0;
spin_lock_init(&cifs_inode->open_file_lock);
generate_random_uuid(cifs_inode->lease_key);
+ cifs_inode->symlink_target = NULL;
/*
* Can not set i_flags here - they get immediately overwritten to zero
* by the VFS.
*/
- /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
+ /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
INIT_LIST_HEAD(&cifs_inode->openFileList);
INIT_LIST_HEAD(&cifs_inode->llist);
INIT_LIST_HEAD(&cifs_inode->deferred_closes);
spin_lock_init(&cifs_inode->deferred_lock);
- return &cifs_inode->vfs_inode;
+ return &cifs_inode->netfs.inode;
}
static void
cifs_free_inode(struct inode *inode)
{
- kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+
+ if (S_ISLNK(inode->i_mode))
+ kfree(cinode->symlink_target);
+ kmem_cache_free(cifs_inode_cachep, cinode);
}
static void
cifs_evict_inode(struct inode *inode)
{
truncate_inode_pages_final(&inode->i_data);
+ if (inode->i_state & I_PINNING_FSCACHE_WB)
+ cifs_fscache_unuse_inode_cookie(inode, true);
+ cifs_fscache_release_inode_cookie(inode);
clear_inode(inode);
}
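The vfs_inode-to-netfs.inode changes above rely on the usual embedded-structure pattern: the VFS inode now sits inside a netfs_inode, which sits at the start of cifsInodeInfo, and CIFS_I() recovers the private structure with container_of(). A self-contained userspace sketch of that pattern, with stand-in layouts that only mirror the kernel names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inode { unsigned long i_ino; };
struct netfs_inode { struct inode inode; };		/* netfs library wrapper */
struct cifsInodeInfo { struct netfs_inode netfs; unsigned int cifsAttrs; };

static struct cifsInodeInfo *CIFS_I(struct inode *inode)
{
	return container_of(inode, struct cifsInodeInfo, netfs.inode);
}

int main(void)
{
	struct cifsInodeInfo ci = { .netfs.inode.i_ino = 42, .cifsAttrs = 0x20 };
	struct inode *vfs = &ci.netfs.inode;	/* what cifs_alloc_inode() hands to the VFS */

	printf("attrs=0x%x ino=%lu\n", CIFS_I(vfs)->cifsAttrs, vfs->i_ino);
	return 0;
}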
@@ -568,6 +599,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",nocase");
if (tcon->nodelete)
seq_puts(s, ",nodelete");
+ if (cifs_sb->ctx->no_sparse)
+ seq_puts(s, ",nosparse");
if (tcon->local_lease)
seq_puts(s, ",locallease");
if (tcon->retry)
@@ -665,6 +698,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
}
+ seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
if (tcon->ses->chan_max > 1)
seq_printf(s, ",multichannel,max_channels=%zu",
@@ -687,14 +721,17 @@ static void cifs_umount_begin(struct super_block *sb)
tcon = cifs_sb_master_tcon(cifs_sb);
spin_lock(&cifs_tcp_ses_lock);
- if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
+ spin_lock(&tcon->tc_lock);
+ if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
/* we have other mounts to same share or we have
already tried to force umount this and woken up
all waiting network requests, nothing to do */
+ spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
return;
} else if (tcon->tc_count == 1)
- tcon->tidStatus = CifsExiting;
+ tcon->status = TID_EXITING;
+ spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
@@ -720,6 +757,12 @@ static int cifs_show_stats(struct seq_file *s, struct dentry *root)
}
#endif
+static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
+ return 0;
+}
+
static int cifs_drop_inode(struct inode *inode)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -732,6 +775,7 @@ static int cifs_drop_inode(struct inode *inode)
static const struct super_operations cifs_super_ops = {
.statfs = cifs_statfs,
.alloc_inode = cifs_alloc_inode,
+ .write_inode = cifs_write_inode,
.free_inode = cifs_free_inode,
.drop_inode = cifs_drop_inode,
.evict_inode = cifs_evict_inode,
@@ -773,7 +817,7 @@ cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
sep = CIFS_DIR_SEP(cifs_sb);
dentry = dget(sb->s_root);
- p = s = full_path;
+ s = full_path;
do {
struct inode *dir = d_inode(dentry);
@@ -815,7 +859,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
int flags, struct smb3_fs_context *old_ctx)
{
int rc;
- struct super_block *sb;
+ struct super_block *sb = NULL;
struct cifs_sb_info *cifs_sb = NULL;
struct cifs_mnt_data mnt_data;
struct dentry *root;
@@ -908,11 +952,14 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
out_super:
deactivate_locked_super(sb);
+ return root;
out:
if (cifs_sb) {
- kfree(cifs_sb->prepath);
- smb3_cleanup_fs_context(cifs_sb->ctx);
- kfree(cifs_sb);
+ if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */
+ kfree(cifs_sb->prepath);
+ smb3_cleanup_fs_context(cifs_sb->ctx);
+ kfree(cifs_sb);
+ }
}
return root;
}
@@ -924,7 +971,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
ssize_t rc;
struct inode *inode = file_inode(iocb->ki_filp);
- if (iocb->ki_filp->f_flags & O_DIRECT)
+ if (iocb->ki_flags & IOCB_DIRECT)
return cifs_user_readv(iocb, iter);
rc = cifs_revalidate_mapping(inode);
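A hypothetical userspace illustration of the branch changed above: opening with O_DIRECT sets IOCB_DIRECT on the kiocb, so a read that reaches cifs_loose_read_iter() is routed to cifs_user_readv() instead of the page cache. The path and alignment values are assumptions; O_DIRECT generally wants the buffer, offset, and length aligned.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf = NULL;
	int fd = open("/mnt/cifs/file.bin", O_RDONLY | O_DIRECT);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096) != 0) {
		perror("setup");
		return 1;
	}
	ssize_t n = read(fd, buf, 4096);	/* takes the IOCB_DIRECT branch above */
	printf("read %zd bytes\n", n);
	free(buf);
	close(fd);
	return 0;
}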
@@ -1060,7 +1107,7 @@ struct file_system_type cifs_fs_type = {
};
MODULE_ALIAS_FS("cifs");
-static struct file_system_type smb3_fs_type = {
+struct file_system_type smb3_fs_type = {
.owner = THIS_MODULE,
.name = "smb3",
.init_fs_context = smb3_init_fs_context,
@@ -1096,6 +1143,30 @@ const struct inode_operations cifs_file_inode_ops = {
.fiemap = cifs_fiemap,
};
+const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *done)
+{
+ char *target_path;
+
+ target_path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!target_path)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock(&inode->i_lock);
+ if (likely(CIFS_I(inode)->symlink_target)) {
+ strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
+ } else {
+ kfree(target_path);
+ target_path = ERR_PTR(-EOPNOTSUPP);
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (!IS_ERR(target_path))
+ set_delayed_call(done, kfree_link, target_path);
+
+ return target_path;
+}
+
const struct inode_operations cifs_symlink_inode_ops = {
.get_link = cifs_get_link,
.permission = cifs_permission,
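The new cifs_get_link() copies the cached target into a freshly allocated buffer under i_lock and hands it to the VFS via set_delayed_call()/kfree_link(), so the caller never races with the per-inode string being replaced or freed. A hypothetical userspace check (the mount path is an assumption) that exercises it through readlink(2):

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[PATH_MAX];
	ssize_t n = readlink("/mnt/cifs/some-symlink", buf, sizeof(buf) - 1);

	if (n < 0) {
		perror("readlink");
		return 1;
	}
	buf[n] = '\0';			/* readlink() does not NUL-terminate */
	printf("target: %s\n", buf);
	return 0;
}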
@@ -1206,6 +1277,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
lock_two_nondirectories(target_inode, src_inode);
cifs_dbg(FYI, "about to flush pages\n");
+
+ rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
+ off + len - 1);
+ if (rc)
+ goto out;
+
/* should we flush first and last page first */
truncate_inode_pages(&target_inode->i_data, 0);
@@ -1249,8 +1326,11 @@ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
ssize_t rc;
struct cifsFileInfo *cfile = dst_file->private_data;
- if (cfile->swapfile)
- return -EOPNOTSUPP;
+ if (cfile->swapfile) {
+ rc = -EOPNOTSUPP;
+ free_xid(xid);
+ return rc;
+ }
rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
len, flags);
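A hypothetical userspace trigger for the path touched above (paths are assumptions): copy_file_range(2) between two files on the same cifs mount ends up in cifs_copy_file_range(), which now releases its xid on the swapfile early return instead of leaking it.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int in = open("/mnt/cifs/src.bin", O_RDONLY);
	int out = open("/mnt/cifs/dst.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	ssize_t n = copy_file_range(in, NULL, out, NULL, 1024 * 1024, 0);
	if (n < 0)
		perror("copy_file_range");
	else
		printf("copied %zd bytes\n", n);
	close(in);
	close(out);
	return 0;
}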
@@ -1392,7 +1472,7 @@ cifs_init_once(void *inode)
{
struct cifsInodeInfo *cifsi = inode;
- inode_init_once(&cifsi->vfs_inode);
+ inode_init_once(&cifsi->netfs.inode);
init_rwsem(&cifsi->lock_sem);
}
@@ -1511,8 +1591,7 @@ cifs_destroy_request_bufs(void)
kmem_cache_destroy(cifs_sm_req_cachep);
}
-static int
-cifs_init_mids(void)
+static int init_mids(void)
{
cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
sizeof(struct mid_q_entry), 0,
@@ -1530,8 +1609,7 @@ cifs_init_mids(void)
return 0;
}
-static void
-cifs_destroy_mids(void)
+static void destroy_mids(void)
{
mempool_destroy(cifs_mid_poolp);
kmem_cache_destroy(cifs_mid_cachep);
@@ -1553,11 +1631,11 @@ init_cifs(void)
atomic_set(&tcpSesReconnectCount, 0);
atomic_set(&tconInfoReconnectCount, 0);
- atomic_set(&bufAllocCount, 0);
- atomic_set(&smBufAllocCount, 0);
+ atomic_set(&buf_alloc_count, 0);
+ atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
- atomic_set(&totBufAllocCount, 0);
- atomic_set(&totSmBufAllocCount, 0);
+ atomic_set(&total_buf_alloc_count, 0);
+ atomic_set(&total_small_buf_alloc_count, 0);
if (slow_rsp_threshold < 1)
cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
else if (slow_rsp_threshold > 32767)
@@ -1565,7 +1643,7 @@ init_cifs(void)
"slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */
- atomic_set(&midCount, 0);
+ atomic_set(&mid_count, 0);
GlobalCurrentXid = 0;
GlobalTotalActiveXid = 0;
GlobalMaxActiveXid = 0;
@@ -1624,15 +1702,11 @@ init_cifs(void)
goto out_destroy_cifsoplockd_wq;
}
- rc = cifs_fscache_register();
- if (rc)
- goto out_destroy_deferredclose_wq;
-
rc = cifs_init_inodecache();
if (rc)
- goto out_unreg_fscache;
+ goto out_destroy_deferredclose_wq;
- rc = cifs_init_mids();
+ rc = init_mids();
if (rc)
goto out_destroy_inodecache;
@@ -1689,11 +1763,9 @@ out_destroy_request_bufs:
#endif
cifs_destroy_request_bufs();
out_destroy_mids:
- cifs_destroy_mids();
+ destroy_mids();
out_destroy_inodecache:
cifs_destroy_inodecache();
-out_unreg_fscache:
- cifs_fscache_unregister();
out_destroy_deferredclose_wq:
destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
@@ -1727,9 +1799,8 @@ exit_cifs(void)
dfs_cache_destroy();
#endif
cifs_destroy_request_bufs();
- cifs_destroy_mids();
+ destroy_mids();
cifs_destroy_inodecache();
- cifs_fscache_unregister();
destroy_workqueue(deferredclose_wq);
destroy_workqueue(cifsoplockd_wq);
destroy_workqueue(decrypt_wq);