Diffstat (limited to 'fs/cifs/transport.c')
-rw-r--r--	fs/cifs/transport.c	268
1 file changed, 151 insertions(+), 117 deletions(-)
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 7ce8a585abd6..9a16ff4b9f5e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -104,7 +104,10 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
__le16 command = midEntry->server->vals->lock_cmd;
+ __u16 smb_cmd = le16_to_cpu(midEntry->command);
unsigned long now;
+ unsigned long roundtrip_time;
+ struct TCP_Server_Info *server = midEntry->server;
#endif
midEntry->mid_state = MID_FREE;
atomic_dec(&midCount);
@@ -114,6 +117,23 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
now = jiffies;
+ if (now < midEntry->when_alloc)
+ cifs_dbg(VFS, "invalid mid allocation time\n");
+ roundtrip_time = now - midEntry->when_alloc;
+
+ if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
+ if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
+ server->slowest_cmd[smb_cmd] = roundtrip_time;
+ server->fastest_cmd[smb_cmd] = roundtrip_time;
+ } else {
+ if (server->slowest_cmd[smb_cmd] < roundtrip_time)
+ server->slowest_cmd[smb_cmd] = roundtrip_time;
+ else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
+ server->fastest_cmd[smb_cmd] = roundtrip_time;
+ }
+ cifs_stats_inc(&server->num_cmds[smb_cmd]);
+ server->time_per_cmd[smb_cmd] += roundtrip_time;
+ }
/*
* commands taking longer than one second (default) can be indications
* that something is wrong, unless it is quite a slow link or a very
@@ -131,11 +151,10 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
* NB: le16_to_cpu returns unsigned so can not be negative below
*/
- if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
- cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);
+ if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
+ cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
- trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
- midEntry->mid, midEntry->pid,
+ trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
midEntry->when_sent, midEntry->when_received);
if (cifsFYI & CIFS_TIMER) {
pr_debug(" CIFS slow rsp: cmd %d mid %llu",
@@ -300,7 +319,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
__be32 rfc1002_marker;
if (cifs_rdma_enabled(server) && server->smbd_conn) {
- rc = smbd_send(server, rqst);
+ rc = smbd_send(server, num_rqst, rqst);
goto smbd_done;
}
@@ -486,15 +505,31 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
}
static int
-wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
- int *credits, unsigned int *instance)
+wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
+ const int timeout, const int flags,
+ unsigned int *instance)
{
int rc;
+ int *credits;
+ int optype;
+ long int t;
+
+ if (timeout < 0)
+ t = MAX_JIFFY_OFFSET;
+ else
+ t = msecs_to_jiffies(timeout);
+
+ optype = flags & CIFS_OP_MASK;
*instance = 0;
+ credits = server->ops->get_credits_field(server, optype);
+ /* Since an echo is already inflight, no need to wait to send another */
+ if (*credits <= 0 && optype == CIFS_ECHO_OP)
+ return -EAGAIN;
+
spin_lock(&server->req_lock);
- if (timeout == CIFS_ASYNC_OP) {
+ if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
/* oplock breaks must not be held up */
server->in_flight++;
*credits -= 1;
@@ -504,14 +539,21 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
}
while (1) {
- if (*credits <= 0) {
+ if (*credits < num_credits) {
spin_unlock(&server->req_lock);
cifs_num_waiters_inc(server);
- rc = wait_event_killable(server->request_q,
- has_credits(server, credits));
+ rc = wait_event_killable_timeout(server->request_q,
+ has_credits(server, credits, num_credits), t);
cifs_num_waiters_dec(server);
- if (rc)
- return rc;
+ if (!rc) {
+ trace_smb3_credit_timeout(server->CurrentMid,
+ server->hostname, num_credits);
+ cifs_dbg(VFS, "wait timed out after %d ms\n",
+ timeout);
+ return -ENOTSUPP;
+ }
+ if (rc == -ERESTARTSYS)
+ return -ERESTARTSYS;
spin_lock(&server->req_lock);
} else {
if (server->tcpStatus == CifsExiting) {
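The wait above now passes the requested credit count to has_credits(), so the waiter only wakes once the whole request can be charged. A minimal sketch of that helper under this assumption; the real definition is presumably updated alongside this change and is not shown in this hunk:

/* Minimal sketch of has_credits() with the new num_credits argument;
 * assumed shape only, not part of this diff. */
static int
has_credits(struct TCP_Server_Info *server, int *credits, int num_credits)
{
	int num;

	spin_lock(&server->req_lock);
	num = *credits;
	spin_unlock(&server->req_lock);
	return num >= num_credits;	/* wake once the whole request fits */
}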
@@ -520,14 +562,52 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
}
/*
+ * For normal commands, reserve the last MAX_COMPOUND
+ * credits for compound requests.
+ * Otherwise these compounds could be permanently
+ * starved for credits by single-credit requests.
+ *
+ * To prevent spinning CPU, block this thread until
+ * there are >MAX_COMPOUND credits available.
+ * But only do this if we already have a lot of
+ * credits in flight to avoid triggering this check
+ * for servers that are slow to hand out credits on
+ * new sessions.
+ */
+ if (!optype && num_credits == 1 &&
+ server->in_flight > 2 * MAX_COMPOUND &&
+ *credits <= MAX_COMPOUND) {
+ spin_unlock(&server->req_lock);
+ cifs_num_waiters_inc(server);
+ rc = wait_event_killable_timeout(
+ server->request_q,
+ has_credits(server, credits,
+ MAX_COMPOUND + 1),
+ t);
+ cifs_num_waiters_dec(server);
+ if (!rc) {
+ trace_smb3_credit_timeout(
+ server->CurrentMid,
+ server->hostname, num_credits);
+ cifs_dbg(VFS, "wait timed out after %d ms\n",
+ timeout);
+ return -ENOTSUPP;
+ }
+ if (rc == -ERESTARTSYS)
+ return -ERESTARTSYS;
+ spin_lock(&server->req_lock);
+ continue;
+ }
+
+ /*
* Can not count locking commands against total
* as they are allowed to block on server.
*/
/* update # of requests on the wire to server */
- if (timeout != CIFS_BLOCKING_OP) {
- *credits -= 1;
- server->in_flight++;
+ if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
+ *credits -= num_credits;
+ server->in_flight += num_credits;
*instance = server->reconnect_instance;
}
spin_unlock(&server->req_lock);
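The reservation above can be restated concretely: assuming MAX_COMPOUND is 5 (its value in cifsglob.h), a plain single-credit request starts to defer only once more than ten requests are in flight and five or fewer credits remain, so compound chains are never starved by a stream of one-credit requests. An illustrative restatement of the condition, not part of the patch:

/* Illustrative only; MAX_COMPOUND == 5 is assumed for the sketch. */
#define MAX_COMPOUND_ASSUMED 5

static bool leave_room_for_compounds(int optype, int num_credits,
				     int in_flight, int credits)
{
	return optype == 0 &&				/* regular (non-echo, non-oplock) op */
	       num_credits == 1 &&			/* asking for a single credit */
	       in_flight > 2 * MAX_COMPOUND_ASSUMED &&	/* connection already busy */
	       credits <= MAX_COMPOUND_ASSUMED;		/* only the reserved credits remain */
}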
@@ -538,16 +618,36 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
}
static int
-wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
- const int optype, unsigned int *instance)
+wait_for_free_request(struct TCP_Server_Info *server, const int flags,
+ unsigned int *instance)
+{
+ return wait_for_free_credits(server, 1, -1, flags,
+ instance);
+}
+
+static int
+wait_for_compound_request(struct TCP_Server_Info *server, int num,
+ const int flags, unsigned int *instance)
{
- int *val;
+ int *credits;
- val = server->ops->get_credits_field(server, optype);
- /* Since an echo is already inflight, no need to wait to send another */
- if (*val <= 0 && optype == CIFS_ECHO_OP)
- return -EAGAIN;
- return wait_for_free_credits(server, timeout, val, instance);
+ credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
+
+ spin_lock(&server->req_lock);
+ if (*credits < num) {
+ /*
+ * Return immediately if not too many requests in flight since
+ * we will likely be stuck waiting for credits.
+ */
+ if (server->in_flight < num - *credits) {
+ spin_unlock(&server->req_lock);
+ return -ENOTSUPP;
+ }
+ }
+ spin_unlock(&server->req_lock);
+
+ return wait_for_free_credits(server, num, 60000, flags,
+ instance);
}
int
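The two wrappers above encode the new timeout policy: wait_for_free_request() asks for one credit with a timeout of -1, which wait_for_free_credits() maps to MAX_JIFFY_OFFSET (wait indefinitely), while wait_for_compound_request() asks for all credits of a chain at once with a 60000 ms limit, returning -ENOTSUPP up front when too few requests are in flight to expect more credits to arrive. A hypothetical caller, purely for illustration:

/* Hypothetical usage, for illustration only. */
unsigned int instance;
int rc;

/* Ordinary single request: block until one credit is available. */
rc = wait_for_free_request(server, 0, &instance);
if (rc)
	return rc;

/* Compound chain of three requests: obtain three credits in one go, or
 * fail with -ENOTSUPP after 60 seconds (or immediately if the connection
 * is too idle for more credits to be expected). */
rc = wait_for_compound_request(server, 3, 0, &instance);
if (rc)
	return rc;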
@@ -646,16 +746,16 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
mid_handle_t *handle, void *cbdata, const int flags,
const struct cifs_credits *exist_credits)
{
- int rc, timeout, optype;
+ int rc;
struct mid_q_entry *mid;
struct cifs_credits credits = { .value = 0, .instance = 0 };
unsigned int instance;
+ int optype;
- timeout = flags & CIFS_TIMEOUT_MASK;
optype = flags & CIFS_OP_MASK;
if ((flags & CIFS_HAS_CREDITS) == 0) {
- rc = wait_for_free_request(server, timeout, optype, &instance);
+ rc = wait_for_free_request(server, flags, &instance);
if (rc)
return rc;
credits.value = 1;
@@ -738,7 +838,7 @@ SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
iov[0].iov_base = in_buf;
iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
- flags |= CIFS_NO_RESP;
+ flags |= CIFS_NO_RSP_BUF;
rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
@@ -871,18 +971,15 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
const int flags, const int num_rqst, struct smb_rqst *rqst,
int *resp_buf_type, struct kvec *resp_iov)
{
- int i, j, rc = 0;
- int timeout, optype;
+ int i, j, optype, rc = 0;
struct mid_q_entry *midQ[MAX_COMPOUND];
bool cancelled_mid[MAX_COMPOUND] = {false};
struct cifs_credits credits[MAX_COMPOUND] = {
{ .value = 0, .instance = 0 }
};
unsigned int instance;
- unsigned int first_instance = 0;
char *buf;
- timeout = flags & CIFS_TIMEOUT_MASK;
optype = flags & CIFS_OP_MASK;
for (i = 0; i < num_rqst; i++)
@@ -896,81 +993,24 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
if (ses->server->tcpStatus == CifsExiting)
return -ENOENT;
- spin_lock(&ses->server->req_lock);
- if (ses->server->credits < num_rqst) {
- /*
- * Return immediately if not too many requests in flight since
- * we will likely be stuck on waiting for credits.
- */
- if (ses->server->in_flight < num_rqst - ses->server->credits) {
- spin_unlock(&ses->server->req_lock);
- return -ENOTSUPP;
- }
- } else {
- /* enough credits to send the whole compounded request */
- ses->server->credits -= num_rqst;
- ses->server->in_flight += num_rqst;
- first_instance = ses->server->reconnect_instance;
- }
- spin_unlock(&ses->server->req_lock);
-
- if (first_instance) {
- cifs_dbg(FYI, "Acquired %d credits at once\n", num_rqst);
- for (i = 0; i < num_rqst; i++) {
- credits[i].value = 1;
- credits[i].instance = first_instance;
- }
- goto setup_rqsts;
- }
-
/*
- * There are not enough credits to send the whole compound request but
- * there are requests in flight that may bring credits from the server.
+ * Wait for all the requests to become available.
* This approach still leaves the possibility to be stuck waiting for
* credits if the server doesn't grant credits to the outstanding
- * requests. This should be fixed by returning immediately and letting
- * a caller fallback to sequential commands instead of compounding.
- * Ensure we obtain 1 credit per request in the compound chain.
+ * requests and if the client is completely idle, not generating any
+ * other requests.
+ * This can be handled by the eventual session reconnect.
*/
- for (i = 0; i < num_rqst; i++) {
- rc = wait_for_free_request(ses->server, timeout, optype,
- &instance);
-
- if (rc == 0) {
- credits[i].value = 1;
- credits[i].instance = instance;
- /*
- * All parts of the compound chain must get credits from
- * the same session, otherwise we may end up using more
- * credits than the server granted. If there were
- * reconnects in between, return -EAGAIN and let callers
- * handle it.
- */
- if (i == 0)
- first_instance = instance;
- else if (first_instance != instance) {
- i++;
- rc = -EAGAIN;
- }
- }
+ rc = wait_for_compound_request(ses->server, num_rqst, flags,
+ &instance);
+ if (rc)
+ return rc;
- if (rc) {
- /*
- * We haven't sent an SMB packet to the server yet but
- * we already obtained credits for i requests in the
- * compound chain - need to return those credits back
- * for future use. Note that we need to call add_credits
- * multiple times to match the way we obtained credits
- * in the first place and to account for in flight
- * requests correctly.
- */
- for (j = 0; j < i; j++)
- add_credits(ses->server, &credits[j], optype);
- return rc;
- }
+ for (i = 0; i < num_rqst; i++) {
+ credits[i].value = 1;
+ credits[i].instance = instance;
}
-setup_rqsts:
/*
* Make sure that we sign in the same order that we send on this socket
* and avoid races inside tcp sendmsg code that could cause corruption
@@ -981,14 +1021,12 @@ setup_rqsts:
/*
* All the parts of the compound chain must have obtained credits from the
- * same session (see the appropriate checks above). In the same time
- * there might be reconnects after those checks but before we acquired
- * the srv_mutex. We can not use credits obtained from the previous
+ * same session. We can not use credits obtained from the previous
* session to send this request. Check if there were reconnects after
* we obtained credits and return -EAGAIN in such cases to let callers
* handle it.
*/
- if (first_instance != ses->server->reconnect_instance) {
+ if (instance != ses->server->reconnect_instance) {
mutex_unlock(&ses->server->srv_mutex);
for (j = 0; j < num_rqst; j++)
add_credits(ses->server, &credits[j], optype);
@@ -1035,8 +1073,11 @@ setup_rqsts:
mutex_unlock(&ses->server->srv_mutex);
- if (rc < 0) {
- /* Sending failed for some reason - return credits back */
+ /*
+ * If sending failed for some reason or it is an oplock break that we
+ * will not receive a response to - return credits back
+ */
+ if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
for (i = 0; i < num_rqst; i++)
add_credits(ses->server, &credits[i], optype);
goto out;
@@ -1057,9 +1098,6 @@ setup_rqsts:
smb311_update_preauth_hash(ses, rqst[0].rq_iov,
rqst[0].rq_nvec);
- if (timeout == CIFS_ASYNC_OP)
- goto out;
-
for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(ses->server, midQ[i]);
if (rc != 0)
@@ -1113,7 +1151,7 @@ setup_rqsts:
flags & CIFS_LOG_ERROR);
/* mark it so buf will not be freed by cifs_delete_mid */
- if ((flags & CIFS_NO_RESP) == 0)
+ if ((flags & CIFS_NO_RSP_BUF) == 0)
midQ[i]->resp_buf = NULL;
}
@@ -1194,7 +1232,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
- int *pbytes_returned, const int timeout)
+ int *pbytes_returned, const int flags)
{
int rc = 0;
struct mid_q_entry *midQ;
@@ -1225,7 +1263,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
return -EIO;
}
- rc = wait_for_free_request(ses->server, timeout, 0, &credits.instance);
+ rc = wait_for_free_request(ses->server, flags, &credits.instance);
if (rc)
return rc;
@@ -1264,9 +1302,6 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc < 0)
goto out;
- if (timeout == CIFS_ASYNC_OP)
- goto out;
-
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
send_cancel(ses->server, &rqst, midQ);
@@ -1367,8 +1402,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return -EIO;
}
- rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0,
- &instance);
+ rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
if (rc)
return rc;