Diffstat (limited to 'drivers/crypto/chelsio/chtls/chtls_cm.c')
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c | 59
1 file changed, 30 insertions(+), 29 deletions(-)
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index aca75237bbcf..9b2745ad9e38 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
return 0;
}
+static void chtls_purge_wr_queue(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = dequeue_wr(sk)) != NULL)
+ kfree_skb(skb);
+}
+
static void chtls_release_resources(struct sock *sk)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
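[Note: chtls_purge_wr_queue() drains the per-connection work-request list with dequeue_wr(), whose old in-file definition is still visible in the removal hunk near the bottom of this diff: pending WRs form a singly linked list threaded through each skb's control block. A minimal self-contained sketch of that pattern follows; the wr_queue type and enqueue_wr() helper are hypothetical illustrations, not the driver's code.

#include <linux/skbuff.h>

/* Sketch only: pending WRs chained through skb->cb, as in the
 * dequeue_wr() removed further down in this diff. */
struct wr_skb_cb {
        struct sk_buff *next_wr;        /* next pending work request */
};

#define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)

struct wr_queue {                       /* hypothetical stand-in for the
                                         * head/tail kept in chtls_sock */
        struct sk_buff *wr_skb_head;
        struct sk_buff *wr_skb_tail;
};

static void enqueue_wr(struct wr_queue *q, struct sk_buff *skb)
{
        WR_SKB_CB(skb)->next_wr = NULL;
        if (q->wr_skb_head)             /* non-empty: link after tail */
                WR_SKB_CB(q->wr_skb_tail)->next_wr = skb;
        else
                q->wr_skb_head = skb;
        q->wr_skb_tail = skb;
}

Dequeuing only ever advances the head pointer; as the removed code notes, the stale tail is harmless because a NULL head is the emptiness test.]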
@@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk)
kfree_skb(csk->txdata_skb_cache);
csk->txdata_skb_cache = NULL;
+ if (csk->wr_credits != csk->wr_max_credits) {
+ chtls_purge_wr_queue(sk);
+ chtls_reset_wr_list(csk);
+ }
+
if (csk->l2t_entry) {
cxgb4_l2t_release(csk->l2t_entry);
csk->l2t_entry = NULL;
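[Note: the credit comparison above works as an emptiness test because every work request posted to the adapter debits csk->wr_credits, and credits are only refunded when the hardware acks the WR via CPL_FW4_ACK (handled by chtls_rx_ack(), visible at the end of this diff), which is also when the corresponding skbs are normally dequeued and freed. A connection torn down with unacked WRs therefore still has skbs queued, which were previously leaked. The invariant, as a hypothetical helper not present in the driver (fields as used in this hunk, declared in chtls.h):

/* Hypothetical: all credits refunded implies no work requests are
 * still queued awaiting a hardware ack. */
static inline bool csk_wr_list_empty(const struct chtls_sock *csk)
{
        return csk->wr_credits == csk->wr_max_credits;
}
]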
@@ -1273,7 +1286,7 @@ static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
ctx = (struct listen_ctx *)data;
lsk = ctx->lsk;
- if (unlikely(tid >= cdev->tids->ntids)) {
+ if (unlikely(tid_out_of_range(cdev->tids, tid))) {
pr_info("passive open TID %u too large\n", tid);
return 1;
}
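[Note: tid_out_of_range() is a cxgb4 tid_info helper; the plain tid >= ntids test it replaces wrongly rejects TIDs allocated from the hash-filter region, which can sit above ntids. A sketch of the shape of such a check, with the hash_base/nhash field names assumed here and the logic simplified:

/* Sketch: accept a TID from either the regular range or a hash
 * region above it (field names assumed, not taken from this diff). */
static inline bool tid_out_of_range_sketch(const struct tid_info *t,
                                           unsigned int tid)
{
        bool in_regular = tid < t->ntids;
        bool in_hash    = tid >= t->hash_base &&
                          tid <= t->hash_base + t->nhash;

        return !in_regular && !in_hash;
}
]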
@@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
else
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+ kfree_skb(skb);
}
static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
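[Note: the added kfree_skb() plugs a leak: chtls_peer_close() fully consumes the CPL_PEER_CLOSE message but never freed the skb carrying it. The convention is that a CPL handler which does not forward or requeue the skb owns it and must free it before returning. A hypothetical skeleton of that ownership rule:

/* Hypothetical skeleton: consume the CPL, then free the skb that
 * carried it, since nothing downstream will. */
static void chtls_cpl_handler_sketch(struct sock *sk, struct sk_buff *skb)
{
        const struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;

        /* ... update socket state from *req ... */
        (void)req;
        kfree_skb(skb);         /* handler owns the skb */
}
]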
@@ -1815,6 +1829,20 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
kfree_skb(skb);
}
+/*
+ * Add an skb to the deferred skb queue for processing from process context.
+ */
+static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
+ defer_handler_t handler)
+{
+ DEFERRED_SKB_CB(skb)->handler = handler;
+ spin_lock_bh(&cdev->deferq.lock);
+ __skb_queue_tail(&cdev->deferq, skb);
+ if (skb_queue_len(&cdev->deferq) == 1)
+ schedule_work(&cdev->deferq_task);
+ spin_unlock_bh(&cdev->deferq.lock);
+}
+
static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev, int status, int queue)
{
@@ -1829,7 +1857,7 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
if (!reply_skb) {
req->status = (queue << 1);
- send_defer_abort_rpl(cdev, skb);
+ t4_defer_reply(skb, cdev, send_defer_abort_rpl);
return;
}
@@ -1848,20 +1876,6 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}
-/*
- * Add an skb to the deferred skb queue for processing from process context.
- */
-static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
- defer_handler_t handler)
-{
- DEFERRED_SKB_CB(skb)->handler = handler;
- spin_lock_bh(&cdev->deferq.lock);
- __skb_queue_tail(&cdev->deferq, skb);
- if (skb_queue_len(&cdev->deferq) == 1)
- schedule_work(&cdev->deferq_task);
- spin_unlock_bh(&cdev->deferq.lock);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2062,19 +2076,6 @@ rel_skb:
return 0;
}
-static struct sk_buff *dequeue_wr(struct sock *sk)
-{
- struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
- struct sk_buff *skb = csk->wr_skb_head;
-
- if (likely(skb)) {
- /* Don't bother clearing the tail */
- csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
- WR_SKB_CB(skb)->next_wr = NULL;
- }
- return skb;
-}
-
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;