commit 879526030c8b5e8bd786a6408730893b9b2958ea
Author:    Paolo Abeni <pabeni@redhat.com> (2020-11-27 11:10:24 +0100)
Committer: Jakub Kicinski <kuba@kernel.org> (2020-11-30 17:55:23 -0800)
Parent:    mptcp: implement wmem reservation
mptcp: protect the rx path with the msk socket spinlock
Such a spinlock is currently used only to protect the 'owned' flag inside the socket lock itself. With this patch, we extend its scope to protect the whole msk receive path and sk_forward_memory.

Given the above, we can always move data into the msk receive queue (and OoO queue) from the subflow.

We leverage the previous commit, so that we need to acquire the spinlock in the tx path only when moving fwd memory.

recvmsg() must now explicitly acquire the socket spinlock when moving skbs out of sk_receive_queue. To reduce the number of lock operations required, we use a second rx queue and splice the first into the latter in mptcp_lock_sock(). Additionally, rmem-allocated memory is bulk-freed via release_cb().

Acked-by: Florian Westphal <fw@strlen.de>
Co-developed-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
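To make the splice-on-lock step concrete, here is a minimal C sketch. The helper name __example_splice_receive_queue is hypothetical and the body is a simplification: the series performs this splice from within the mptcp_lock_sock() macro, so treat the shape, not the name, as what the patch describes. Only mptcp_data_lock()/mptcp_data_unlock() and msk->receive_queue come from this patch.

/* Hedged sketch: drain skbs that subflows appended to sk_receive_queue
 * under the msk data lock (sk->sk_lock.slock) into the second receive
 * queue, which the socket owner can then consume without the spinlock.
 */
static void __example_splice_receive_queue(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_data_lock(sk);
	/* one splice replaces a per-skb lock/unlock cycle in recvmsg() */
	skb_queue_splice_tail_init(&sk->sk_receive_queue,
				   &msk->receive_queue);
	mptcp_data_unlock(sk);
}

With this arrangement recvmsg() dequeues from msk->receive_queue while owning the socket lock, paying one spinlock round-trip per lock_sock() instead of one per skb.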
Diffstat (limited to 'net/mptcp/protocol.h')
 net/mptcp/protocol.h | 5 +++++
 1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 4cf355076e35..fe2efd923c5c 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -227,6 +227,7 @@ struct mptcp_sock {
 	unsigned long timer_ival;
 	u32 token;
 	int rmem_pending;
+	int rmem_released;
 	unsigned long flags;
 	bool can_ack;
 	bool fully_established;
@@ -238,6 +239,7 @@ struct mptcp_sock {
 	struct work_struct work;
 	struct sk_buff *ooo_last_skb;
 	struct rb_root out_of_order_queue;
+	struct sk_buff_head receive_queue;
 	struct list_head conn_list;
 	struct list_head rtx_queue;
 	struct mptcp_data_frag *first_pending;
@@ -267,6 +269,9 @@ struct mptcp_sock {
 	local_bh_enable();						\
 } while (0)
 
+#define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
+#define mptcp_data_unlock(sk) spin_unlock_bh(&(sk)->sk_lock.slock)
+
 #define mptcp_for_each_subflow(__msk, __subflow) \
 	list_for_each_entry(__subflow, &((__msk)->conn_list), node)
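As a usage illustration for the new locking helpers, here is a hedged sketch of the two sides of the scheme: a subflow-context enqueue and the bulk rmem release mentioned in the changelog. Both helper names (example_msk_enqueue, example_rmem_uncharge) are hypothetical; only mptcp_data_lock()/mptcp_data_unlock(), receive_queue and rmem_released come from this patch.

/* Hypothetical subflow-side enqueue: data can always be moved into the
 * msk receive queue because only the data lock is required, not the
 * (possibly owned) socket lock. */
static void example_msk_enqueue(struct mptcp_sock *msk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)msk;

	mptcp_data_lock(sk);
	skb_set_owner_r(skb, sk);	/* charge rmem to the msk */
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	mptcp_data_unlock(sk);
}

/* Hypothetical bulk uncharge: rmem freed by recvmsg() accumulates in
 * msk->rmem_released and is returned in one step, e.g. from
 * release_cb(), with the caller holding the msk data lock. */
static void example_rmem_uncharge(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!msk->rmem_released)
		return;

	atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, msk->rmem_released);
	msk->rmem_released = 0;
}

Batching the uncharge this way keeps the hot subflow path from touching sk_forward_alloc on every skb, which is the point of tracking the released amount in the new rmem_released field.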