author	Jakub Kicinski <kuba@kernel.org>	2025-04-10 18:34:08 -0700
committer	Jakub Kicinski <kuba@kernel.org>	2025-04-10 18:34:09 -0700
commit	8127837aae8f63ec30fcba7f5afd4887f83111cd (patch)
tree	5cd2ae7708d894c5823c0482c67db0be54ea697c
parent	Merge branch 'net-stmmac-stmmac_pltfr_find_clk' (diff)
parent	trace: tcp: Add tracepoint for tcp_sendmsg_locked() (diff)
Merge branch 'trace-add-tracepoint-for-tcp_sendmsg_locked'
Breno Leitao says:

====================
trace: add tracepoint for tcp_sendmsg_locked()

Meta has been using BPF programs to monitor tcp_sendmsg() for years,
indicating significant interest in observing this important functionality.
Adding a proper tracepoint provides a stable API for all users who need
visibility into TCP message transmission.

David Ahern is using similar functionality with a custom patch [1], so there
is more than a single use case for this request, and it makes sense to have
such a feature upstream.

Link: https://lore.kernel.org/all/70168c8f-bf52-4279-b4c4-be64527aa1ac@kernel.org/ [1]

v2: https://lore.kernel.org/20250407-tcpsendmsg-v2-0-9f0ea843ef99@debian.org
v1: https://lore.kernel.org/20250224-tcpsendmsg-v1-1-bac043c59cc8@debian.org
====================

Link: https://patch.msgid.link/20250408-tcpsendmsg-v3-0-208b87064c28@debian.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--	include/linux/socket.h	2
-rw-r--r--	include/trace/events/tcp.h	24
-rw-r--r--	kernel/bpf/btf.c	1
-rw-r--r--	net/ipv4/tcp.c	2
4 files changed, 28 insertions, 1 deletions
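Since the motivation is BPF-based observability, a minimal sketch of how a
consumer could attach to the new tracepoint is shown below. This is not part
of the patch; the program name, file name, and build environment (vmlinux.h,
libbpf's bpf_helpers.h/bpf_tracing.h) are assumptions.

// SPDX-License-Identifier: GPL-2.0
/* tcp_sendmsg_locked.bpf.c -- illustrative sketch, not from this series. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* BTF-typed raw tracepoint: arguments mirror the TP_PROTO() added below. */
SEC("tp_btf/tcp_sendmsg_locked")
int BPF_PROG(handle_tcp_sendmsg, const struct sock *sk,
	     const struct msghdr *msg, const struct sk_buff *skb,
	     int size_goal)
{
	/* skb may be NULL here (see the raw_tp_null_args entry in btf.c). */
	int skb_len = skb ? skb->len : 0;

	bpf_printk("tcp_sendmsg_locked: skb_len=%d size_goal=%d",
		   skb_len, size_goal);
	return 0;
}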
diff --git a/include/linux/socket.h b/include/linux/socket.h
index c3322eb3d686..3b262487ec06 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -168,7 +168,7 @@ static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr
return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
}
-static inline size_t msg_data_left(struct msghdr *msg)
+static inline size_t msg_data_left(const struct msghdr *msg)
{
return iov_iter_count(&msg->msg_iter);
}
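A note on why this hunk is needed (my reading, not stated in the patch): the
new tracepoint receives msg as a const struct msghdr * and calls
msg_data_left() on it from TP_fast_assign(), which with the old non-const
prototype would discard the const qualifier and warn. An illustrative
kernel-context snippet of that constraint:

/* Illustrative only -- not kernel code. With the old prototype this would
 * trigger -Wdiscarded-qualifiers and break -Werror builds. */
#include <linux/socket.h>

static void tracepoint_like_caller(const struct msghdr *msg)
{
	size_t left = msg_data_left(msg);	/* OK with the const-qualified prototype */

	(void)left;
}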
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 1a40c41ff8c3..75d3d53a3832 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -259,6 +259,30 @@ TRACE_EVENT(tcp_retransmit_synack,
__entry->saddr_v6, __entry->daddr_v6)
);
+TRACE_EVENT(tcp_sendmsg_locked,
+ TP_PROTO(const struct sock *sk, const struct msghdr *msg,
+ const struct sk_buff *skb, int size_goal),
+
+ TP_ARGS(sk, msg, skb, size_goal),
+
+ TP_STRUCT__entry(
+ __field(const void *, skb_addr)
+ __field(int, skb_len)
+ __field(int, msg_left)
+ __field(int, size_goal)
+ ),
+
+ TP_fast_assign(
+ __entry->skb_addr = skb;
+ __entry->skb_len = skb ? skb->len : 0;
+ __entry->msg_left = msg_data_left(msg);
+ __entry->size_goal = size_goal;
+ ),
+
+ TP_printk("skb_addr %p skb_len %d msg_left %d size_goal %d",
+ __entry->skb_addr, __entry->skb_len, __entry->msg_left,
+ __entry->size_goal));
+
DECLARE_TRACE(tcp_cwnd_reduction_tp,
TP_PROTO(const struct sock *sk, int newly_acked_sacked,
int newly_lost, int flag),
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 16ba36f34dfa..24a26b4bb0b8 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6541,6 +6541,7 @@ static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
{ "xprt_put_cong", 0x10 },
/* tcp */
{ "tcp_send_reset", 0x11 },
+ { "tcp_sendmsg_locked", 0x100 },
/* tegra_apb_dma */
{ "tegra_dma_tx_status", 0x100 },
/* timer_migration */
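For context (my reading of the surrounding table, hedged): each argument of a
raw tracepoint appears to get a 4-bit group in this mask, with the low bit
marking the argument as possibly NULL, so 0x100 flags the third argument (the
skb) as nullable, matching the skb ? skb->len : 0 handling in the tracepoint
above. A tiny standalone decoder sketch (hypothetical helper, not the
kernel's code):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical: argument 'arg' may be NULL when bit 0 of its nibble is set. */
static bool raw_tp_arg_may_be_null(uint64_t mask, int arg)
{
	return (mask >> (arg * 4)) & 0x1;
}

/* raw_tp_arg_may_be_null(0x100, 2) == true: the skb (3rd argument) may be NULL. */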
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6edc441b3702..e0e96f8fd47c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1160,6 +1160,8 @@ restart:
if (skb)
copy = size_goal - skb->len;
+ trace_tcp_sendmsg_locked(sk, msg, skb, size_goal);
+
if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
bool first_skb;
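To close the loop, a small userspace sketch of how the instrumentation at
this call site could be observed through tracefs. Illustrative only; the
paths assume a standard tracefs mount at /sys/kernel/tracing and sufficient
privileges.

/* read_tcp_sendmsg_locked.c -- illustrative sketch, not part of the patch. */
#include <stdio.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/tracing/events/tcp/tcp_sendmsg_locked/enable";
	char line[512];
	FILE *f;

	f = fopen(enable, "w");
	if (!f) {
		perror("enable tracepoint");
		return 1;
	}
	fputs("1", f);
	fclose(f);

	/* Each event renders through the TP_printk() format above, i.e. a line
	 * containing "skb_addr ... skb_len ... msg_left ... size_goal ...". */
	f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!f) {
		perror("trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}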