author    Jordan Rife <jordan@jrife.io>    2025-05-02 09:15:23 -0700
committer Martin KaFai Lau <martin.lau@kernel.org>    2025-05-02 11:46:42 -0700
commit    251c6636e0159fca79c3b78d982d1cec6f8785bc
tree      2b4053ecc77ac28a04ad434d095381cc8d8af4c7
parent    bpf: udp: Get rid of st_bucket_done
bpf: udp: Use bpf_udp_iter_batch_item for bpf_udp_iter_state batch items
Prepare for the next patch that tracks cookies between iterations by converting struct sock **batch to union bpf_udp_iter_batch_item *batch inside struct bpf_udp_iter_state.

Signed-off-by: Jordan Rife <jordan@jrife.io>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
-rw-r--r--  net/ipv4/udp.c  20
1 file changed, 12 insertions(+), 8 deletions(-)
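The change is mechanical: wrapping the socket pointer in a single-member union turns every "batch[i]" access into "batch[i].sk", while the allocation size and array indexing stay the same, and a later patch can add a second member without touching the batch bookkeeping again. Below is a minimal userspace sketch of that pattern (struct sock is stubbed out and the names batch_item/end_sk are illustrative, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's struct sock; a stub for this demo only. */
struct sock { int id; };

/* Single-member union mirroring union bpf_udp_iter_batch_item: for now
 * it only wraps the pointer, but more members can be added later
 * without changing how the batch array is allocated or indexed. */
union batch_item {
	struct sock *sk;
};

int main(void)
{
	struct sock a = { .id = 1 }, b = { .id = 2 };
	union batch_item *batch;
	unsigned int end_sk = 0;
	unsigned int i;

	/* Same allocation shape as the kernel's kvmalloc_array() call:
	 * an array of items instead of an array of raw pointers. */
	batch = calloc(2, sizeof(*batch));
	if (!batch)
		return 1;

	/* Each former "batch[end_sk++] = sk" becomes "....sk = sk". */
	batch[end_sk++].sk = &a;
	batch[end_sk++].sk = &b;

	for (i = 0; i < end_sk; i++)
		printf("sk id=%d\n", batch[i].sk->id);

	free(batch);
	return 0;
}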
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f2740802ee86..fe1438b2bcba 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -3413,13 +3413,17 @@ struct bpf_iter__udp {
int bucket __aligned(8);
};
+union bpf_udp_iter_batch_item {
+ struct sock *sk;
+};
+
struct bpf_udp_iter_state {
struct udp_iter_state state;
unsigned int cur_sk;
unsigned int end_sk;
unsigned int max_sk;
int offset;
- struct sock **batch;
+ union bpf_udp_iter_batch_item *batch;
};
static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
@@ -3480,7 +3484,7 @@ fill_batch:
}
if (iter->end_sk < iter->max_sk) {
sock_hold(sk);
- iter->batch[iter->end_sk++] = sk;
+ iter->batch[iter->end_sk++].sk = sk;
}
batch_sks++;
}
@@ -3516,7 +3520,7 @@ fill_batch:
}
/* Pick up where we left off. */
- sk = iter->batch[iter->end_sk - 1];
+ sk = iter->batch[iter->end_sk - 1].sk;
sk = hlist_entry_safe(sk->__sk_common.skc_portaddr_node.next,
struct sock,
__sk_common.skc_portaddr_node);
@@ -3533,7 +3537,7 @@ next_bucket:
}
WARN_ON_ONCE(iter->end_sk != batch_sks);
- return iter->end_sk ? iter->batch[0] : NULL;
+ return iter->end_sk ? iter->batch[0].sk : NULL;
}
static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -3545,7 +3549,7 @@ static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
* done with seq_show(), so unref the iter->cur_sk.
*/
if (iter->cur_sk < iter->end_sk) {
- sock_put(iter->batch[iter->cur_sk++]);
+ sock_put(iter->batch[iter->cur_sk++].sk);
++iter->offset;
}
@@ -3553,7 +3557,7 @@ static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
* available in the current bucket batch.
*/
if (iter->cur_sk < iter->end_sk)
- sk = iter->batch[iter->cur_sk];
+ sk = iter->batch[iter->cur_sk].sk;
else
/* Prepare a new batch. */
sk = bpf_iter_udp_batch(seq);
@@ -3620,7 +3624,7 @@ static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
unsigned int cur_sk = iter->cur_sk;
while (cur_sk < iter->end_sk)
- sock_put(iter->batch[cur_sk++]);
+ sock_put(iter->batch[cur_sk++].sk);
}
static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
@@ -3890,7 +3894,7 @@ DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
unsigned int new_batch_sz, gfp_t flags)
{
- struct sock **new_batch;
+ union bpf_udp_iter_batch_item *new_batch;
new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
flags | __GFP_NOWARN);
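
As the commit message notes, this union is groundwork for tracking cookies between iterations. A hedged sketch of where that could go (the cookie member and the save_cookies() helper below are assumptions about the follow-up, not part of this commit; sock_gen_cookie() is the existing kernel helper that returns a stable u64 cookie for a socket):

/* Assumed follow-up shape: the same storage can hold either a held
 * socket pointer or, once the socket is released, its cookie. */
union bpf_udp_iter_batch_item {
	struct sock *sk;
	__u64 cookie;
};

/* Sketch only: before releasing the batch, remember each socket by
 * cookie so the next batch could resume where this one stopped. The
 * argument to sock_gen_cookie() is read before the union member is
 * overwritten, so reusing the same slot is safe. */
static void save_cookies(union bpf_udp_iter_batch_item *batch,
			 unsigned int end_sk)
{
	unsigned int i;

	for (i = 0; i < end_sk; i++)
		batch[i].cookie = sock_gen_cookie(batch[i].sk);
}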