Skip to content

Commit 0e83b8e

Browse files
jrife authored and
Kernel Patches Daemon
committed
bpf: udp: Use bpf_udp_iter_batch_item for bpf_udp_iter_state batch items
Prepare for the next patch that tracks cookies between iterations by converting struct sock **batch to union bpf_udp_iter_batch_item *batch inside struct bpf_udp_iter_state. Signed-off-by: Jordan Rife <[email protected]> Reviewed-by: Kuniyuki Iwashima <[email protected]>
1 parent 46e6577 commit 0e83b8e

File tree

1 file changed

+13
-9
lines changed

1 file changed

+13
-9
lines changed

net/ipv4/udp.c

+13-9
Original file line number | Diff line number | Diff line change
@@ -3390,13 +3390,17 @@ struct bpf_iter__udp {
33903390
int bucket __aligned(8);
33913391
};
33923392

3393+
union bpf_udp_iter_batch_item {
3394+
struct sock *sock;
3395+
};
3396+
33933397
struct bpf_udp_iter_state {
33943398
struct udp_iter_state state;
33953399
unsigned int cur_sk;
33963400
unsigned int end_sk;
33973401
unsigned int max_sk;
33983402
int offset;
3399-
struct sock **batch;
3403+
union bpf_udp_iter_batch_item *batch;
34003404
};
34013405

34023406
static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
@@ -3457,7 +3461,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
34573461
}
34583462
if (iter->end_sk < iter->max_sk) {
34593463
sock_hold(sk);
3460-
iter->batch[iter->end_sk++] = sk;
3464+
iter->batch[iter->end_sk++].sock = sk;
34613465
}
34623466
batch_sks++;
34633467
}
@@ -3493,7 +3497,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
34933497
}
34943498

34953499
/* Pick up where we left off. */
3496-
sk = iter->batch[iter->end_sk - 1];
3500+
sk = iter->batch[iter->end_sk - 1].sock;
34973501
sk = hlist_entry_safe(sk->__sk_common.skc_portaddr_node.next,
34983502
struct sock,
34993503
__sk_common.skc_portaddr_node);
@@ -3510,7 +3514,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
35103514
}
35113515

35123516
WARN_ON_ONCE(iter->end_sk != batch_sks);
3513-
return iter->end_sk ? iter->batch[0] : NULL;
3517+
return iter->end_sk ? iter->batch[0].sock : NULL;
35143518
}
35153519

35163520
static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -3522,15 +3526,15 @@ static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
35223526
* done with seq_show(), so unref the iter->cur_sk.
35233527
*/
35243528
if (iter->cur_sk < iter->end_sk) {
3525-
sock_put(iter->batch[iter->cur_sk++]);
3529+
sock_put(iter->batch[iter->cur_sk++].sock);
35263530
++iter->offset;
35273531
}
35283532

35293533
/* After updating iter->cur_sk, check if there are more sockets
35303534
* available in the current bucket batch.
35313535
*/
35323536
if (iter->cur_sk < iter->end_sk)
3533-
sk = iter->batch[iter->cur_sk];
3537+
sk = iter->batch[iter->cur_sk].sock;
35343538
else
35353539
/* Prepare a new batch. */
35363540
sk = bpf_iter_udp_batch(seq);
@@ -3596,8 +3600,8 @@ static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
35963600
{
35973601
unsigned int cur_sk = iter->cur_sk;
35983602

3599-
while (cur_sk < iter->end_sk)
3600-
sock_put(iter->batch[cur_sk++]);
3603+
while (cur_sk < iter->end_sk)
3604+
sock_put(iter->batch[cur_sk++].sock);
36013605
}
36023606

36033607
static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
@@ -3858,7 +3862,7 @@ DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
38583862
static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
38593863
unsigned int new_batch_sz, int flags)
38603864
{
3861-
struct sock **new_batch;
3865+
union bpf_udp_iter_batch_item *new_batch;
38623866

38633867
new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
38643868
flags | __GFP_NOWARN);

0 commit comments

Comments
 (0)