@@ -3390,13 +3390,17 @@ struct bpf_iter__udp {
 	int bucket __aligned(8);
 };
 
+union bpf_udp_iter_batch_item {
+	struct sock *sock;
+};
+
 struct bpf_udp_iter_state {
 	struct udp_iter_state state;
 	unsigned int cur_sk;
 	unsigned int end_sk;
 	unsigned int max_sk;
 	int offset;
-	struct sock **batch;
+	union bpf_udp_iter_batch_item *batch;
 };
 
 static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
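
A side note on the hunk above (an observation, not part of the commit): wrapping the socket pointer in a single-member union preserves the batch array's element size and layout while making every access go through a named member. A minimal standalone C sketch of the before/after access pattern, with the size invariant checked at compile time:

	#include <stdio.h>

	struct sock;	/* opaque placeholder for the kernel's struct sock */

	union bpf_udp_iter_batch_item {
		struct sock *sock;
	};

	/* With a single member, the union is layout-compatible with a bare
	 * pointer, so the conversion cannot change the batch element size. */
	_Static_assert(sizeof(union bpf_udp_iter_batch_item) == sizeof(struct sock *),
		       "union wrapper must not change the element size");

	int main(void)
	{
		struct sock *sk = NULL;	/* placeholder value */
		union bpf_udp_iter_batch_item batch[1];

		batch[0].sock = sk;	/* before the conversion: batch[0] = sk; */
		printf("%p\n", (void *)batch[0].sock);
		return 0;
	}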
@@ -3457,7 +3461,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
 				}
 				if (iter->end_sk < iter->max_sk) {
 					sock_hold(sk);
-					iter->batch[iter->end_sk++] = sk;
+					iter->batch[iter->end_sk++].sock = sk;
 				}
 				batch_sks++;
 			}
@@ -3493,7 +3497,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
 	}
 
 	/* Pick up where we left off. */
-	sk = iter->batch[iter->end_sk - 1];
+	sk = iter->batch[iter->end_sk - 1].sock;
 	sk = hlist_entry_safe(sk->__sk_common.skc_portaddr_node.next,
 			      struct sock,
 			      __sk_common.skc_portaddr_node);
@@ -3510,7 +3514,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
 	}
 
 	WARN_ON_ONCE(iter->end_sk != batch_sks);
-	return iter->end_sk ? iter->batch[0] : NULL;
+	return iter->end_sk ? iter->batch[0].sock : NULL;
 }
 
 static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -3522,15 +3526,15 @@ static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	 * done with seq_show(), so unref the iter->cur_sk.
 	 */
 	if (iter->cur_sk < iter->end_sk) {
-		sock_put(iter->batch[iter->cur_sk++]);
+		sock_put(iter->batch[iter->cur_sk++].sock);
 		++iter->offset;
 	}
 
 	/* After updating iter->cur_sk, check if there are more sockets
 	 * available in the current bucket batch.
 	 */
 	if (iter->cur_sk < iter->end_sk)
-		sk = iter->batch[iter->cur_sk];
+		sk = iter->batch[iter->cur_sk].sock;
 	else
 		/* Prepare a new batch. */
 		sk = bpf_iter_udp_batch(seq);
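
For orientation (illustrative, not kernel code): bpf_iter_udp_seq_next() follows a batched-cursor pattern: drop the reference on the element just shown, advance cur_sk, serve the next batched element if one remains, and otherwise refill the batch. A compact userspace sketch of that pattern, with hypothetical names (iter_next, refill_batch, struct item):

	#include <stdio.h>

	struct item {
		int value;
	};

	struct iter_state {
		unsigned int cur;	/* element most recently handed out */
		unsigned int end;	/* one past the last batched element */
		struct item *batch;
	};

	/* Hypothetical refill hook; the kernel instead walks the UDP hash
	 * table in bpf_iter_udp_batch(). NULL means iteration is done. */
	static struct item *refill_batch(struct iter_state *it)
	{
		(void)it;
		return NULL;	/* this sketch has a single batch */
	}

	static struct item *iter_next(struct iter_state *it)
	{
		/* The kernel sock_put()s the consumed element here
		 * before advancing the cursor. */
		if (it->cur < it->end)
			it->cur++;

		/* Serve from the current batch while anything is left... */
		if (it->cur < it->end)
			return &it->batch[it->cur];
		/* ...otherwise try to prepare a new batch. */
		return refill_batch(it);
	}

	int main(void)
	{
		struct item items[3] = { {1}, {2}, {3} };
		struct iter_state it = { .cur = 0, .end = 3, .batch = items };

		/* Start at the first element, then advance via iter_next(). */
		for (struct item *p = &items[0]; p; p = iter_next(&it))
			printf("%d\n", p->value);
		return 0;
	}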
@@ -3596,8 +3600,8 @@ static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
 {
 	unsigned int cur_sk = iter->cur_sk;
 
 	while (cur_sk < iter->end_sk)
-		sock_put(iter->batch[cur_sk++]);
+		sock_put(iter->batch[cur_sk++].sock);
 }
 
 static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
@@ -3858,7 +3862,7 @@ DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
 static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
 				      unsigned int new_batch_sz, int flags)
 {
-	struct sock **new_batch;
+	union bpf_udp_iter_batch_item *new_batch;
 
 	new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
 				   flags | __GFP_NOWARN);
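
One detail worth noting about this last hunk (an observation, not part of the diff): because the allocation is sized with sizeof(*new_batch), retyping the pointer is the only change the kvmalloc_array() call needs; the element size follows the pointer's declared type automatically. The same sizing idiom in a standalone C sketch, with calloc standing in for kvmalloc_array():

	#include <stdlib.h>

	struct sock;	/* opaque placeholder */

	union bpf_udp_iter_batch_item {
		struct sock *sock;
	};

	/* sizeof(*new_batch) tracks the pointee type, so changing the
	 * pointer's declared type is enough to size the array correctly. */
	static union bpf_udp_iter_batch_item *alloc_batch(size_t n)
	{
		union bpf_udp_iter_batch_item *new_batch;

		new_batch = calloc(n, sizeof(*new_batch));
		return new_batch;
	}

	int main(void)
	{
		union bpf_udp_iter_batch_item *batch = alloc_batch(16);

		free(batch);
		return 0;
	}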