@@ -136,11 +136,8 @@ static bool io_net_retry(struct socket *sock, int flags)
 
 static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
 {
-        if (kmsg->free_iov) {
-                kfree(kmsg->free_iov);
-                kmsg->free_iov_nr = 0;
-                kmsg->free_iov = NULL;
-        }
+        if (kmsg->vec.iovec)
+                io_vec_free(&kmsg->vec);
 }
 
 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
@@ -154,7 +151,7 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
         }
 
         /* Let normal cleanup path reap it if we fail adding to the cache */
-        io_alloc_cache_kasan(&hdr->free_iov, &hdr->free_iov_nr);
+        io_alloc_cache_vec_kasan(&hdr->vec);
         if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
                 req->async_data = NULL;
                 req->flags &= ~REQ_F_ASYNC_DATA;
@@ -171,7 +168,7 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
                 return NULL;
 
         /* If the async data was cached, we might have an iov cached inside. */
-        if (hdr->free_iov)
+        if (hdr->vec.iovec)
                 req->flags |= REQ_F_NEED_CLEANUP;
         return hdr;
 }
@@ -182,10 +179,7 @@ static void io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg
 {
         if (iov) {
                 req->flags |= REQ_F_NEED_CLEANUP;
-                kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs;
-                if (kmsg->free_iov)
-                        kfree(kmsg->free_iov);
-                kmsg->free_iov = iov;
+                io_vec_reset_iovec(&kmsg->vec, iov, kmsg->msg.msg_iter.nr_segs);
         }
 }
 
@@ -208,9 +202,9 @@ static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg
         struct iovec *iov;
         int ret, nr_segs;
 
-        if (iomsg->free_iov) {
-                nr_segs = iomsg->free_iov_nr;
-                iov = iomsg->free_iov;
+        if (iomsg->vec.iovec) {
+                nr_segs = iomsg->vec.nr;
+                iov = iomsg->vec.iovec;
         } else {
                 nr_segs = 1;
                 iov = &iomsg->fast_iov;
@@ -468,7 +462,7 @@ static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
         if (iter_is_ubuf(&kmsg->msg.msg_iter))
                 return 1;
 
-        iov = kmsg->free_iov;
+        iov = kmsg->vec.iovec;
         if (!iov)
                 iov = &kmsg->fast_iov;
 
@@ -584,9 +578,9 @@ static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
                 .nr_iovs = 1,
         };
 
-        if (kmsg->free_iov) {
-                arg.nr_iovs = kmsg->free_iov_nr;
-                arg.iovs = kmsg->free_iov;
+        if (kmsg->vec.iovec) {
+                arg.nr_iovs = kmsg->vec.nr;
+                arg.iovs = kmsg->vec.iovec;
                 arg.mode = KBUF_MODE_FREE;
         }
 
@@ -599,9 +593,9 @@ static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
         if (unlikely(ret < 0))
                 return ret;
 
-        if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
-                kmsg->free_iov_nr = ret;
-                kmsg->free_iov = arg.iovs;
+        if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
+                kmsg->vec.nr = ret;
+                kmsg->vec.iovec = arg.iovs;
                 req->flags |= REQ_F_NEED_CLEANUP;
         }
         sr->len = arg.out_len;
@@ -1085,9 +1079,9 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
                         .mode = KBUF_MODE_EXPAND,
                 };
 
-                if (kmsg->free_iov) {
-                        arg.nr_iovs = kmsg->free_iov_nr;
-                        arg.iovs = kmsg->free_iov;
+                if (kmsg->vec.iovec) {
+                        arg.nr_iovs = kmsg->vec.nr;
+                        arg.iovs = kmsg->vec.iovec;
                         arg.mode |= KBUF_MODE_FREE;
                 }
 
@@ -1106,9 +1100,9 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
                 }
                 iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
                                 arg.out_len);
-                if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
-                        kmsg->free_iov_nr = ret;
-                        kmsg->free_iov = arg.iovs;
+                if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
+                        kmsg->vec.nr = ret;
+                        kmsg->vec.iovec = arg.iovs;
                         req->flags |= REQ_F_NEED_CLEANUP;
                 }
         } else {
@@ -1874,8 +1868,7 @@ void io_netmsg_cache_free(const void *entry)
 {
         struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
 
-        if (kmsg->free_iov)
-                io_netmsg_iovec_free(kmsg);
+        io_vec_free(&kmsg->vec);
         kfree(kmsg);
 }
 #endif
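
For context, the conversion above replaces the open-coded free_iov/free_iov_nr pair with the shared struct iou_vec container and its helpers (io_vec_free(), io_vec_reset_iovec()) from io_uring/rsrc.h. The following is a minimal sketch of what those helpers amount to, inferred from how the hunks above use them; the in-tree definitions may differ in detail.

/* Sketch only: reconstructed from usage in this diff, not copied from the tree. */
struct iou_vec {
        struct iovec    *iovec; /* kmalloc'ed iovec array, NULL when empty */
        unsigned        nr;     /* number of entries in @iovec */
};

static inline void io_vec_free(struct iou_vec *iv)
{
        /* release the cached iovec and mark the vector empty */
        kfree(iv->iovec);
        iv->iovec = NULL;
        iv->nr = 0;
}

static inline void io_vec_reset_iovec(struct iou_vec *iv,
                                      struct iovec *iovec, unsigned nr)
{
        /* drop any previously cached iovec and take ownership of @iovec */
        io_vec_free(iv);
        iv->iovec = iovec;
        iv->nr = nr;
}

io_alloc_cache_vec_kasan() plays the same role as the io_alloc_cache_kasan() call it replaces: when KASAN is enabled it frees the cached iovec before the msghdr goes back into the cache, presumably so stale accesses to it remain detectable.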