Commit cf9536e

io_uring/kbuf: enable bundles for incrementally consumed buffers
The original support for incrementally consumed buffers didn't allow them to be used with bundles, with the assumption being that incremental buffers are generally larger, and hence there's less of a need to support it.

But that assumption may not be correct - it's perfectly viable to use smaller buffers with incremental consumption, and there may be valid reasons for an application or framework to do so.

As there's really no need to explicitly disable bundles with incrementally consumed buffers, allow it. This actually makes the peek side cheaper and simpler, with the completion side basically the same, just needing to iterate for the consumed length.

Reported-by: Norman Maurer <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent 334f795 commit cf9536e

File tree

1 file changed: +26, -30 lines changed

io_uring/kbuf.c (+26, -30)
@@ -32,6 +32,25 @@ struct io_provide_buf {
 	__u16 bid;
 };
 
+static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
+{
+	while (len) {
+		struct io_uring_buf *buf;
+		u32 this_len;
+
+		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
+		this_len = min_t(int, len, buf->len);
+		buf->len -= this_len;
+		if (buf->len) {
+			buf->addr += this_len;
+			return false;
+		}
+		bl->head++;
+		len -= this_len;
+	}
+	return true;
+}
+
 bool io_kbuf_commit(struct io_kiocb *req,
 		    struct io_buffer_list *bl, int len, int nr)
 {
@@ -42,20 +61,8 @@ bool io_kbuf_commit(struct io_kiocb *req,
 
 	if (unlikely(len < 0))
 		return true;
-
-	if (bl->flags & IOBL_INC) {
-		struct io_uring_buf *buf;
-
-		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
-		if (WARN_ON_ONCE(len > buf->len))
-			len = buf->len;
-		buf->len -= len;
-		if (buf->len) {
-			buf->addr += len;
-			return false;
-		}
-	}
-
+	if (bl->flags & IOBL_INC)
+		return io_kbuf_inc_commit(bl, len);
 	bl->head += nr;
 	return true;
 }
@@ -226,25 +233,14 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
 	buf = io_ring_head_to_buf(br, head, bl->mask);
 	if (arg->max_len) {
 		u32 len = READ_ONCE(buf->len);
+		size_t needed;
 
 		if (unlikely(!len))
 			return -ENOBUFS;
-		/*
-		 * Limit incremental buffers to 1 segment. No point trying
-		 * to peek ahead and map more than we need, when the buffers
-		 * themselves should be large when setup with
-		 * IOU_PBUF_RING_INC.
-		 */
-		if (bl->flags & IOBL_INC) {
-			nr_avail = 1;
-		} else {
-			size_t needed;
-
-			needed = (arg->max_len + len - 1) / len;
-			needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
-			if (nr_avail > needed)
-				nr_avail = needed;
-		}
+		needed = (arg->max_len + len - 1) / len;
+		needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
+		if (nr_avail > needed)
+			nr_avail = needed;
 	}
 
 	/*
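
For context, here is a minimal userspace sketch, not part of this commit, of what the change permits: a provided buffer ring registered with IOU_PBUF_RING_INC being used by a recv that requests a bundle, so one completion can span several small, incrementally consumed buffers. It assumes a liburing and kernel new enough for IOU_PBUF_RING_INC and IORING_RECVSEND_BUNDLE, a connected socket fd, and the helper name setup_inc_bundle_recv() is invented for illustration; error handling is abbreviated.

/*
 * Illustrative sketch only: register an incrementally consumed buffer
 * ring and issue a bundle recv against it.
 */
#include <liburing.h>
#include <errno.h>
#include <stdlib.h>

#define BGID		1
#define NR_BUFS		8
#define BUF_SIZE	4096

static int setup_inc_bundle_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_buf_reg reg = { };
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	int i, ret;

	/* Allocate and register the ring, marking it incrementally consumed. */
	if (posix_memalign((void **) &br, 4096,
			   NR_BUFS * sizeof(struct io_uring_buf)))
		return -ENOMEM;
	reg.ring_addr = (unsigned long) br;
	reg.ring_entries = NR_BUFS;
	reg.bgid = BGID;
	reg.flags = IOU_PBUF_RING_INC;
	ret = io_uring_register_buf_ring(ring, &reg, 0);
	if (ret)
		return ret;
	io_uring_buf_ring_init(br);

	/* Provide NR_BUFS small buffers; a bundle may consume several at once. */
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, malloc(BUF_SIZE), BUF_SIZE, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);

	/* Bundle recv: let the kernel pick as many buffers as the data needs. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	sqe->ioprio |= IORING_RECVSEND_BUNDLE;

	return io_uring_submit(ring);
}

On the kernel side, the new io_kbuf_inc_commit() above is what walks the ring at completion time, accounting for however much of each buffer the bundle consumed and only advancing the head past buffers that were fully used.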
