
Commit 27eb687

Merge tag 'io_uring-5.14-2021-07-30' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:

 - A fix for block backed reissue (me)

 - Reissue context hardening (me)

 - Async link locking fix (Pavel)

* tag 'io_uring-5.14-2021-07-30' of git://git.kernel.dk/linux-block:
  io_uring: fix poll requests leaking second poll entries
  io_uring: don't block level reissue off completion path
  io_uring: always reissue from task_work context
  io_uring: fix race in unified task_work running
  io_uring: fix io_prep_async_link locking
2 parents: f6c5971 + a890d01

1 file changed: +32, -8 lines


fs/io_uring.c (+32, -8)
@@ -1279,8 +1279,17 @@ static void io_prep_async_link(struct io_kiocb *req)
 {
     struct io_kiocb *cur;
 
-    io_for_each_link(cur, req)
-        io_prep_async_work(cur);
+    if (req->flags & REQ_F_LINK_TIMEOUT) {
+        struct io_ring_ctx *ctx = req->ctx;
+
+        spin_lock_irq(&ctx->completion_lock);
+        io_for_each_link(cur, req)
+            io_prep_async_work(cur);
+        spin_unlock_irq(&ctx->completion_lock);
+    } else {
+        io_for_each_link(cur, req)
+            io_prep_async_work(cur);
+    }
 }
 
 static void io_queue_async_work(struct io_kiocb *req)
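The hunk above takes ->completion_lock around the link-chain walk whenever a linked timeout is armed, because the timeout handler can unlink requests from the chain concurrently. A minimal userspace sketch of the same pattern follows, using pthreads; the names here (struct req, HAS_TIMEOUT, prep_chain) are hypothetical stand-ins for the kernel types:

#include <pthread.h>

#define HAS_TIMEOUT 0x1

struct req {
    struct req *link;    /* next request in the submission chain */
    int flags;           /* HAS_TIMEOUT: a timer may edit the chain */
    int prepped;
};

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;

static void prep_one(struct req *r)
{
    r->prepped = 1;
}

/* Walk the link chain; take the lock only when a concurrent timeout
 * handler could unlink entries while we iterate. */
static void prep_chain(struct req *head)
{
    struct req *cur;

    if (head->flags & HAS_TIMEOUT) {
        pthread_mutex_lock(&chain_lock);
        for (cur = head; cur; cur = cur->link)
            prep_one(cur);
        pthread_mutex_unlock(&chain_lock);
    } else {
        for (cur = head; cur; cur = cur->link)
            prep_one(cur);
    }
}

Keeping the unlocked path for chains without a timeout mirrors the patch's design choice: the lock is only paid when a concurrent mutator can actually exist.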
@@ -1950,9 +1959,13 @@ static void tctx_task_work(struct callback_head *cb)
             node = next;
         }
         if (wq_list_empty(&tctx->task_list)) {
+            spin_lock_irq(&tctx->task_lock);
             clear_bit(0, &tctx->task_state);
-            if (wq_list_empty(&tctx->task_list))
+            if (wq_list_empty(&tctx->task_list)) {
+                spin_unlock_irq(&tctx->task_lock);
                 break;
+            }
+            spin_unlock_irq(&tctx->task_lock);
             /* another tctx_task_work() is enqueued, yield */
             if (test_and_set_bit(0, &tctx->task_state))
                 break;
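This hunk closes a lost-wakeup race: the "worker scheduled" bit must be cleared and the list re-checked under the same lock producers hold while queueing, otherwise work added in that window is stranded. A rough pthreads sketch of the pattern; struct worker, run_worker, and the inline comments are hypothetical mirrors of tctx->task_list, task_state, and task_lock:

#include <pthread.h>
#include <stdatomic.h>

struct item {
    struct item *next;
};

/* Hypothetical mirror of struct io_uring_task: a pending list, a
 * "worker scheduled" flag (tctx->task_state bit 0), and the lock
 * (tctx->task_lock) producers hold while queueing. */
struct worker {
    struct item *list;
    atomic_flag scheduled;
    pthread_mutex_t lock;
};

static void run_worker(struct worker *w)
{
    for (;;) {
        /* ... splice out w->list under w->lock and process it ... */

        /*
         * Clear the scheduled flag and re-check emptiness under the
         * producers' lock. Done without the lock, an item queued
         * between the splice and the clear is stranded: its producer
         * saw the flag still set and never scheduled another run.
         */
        pthread_mutex_lock(&w->lock);
        atomic_flag_clear(&w->scheduled);
        if (!w->list) {
            pthread_mutex_unlock(&w->lock);
            break;
        }
        pthread_mutex_unlock(&w->lock);
        /* more work arrived: try to become the worker again */
        if (atomic_flag_test_and_set(&w->scheduled))
            break;  /* another run is already enqueued, yield */
    }
}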
@@ -2047,6 +2060,12 @@ static void io_req_task_queue(struct io_kiocb *req)
     io_req_task_work_add(req);
 }
 
+static void io_req_task_queue_reissue(struct io_kiocb *req)
+{
+    req->io_task_work.func = io_queue_async_work;
+    io_req_task_work_add(req);
+}
+
 static inline void io_queue_next(struct io_kiocb *req)
 {
     struct io_kiocb *nxt = io_req_find_next(req);
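The new io_req_task_queue_reissue() helper defers the actual io_queue_async_work() call to task_work, so reissue never runs directly off the completion path. A self-contained sketch of that deferred-callback shape; all names are hypothetical, and task_work_add_stub runs the callback inline where the kernel would queue it to the submitting task and wake it:

struct request;

/* Hypothetical mirror of io_uring's task_work hook: a callback the
 * submitting task runs later, in plain task context. */
struct task_work {
    void (*func)(struct request *req);
};

struct request {
    struct task_work work;
};

/* Stand-in for io_queue_async_work(): hands the request to the
 * async worker pool. Must not run from the completion path. */
static void queue_async_work(struct request *req)
{
    (void)req;  /* worker-pool handoff elided in this sketch */
}

/* Stand-in for io_req_task_work_add(); the kernel queues req on the
 * submitting task's work list, here we simply invoke the callback. */
static void task_work_add_stub(struct request *req)
{
    req->work.func(req);
}

/* Shape of io_req_task_queue_reissue(): record the reissue as a
 * task_work callback instead of calling it directly, so it always
 * executes from task context. */
static void queue_reissue(struct request *req)
{
    req->work.func = queue_async_work;
    task_work_add_stub(req);
}

The two call-site hunks below (io_iopoll_complete and kiocb_done) swap direct io_queue_async_work() calls for this helper.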
@@ -2235,7 +2254,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
             !(req->flags & REQ_F_DONT_REISSUE)) {
             req->iopoll_completed = 0;
             req_ref_get(req);
-            io_queue_async_work(req);
+            io_req_task_queue_reissue(req);
             continue;
         }
 
@@ -2428,6 +2447,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
      */
     if (percpu_ref_is_dying(&ctx->refs))
         return false;
+    /*
+     * Play it safe and assume not safe to re-import and reissue if we're
+     * not in the original thread group (or in task context).
+     */
+    if (!same_thread_group(req->task, current) || !in_task())
+        return false;
     return true;
 }
 #else
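The added check refuses a retry that would re-import user buffers unless the code runs in the submitting task's thread group and in process context. A minimal userspace analogue of the gate, under the assumption that pthread identity stands in for same_thread_group(); the kernel's in_task() test has no direct equivalent here, and struct rw_req is hypothetical:

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical request record: remembers which thread submitted the
 * I/O and therefore owns the buffers a retry would re-import. */
struct rw_req {
    pthread_t owner;
};

/*
 * Userspace shape of the io_rw_should_reissue() gate above: refuse a
 * retry that re-imports user memory unless we run as the submitter,
 * since the mappings may only be valid in the owner's context.
 */
static bool should_reissue(const struct rw_req *req)
{
    if (!pthread_equal(req->owner, pthread_self()))
        return false;
    return true;
}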
@@ -2758,7 +2783,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
         req->flags &= ~REQ_F_REISSUE;
         if (io_resubmit_prep(req)) {
             req_ref_get(req);
-            io_queue_async_work(req);
+            io_req_task_queue_reissue(req);
         } else {
             int cflags = 0;
 
@@ -4914,7 +4939,6 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
     if (req->poll.events & EPOLLONESHOT)
         flags = 0;
     if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
-        io_poll_remove_waitqs(req);
         req->poll.done = true;
         flags = 0;
     }
@@ -4937,6 +4961,7 @@ static void io_poll_task_func(struct io_kiocb *req)
 
     done = io_poll_complete(req, req->result);
     if (done) {
+        io_poll_remove_double(req);
         hash_del(&req->hash_node);
     } else {
         req->result = 0;
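Together with the io_poll_complete hunk above, this moves teardown of the second poll entry to the point where the request is known to be done, so the entry cannot leak. A small sketch of the release-exactly-once shape; struct poll_req, remove_double, and poll_done are hypothetical mirrors of the kernel structures:

#include <stdbool.h>
#include <stdlib.h>

struct wait_entry {
    int armed;
};

/* Hypothetical poll request carrying an optional second wait entry,
 * like the double poll entry io_uring arms for some waits. */
struct poll_req {
    struct wait_entry *second;  /* NULL if never armed */
    bool done;
};

/* Shape of io_poll_remove_double(): detach and free the second
 * entry exactly once; safe to call when none was armed. */
static void remove_double(struct poll_req *req)
{
    free(req->second);
    req->second = NULL;
}

/* Completion-side analogue of the io_poll_task_func() hunk: once the
 * request is done, drop the second entry here; the fill-event path
 * no longer removes wait queues, so skipping this would leak it. */
static void poll_done(struct poll_req *req)
{
    req->done = true;
    remove_double(req);
}

The two hunks below apply the same reasoning to the arm-time paths: __io_arm_poll_handler drops the second entry itself when a oneshot poll completes inline, which lets io_arm_poll_handler's error path stop doing it redundantly.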
@@ -5124,7 +5149,7 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
         ipt->error = -EINVAL;
 
     spin_lock_irq(&ctx->completion_lock);
-    if (ipt->error)
+    if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
         io_poll_remove_double(req);
     if (likely(poll->head)) {
         spin_lock(&poll->head->lock);
@@ -5196,7 +5221,6 @@ static int io_arm_poll_handler(struct io_kiocb *req)
     ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
                     io_async_wake);
     if (ret || ipt.error) {
-        io_poll_remove_double(req);
         spin_unlock_irq(&ctx->completion_lock);
         if (ret)
             return IO_APOLL_READY;
