Skip to content

Commit 3c30ef0

Browse files
committed
io_uring: never attempt iopoll reissue from release path
There are two reasons why this shouldn't be done: 1) The ring is exiting, and we're canceling requests anyway — any reissued request would just be canceled again. In theory, this could iterate a number of times if someone else is also driving the target block queue into request starvation, however the likelihood of this happening is minuscule. 2) If the original task decided to pass the ring to another task, then we don't want to be reissuing from this context, as it may be an unrelated task or context. No assumptions should be made about the context in which ->release() is run. This can only happen for pure read/write requests, and we'll get -EFAULT on them anyway. Link: https://lore.kernel.org/io-uring/[email protected]/ Reported-by: Al Viro <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent 0cc936f commit 3c30ef0

File tree

1 file changed

+7
-7
lines changed

1 file changed

+7
-7
lines changed

fs/io_uring.c

+7-7
Original file line number | Diff line number | Diff line change
@@ -2205,7 +2205,7 @@ static inline bool io_run_task_work(void)
22052205
* Find and free completed poll iocbs
22062206
*/
22072207
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2208-
struct list_head *done)
2208+
struct list_head *done, bool resubmit)
22092209
{
22102210
struct req_batch rb;
22112211
struct io_kiocb *req;
@@ -2220,7 +2220,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
22202220
req = list_first_entry(done, struct io_kiocb, inflight_entry);
22212221
list_del(&req->inflight_entry);
22222222

2223-
if (READ_ONCE(req->result) == -EAGAIN &&
2223+
if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
22242224
!(req->flags & REQ_F_DONT_REISSUE)) {
22252225
req->iopoll_completed = 0;
22262226
req_ref_get(req);
@@ -2244,7 +2244,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
22442244
}
22452245

22462246
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2247-
long min)
2247+
long min, bool resubmit)
22482248
{
22492249
struct io_kiocb *req, *tmp;
22502250
LIST_HEAD(done);
@@ -2287,7 +2287,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
22872287
}
22882288

22892289
if (!list_empty(&done))
2290-
io_iopoll_complete(ctx, nr_events, &done);
2290+
io_iopoll_complete(ctx, nr_events, &done, resubmit);
22912291

22922292
return ret;
22932293
}
@@ -2305,7 +2305,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
23052305
while (!list_empty(&ctx->iopoll_list)) {
23062306
unsigned int nr_events = 0;
23072307

2308-
io_do_iopoll(ctx, &nr_events, 0);
2308+
io_do_iopoll(ctx, &nr_events, 0, false);
23092309

23102310
/* let it sleep and repeat later if can't complete a request */
23112311
if (nr_events == 0)
@@ -2367,7 +2367,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
23672367
list_empty(&ctx->iopoll_list))
23682368
break;
23692369
}
2370-
ret = io_do_iopoll(ctx, &nr_events, min);
2370+
ret = io_do_iopoll(ctx, &nr_events, min, true);
23712371
} while (!ret && nr_events < min && !need_resched());
23722372
out:
23732373
mutex_unlock(&ctx->uring_lock);
@@ -6798,7 +6798,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
67986798

67996799
mutex_lock(&ctx->uring_lock);
68006800
if (!list_empty(&ctx->iopoll_list))
6801-
io_do_iopoll(ctx, &nr_events, 0);
6801+
io_do_iopoll(ctx, &nr_events, 0, true);
68026802

68036803
/*
68046804
* Don't submit if refs are dying, good for io_uring_register(),

0 commit comments

Comments
 (0)