
Commit f03baec

io_uring: move cancelations to be io_uring_task based
Right now the task_struct pointer is used as the key to match a task, but in
preparation for some io_kiocb changes, move it to using struct io_uring_task
instead. No functional changes intended in this patch.

Signed-off-by: Jens Axboe <[email protected]>
1 parent 6f94cbc commit f03baec

12 files changed: +40 -40 lines
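
The gist of the change is easiest to see in io_match_task_safe(): instead of comparing a request's owning task_struct against a task pointer, the request's task->io_uring (its per-task io_uring_task, "tctx") is compared against a tctx pointer. Below is a minimal userspace sketch of that keying change, not the kernel code itself: the structs are reduced stand-ins, and match_by_task()/match_by_tctx() are hypothetical helper names used only for illustration.

/*
 * Minimal userspace sketch of the keying change: requests are matched by the
 * per-task io_uring context (tctx) instead of by the task_struct pointer.
 * Struct layouts are reduced to the fields needed for the comparison.
 */
#include <stdbool.h>
#include <stdio.h>

struct io_uring_task { int unused; };                      /* per-task io_uring context (tctx) */
struct task_struct   { struct io_uring_task *io_uring; };  /* reduced to the field we compare  */
struct io_kiocb      { struct task_struct *task; };        /* request, owned by a task         */

/* Old scheme: a request matches if it was submitted by this task_struct. */
static bool match_by_task(struct io_kiocb *req, struct task_struct *task)
{
	return !task || req->task == task;
}

/* New scheme: a request matches if it was submitted under this tctx. */
static bool match_by_tctx(struct io_kiocb *req, struct io_uring_task *tctx)
{
	return !tctx || req->task->io_uring == tctx;
}

int main(void)
{
	struct io_uring_task tctx = { 0 };
	struct task_struct task = { .io_uring = &tctx };
	struct io_kiocb req = { .task = &task };

	/* Both schemes agree here; the tctx key just avoids carrying the task pointer around. */
	printf("match_by_task: %d, match_by_tctx: %d\n",
	       match_by_task(&req, &task), match_by_tctx(&req, &tctx));
	return 0;
}

As in the patch, a NULL key means "match any owner"; io_uring_try_cancel_requests() handles that case separately via io_uring_try_cancel_iowq().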

io_uring/futex.c (+2, -2)

@@ -141,7 +141,7 @@ int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 	return -ENOENT;
 }

-bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
+bool io_futex_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
 			 bool cancel_all)
 {
 	struct hlist_node *tmp;
@@ -151,7 +151,7 @@ bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
 	lockdep_assert_held(&ctx->uring_lock);

 	hlist_for_each_entry_safe(req, tmp, &ctx->futex_list, hash_node) {
-		if (!io_match_task_safe(req, task, cancel_all))
+		if (!io_match_task_safe(req, tctx, cancel_all))
 			continue;
 		hlist_del_init(&req->hash_node);
 		__io_futex_cancel(ctx, req);

io_uring/futex.h (+2, -2)

@@ -11,7 +11,7 @@ int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags);
 #if defined(CONFIG_FUTEX)
 int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 		    unsigned int issue_flags);
-bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
+bool io_futex_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
 			 bool cancel_all);
 bool io_futex_cache_init(struct io_ring_ctx *ctx);
 void io_futex_cache_free(struct io_ring_ctx *ctx);
@@ -23,7 +23,7 @@ static inline int io_futex_cancel(struct io_ring_ctx *ctx,
 	return 0;
 }
 static inline bool io_futex_remove_all(struct io_ring_ctx *ctx,
-					struct task_struct *task, bool cancel_all)
+					struct io_uring_task *tctx, bool cancel_all)
 {
 	return false;
 }

io_uring/io_uring.c (+21, -21)

@@ -142,7 +142,7 @@ struct io_defer_entry {
 #define IO_CQ_WAKE_FORCE	(IO_CQ_WAKE_INIT >> 1)

 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
-					 struct task_struct *task,
+					 struct io_uring_task *tctx,
 					 bool cancel_all);

 static void io_queue_sqe(struct io_kiocb *req);
@@ -201,12 +201,12 @@ static bool io_match_linked(struct io_kiocb *head)
  * As io_match_task() but protected against racing with linked timeouts.
  * User must not hold timeout_lock.
  */
-bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
 			bool cancel_all)
 {
 	bool matched;

-	if (task && head->task != task)
+	if (tctx && head->task->io_uring != tctx)
 		return false;
 	if (cancel_all)
 		return true;
@@ -2987,7 +2987,7 @@ static int io_uring_release(struct inode *inode, struct file *file)
 }

 struct io_task_cancel {
-	struct task_struct *task;
+	struct io_uring_task *tctx;
 	bool all;
 };

@@ -2996,19 +2996,19 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_task_cancel *cancel = data;

-	return io_match_task_safe(req, cancel->task, cancel->all);
+	return io_match_task_safe(req, cancel->tctx, cancel->all);
 }

 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
-					 struct task_struct *task,
+					 struct io_uring_task *tctx,
 					 bool cancel_all)
 {
 	struct io_defer_entry *de;
 	LIST_HEAD(list);

 	spin_lock(&ctx->completion_lock);
 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-		if (io_match_task_safe(de->req, task, cancel_all)) {
+		if (io_match_task_safe(de->req, tctx, cancel_all)) {
 			list_cut_position(&list, &ctx->defer_list, &de->list);
 			break;
 		}
@@ -3051,11 +3051,10 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
 }

 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
-						struct task_struct *task,
+						struct io_uring_task *tctx,
 						bool cancel_all)
 {
-	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
-	struct io_uring_task *tctx = task ? task->io_uring : NULL;
+	struct io_task_cancel cancel = { .tctx = tctx, .all = cancel_all, };
 	enum io_wq_cancel cret;
 	bool ret = false;

@@ -3069,9 +3068,9 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 	if (!ctx->rings)
 		return false;

-	if (!task) {
+	if (!tctx) {
 		ret |= io_uring_try_cancel_iowq(ctx);
-	} else if (tctx && tctx->io_wq) {
+	} else if (tctx->io_wq) {
 		/*
 		 * Cancels requests of all rings, not only @ctx, but
 		 * it's fine as the task is in exit/exec.
@@ -3094,15 +3093,15 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
 	    io_allowed_defer_tw_run(ctx))
 		ret |= io_run_local_work(ctx, INT_MAX) > 0;
-	ret |= io_cancel_defer_files(ctx, task, cancel_all);
+	ret |= io_cancel_defer_files(ctx, tctx, cancel_all);
 	mutex_lock(&ctx->uring_lock);
-	ret |= io_poll_remove_all(ctx, task, cancel_all);
-	ret |= io_waitid_remove_all(ctx, task, cancel_all);
-	ret |= io_futex_remove_all(ctx, task, cancel_all);
-	ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
+	ret |= io_poll_remove_all(ctx, tctx, cancel_all);
+	ret |= io_waitid_remove_all(ctx, tctx, cancel_all);
+	ret |= io_futex_remove_all(ctx, tctx, cancel_all);
+	ret |= io_uring_try_cancel_uring_cmd(ctx, tctx, cancel_all);
 	mutex_unlock(&ctx->uring_lock);
-	ret |= io_kill_timeouts(ctx, task, cancel_all);
-	if (task)
+	ret |= io_kill_timeouts(ctx, tctx, cancel_all);
+	if (tctx)
 		ret |= io_run_task_work() > 0;
 	else
 		ret |= flush_delayed_work(&ctx->fallback_work);
@@ -3155,12 +3154,13 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 			if (node->ctx->sq_data)
 				continue;
 			loop |= io_uring_try_cancel_requests(node->ctx,
-							     current, cancel_all);
+							     current->io_uring,
+							     cancel_all);
 		}
 	} else {
 		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
 			loop |= io_uring_try_cancel_requests(ctx,
-							     current,
+							     current->io_uring,
 							     cancel_all);
 	}

io_uring/io_uring.h (+1, -1)

@@ -115,7 +115,7 @@ void io_queue_next(struct io_kiocb *req);
 void io_task_refs_refill(struct io_uring_task *tctx);
 bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

-bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
 			bool cancel_all);

 void io_activate_pollwq(struct io_ring_ctx *ctx);

io_uring/poll.c (+2, -2)

@@ -714,7 +714,7 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 /*
  * Returns true if we found and killed one or more poll requests
  */
-__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
+__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
 			       bool cancel_all)
 {
 	unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
@@ -729,7 +729,7 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];

 		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
-			if (io_match_task_safe(req, tsk, cancel_all)) {
+			if (io_match_task_safe(req, tctx, cancel_all)) {
 				hlist_del_init(&req->hash_node);
 				io_poll_cancel_req(req);
 				found = true;

io_uring/poll.h (+1, -1)

@@ -40,7 +40,7 @@ struct io_cancel_data;
 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 		   unsigned issue_flags);
 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
-bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
+bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
 			bool cancel_all);

 void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts);

io_uring/timeout.c (+4, -4)

@@ -637,13 +637,13 @@ void io_queue_linked_timeout(struct io_kiocb *req)
 		io_put_req(req);
 }

-static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
+static bool io_match_task(struct io_kiocb *head, struct io_uring_task *tctx,
 			  bool cancel_all)
 	__must_hold(&head->ctx->timeout_lock)
 {
 	struct io_kiocb *req;

-	if (task && head->task != task)
+	if (tctx && head->task->io_uring != tctx)
 		return false;
 	if (cancel_all)
 		return true;
@@ -656,7 +656,7 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 }

 /* Returns true if we found and killed one or more timeouts */
-__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
 			     bool cancel_all)
 {
 	struct io_timeout *timeout, *tmp;
@@ -671,7 +671,7 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
 		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

-		if (io_match_task(req, tsk, cancel_all) &&
+		if (io_match_task(req, tctx, cancel_all) &&
 		    io_kill_timeout(req, -ECANCELED))
 			canceled++;
 	}

io_uring/timeout.h (+1, -1)

@@ -24,7 +24,7 @@ static inline struct io_kiocb *io_disarm_linked_timeout(struct io_kiocb *req)
 __cold void io_flush_timeouts(struct io_ring_ctx *ctx);
 struct io_cancel_data;
 int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd);
-__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
 			     bool cancel_all);
 void io_queue_linked_timeout(struct io_kiocb *req);
 void io_disarm_next(struct io_kiocb *req);

io_uring/uring_cmd.c (+2, -2)

@@ -47,7 +47,7 @@ static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
 }

 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
-				   struct task_struct *task, bool cancel_all)
+				   struct io_uring_task *tctx, bool cancel_all)
 {
 	struct hlist_node *tmp;
 	struct io_kiocb *req;
@@ -61,7 +61,7 @@ bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
 						struct io_uring_cmd);
 		struct file *file = req->file;

-		if (!cancel_all && req->task != task)
+		if (!cancel_all && req->task->io_uring != tctx)
 			continue;

 		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {

io_uring/uring_cmd.h (+1, -1)

@@ -8,4 +8,4 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
-				   struct task_struct *task, bool cancel_all);
+				   struct io_uring_task *tctx, bool cancel_all);

io_uring/waitid.c (+2, -2)

@@ -184,7 +184,7 @@ int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 	return -ENOENT;
 }

-bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
+bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
 			  bool cancel_all)
 {
 	struct hlist_node *tmp;
@@ -194,7 +194,7 @@ bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
 	lockdep_assert_held(&ctx->uring_lock);

 	hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
-		if (!io_match_task_safe(req, task, cancel_all))
+		if (!io_match_task_safe(req, tctx, cancel_all))
 			continue;
 		hlist_del_init(&req->hash_node);
 		__io_waitid_cancel(ctx, req);

io_uring/waitid.h (+1, -1)

@@ -11,5 +11,5 @@ int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_waitid(struct io_kiocb *req, unsigned int issue_flags);
 int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 		     unsigned int issue_flags);
-bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
+bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
 			  bool cancel_all);
