@@ -142,7 +142,7 @@ struct io_defer_entry {
 #define IO_CQ_WAKE_FORCE	(IO_CQ_WAKE_INIT >> 1)
 
 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
-					 struct task_struct *task,
+					 struct io_uring_task *tctx,
					 bool cancel_all);
 
 static void io_queue_sqe(struct io_kiocb *req);
@@ -201,12 +201,12 @@ static bool io_match_linked(struct io_kiocb *head)
  * As io_match_task() but protected against racing with linked timeouts.
  * User must not hold timeout_lock.
  */
-bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
			bool cancel_all)
 {
 	bool matched;
 
-	if (task && head->task != task)
+	if (tctx && head->task->io_uring != tctx)
 		return false;
 	if (cancel_all)
 		return true;
@@ -2987,7 +2987,7 @@ static int io_uring_release(struct inode *inode, struct file *file)
 }
 
 struct io_task_cancel {
-	struct task_struct *task;
+	struct io_uring_task *tctx;
	bool all;
 };
 
@@ -2996,19 +2996,19 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_task_cancel *cancel = data;
 
-	return io_match_task_safe(req, cancel->task, cancel->all);
+	return io_match_task_safe(req, cancel->tctx, cancel->all);
 }
 
 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
-					 struct task_struct *task,
+					 struct io_uring_task *tctx,
					 bool cancel_all)
 {
 	struct io_defer_entry *de;
 	LIST_HEAD(list);
 
 	spin_lock(&ctx->completion_lock);
 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-		if (io_match_task_safe(de->req, task, cancel_all)) {
+		if (io_match_task_safe(de->req, tctx, cancel_all)) {
 			list_cut_position(&list, &ctx->defer_list, &de->list);
 			break;
 		}
@@ -3051,11 +3051,10 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
 }
 
 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
-						struct task_struct *task,
+						struct io_uring_task *tctx,
						bool cancel_all)
 {
-	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
-	struct io_uring_task *tctx = task ? task->io_uring : NULL;
+	struct io_task_cancel cancel = { .tctx = tctx, .all = cancel_all, };
 	enum io_wq_cancel cret;
 	bool ret = false;
 
@@ -3069,9 +3068,9 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 	if (!ctx->rings)
 		return false;
 
-	if (!task) {
+	if (!tctx) {
 		ret |= io_uring_try_cancel_iowq(ctx);
-	} else if (tctx && tctx->io_wq) {
+	} else if (tctx->io_wq) {
 		/*
 		 * Cancels requests of all rings, not only @ctx, but
 		 * it's fine as the task is in exit/exec.
@@ -3094,15 +3093,15 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
 	    io_allowed_defer_tw_run(ctx))
 		ret |= io_run_local_work(ctx, INT_MAX) > 0;
-	ret |= io_cancel_defer_files(ctx, task, cancel_all);
+	ret |= io_cancel_defer_files(ctx, tctx, cancel_all);
 	mutex_lock(&ctx->uring_lock);
-	ret |= io_poll_remove_all(ctx, task, cancel_all);
-	ret |= io_waitid_remove_all(ctx, task, cancel_all);
-	ret |= io_futex_remove_all(ctx, task, cancel_all);
-	ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
+	ret |= io_poll_remove_all(ctx, tctx, cancel_all);
+	ret |= io_waitid_remove_all(ctx, tctx, cancel_all);
+	ret |= io_futex_remove_all(ctx, tctx, cancel_all);
+	ret |= io_uring_try_cancel_uring_cmd(ctx, tctx, cancel_all);
 	mutex_unlock(&ctx->uring_lock);
-	ret |= io_kill_timeouts(ctx, task, cancel_all);
-	if (task)
+	ret |= io_kill_timeouts(ctx, tctx, cancel_all);
+	if (tctx)
 		ret |= io_run_task_work() > 0;
 	else
 		ret |= flush_delayed_work(&ctx->fallback_work);
@@ -3155,12 +3154,13 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 			if (node->ctx->sq_data)
 				continue;
 			loop |= io_uring_try_cancel_requests(node->ctx,
-							current, cancel_all);
+							current->io_uring,
+							cancel_all);
 		}
 	} else {
 		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
 			loop |= io_uring_try_cancel_requests(ctx,
-						current,
+						current->io_uring,
 						cancel_all);
 	}
 
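A minimal caller-side sketch, not part of the patch: with this change the cancellation helpers take the per-task io_uring context (struct io_uring_task *) instead of the task_struct, and callers resolve it up front via current->io_uring. The wrapper name below is hypothetical and only illustrates the new calling convention.

/*
 * Hypothetical wrapper sketch: resolve the io_uring_task context once and
 * pass it down. A NULL tctx keeps the old "cancel everything" meaning that
 * a NULL task used to have.
 */
static bool example_cancel_current(struct io_ring_ctx *ctx, bool cancel_all)
{
	struct io_uring_task *tctx = current->io_uring;	/* may be NULL */

	return io_uring_try_cancel_requests(ctx, tctx, cancel_all);
}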