@@ -1279,8 +1279,17 @@ static void io_prep_async_link(struct io_kiocb *req)
 {
 	struct io_kiocb *cur;
 
-	io_for_each_link(cur, req)
-		io_prep_async_work(cur);
+	if (req->flags & REQ_F_LINK_TIMEOUT) {
+		struct io_ring_ctx *ctx = req->ctx;
+
+		spin_lock_irq(&ctx->completion_lock);
+		io_for_each_link(cur, req)
+			io_prep_async_work(cur);
+		spin_unlock_irq(&ctx->completion_lock);
+	} else {
+		io_for_each_link(cur, req)
+			io_prep_async_work(cur);
+	}
 }
 
 static void io_queue_async_work(struct io_kiocb *req)
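
io_for_each_link() walks the request chain through each req->link pointer, and a linked timeout that fires while the chain is being prepared can unlink entries mid-walk; the hunk above therefore wraps the walk in ctx->completion_lock, but only when REQ_F_LINK_TIMEOUT is set, so the common timeout-free path stays lock-free. A minimal userspace sketch of the same "lock the walk only when the chain can change underneath you" idea, with hypothetical names and a pthread mutex standing in for the spinlock (not kernel code):

#include <pthread.h>
#include <stddef.h>

struct fake_req {
	struct fake_req *link;		/* next request in the chain */
	int prepared;
};

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;

/* Prepare every request in the chain; take the lock only when another
 * thread may unlink entries (the analogue of REQ_F_LINK_TIMEOUT). */
static void prep_chain(struct fake_req *head, int chain_may_change)
{
	struct fake_req *cur;

	if (chain_may_change) {
		pthread_mutex_lock(&chain_lock);
		for (cur = head; cur; cur = cur->link)
			cur->prepared = 1;
		pthread_mutex_unlock(&chain_lock);
	} else {
		for (cur = head; cur; cur = cur->link)
			cur->prepared = 1;
	}
}

int main(void)
{
	struct fake_req b = { .link = NULL }, a = { .link = &b };

	prep_chain(&a, 1);
	return (a.prepared && b.prepared) ? 0 : 1;
}
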
@@ -1950,9 +1959,13 @@ static void tctx_task_work(struct callback_head *cb)
 			node = next;
 		}
 		if (wq_list_empty(&tctx->task_list)) {
+			spin_lock_irq(&tctx->task_lock);
 			clear_bit(0, &tctx->task_state);
-			if (wq_list_empty(&tctx->task_list))
+			if (wq_list_empty(&tctx->task_list)) {
+				spin_unlock_irq(&tctx->task_lock);
 				break;
+			}
+			spin_unlock_irq(&tctx->task_lock);
 			/* another tctx_task_work() is enqueued, yield */
 			if (test_and_set_bit(0, &tctx->task_state))
 				break;
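
io_req_task_work_add() inserts new work into tctx->task_list under tctx->task_lock and only schedules another task_work run when it finds bit 0 of task_state clear. If the worker clears that bit and re-checks the list without holding task_lock, the clear and the re-check can race with a concurrent insertion such that the producer still observes the bit as set (so it does not reschedule) while the worker observes an empty list (so it exits), leaving the queued entry stranded. Holding task_lock across the clear-and-recheck, as the hunk above does, forces one of the two safe orderings. A rough userspace analogue of the pattern, with hypothetical names and a mutex in place of the spinlock:

#include <pthread.h>
#include <stdbool.h>

struct work_item {
	struct work_item *next;
};

struct work_queue {
	pthread_mutex_t lock;
	struct work_item *head;		/* pending items, protected by lock */
	bool worker_active;		/* analogue of bit 0 in tctx->task_state */
};

/*
 * Worker: only declare "nothing left, going idle" while holding the same
 * lock the producer uses to insert.  Either the producer's item is already
 * visible here, or the producer will later see worker_active == false and
 * schedule a fresh worker run.
 */
static bool worker_should_stop(struct work_queue *q)
{
	bool stop = false;

	pthread_mutex_lock(&q->lock);
	if (!q->head) {
		q->worker_active = false;
		stop = true;
	}
	pthread_mutex_unlock(&q->lock);
	return stop;
}

/* Producer: insert under the lock; the caller schedules a worker if needed. */
static bool producer_add(struct work_queue *q, struct work_item *item)
{
	bool need_new_worker;

	pthread_mutex_lock(&q->lock);
	item->next = q->head;
	q->head = item;
	need_new_worker = !q->worker_active;
	q->worker_active = true;
	pthread_mutex_unlock(&q->lock);
	return need_new_worker;
}
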
@@ -2047,6 +2060,12 @@ static void io_req_task_queue(struct io_kiocb *req)
 	io_req_task_work_add(req);
 }
 
+static void io_req_task_queue_reissue(struct io_kiocb *req)
+{
+	req->io_task_work.func = io_queue_async_work;
+	io_req_task_work_add(req);
+}
+
 static inline void io_queue_next(struct io_kiocb *req)
 {
 	struct io_kiocb *nxt = io_req_find_next(req);
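
io_req_task_queue_reissue() mirrors io_req_task_queue() just above it but points io_task_work.func at io_queue_async_work, so a request that needs to be retried is first bounced through task_work and only handed to io-wq once it is running in the submitting task's own context. The two call-site hunks below (io_iopoll_complete() and kiocb_done()) switch the reissue paths over to this helper, and the io_rw_should_reissue() hunk further down adds the matching guard for completions that arrive outside that context.
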
@@ -2235,7 +2254,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		    !(req->flags & REQ_F_DONT_REISSUE)) {
 			req->iopoll_completed = 0;
 			req_ref_get(req);
-			io_queue_async_work(req);
+			io_req_task_queue_reissue(req);
 			continue;
 		}
 
@@ -2428,6 +2447,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
 	 */
 	if (percpu_ref_is_dying(&ctx->refs))
 		return false;
+	/*
+	 * Play it safe and assume not safe to re-import and reissue if we're
+	 * not in the original thread group (or in task context).
+	 */
+	if (!same_thread_group(req->task, current) || !in_task())
+		return false;
 	return true;
 }
 #else
@@ -2758,7 +2783,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 		req->flags &= ~REQ_F_REISSUE;
 		if (io_resubmit_prep(req)) {
 			req_ref_get(req);
-			io_queue_async_work(req);
+			io_req_task_queue_reissue(req);
 		} else {
 			int cflags = 0;
 
@@ -4914,7 +4939,6 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
 	if (req->poll.events & EPOLLONESHOT)
 		flags = 0;
 	if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
-		io_poll_remove_waitqs(req);
 		req->poll.done = true;
 		flags = 0;
 	}
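
This hunk and the three that follow rearrange where the wait-queue entries of a completed poll are torn down: io_poll_complete() no longer calls io_poll_remove_waitqs(), and io_arm_poll_handler() stops removing the double-poll entry on its failure path; instead, io_poll_task_func() drops the double-poll entry once the poll has actually completed, and __io_arm_poll_handler() drops it when an EPOLLONESHOT request already has events at arming time, since such a request completes right away and never goes through the wakeup path that would otherwise clean the entry up.
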
@@ -4937,6 +4961,7 @@ static void io_poll_task_func(struct io_kiocb *req)
 
 		done = io_poll_complete(req, req->result);
 		if (done) {
+			io_poll_remove_double(req);
 			hash_del(&req->hash_node);
 		} else {
 			req->result = 0;
@@ -5124,7 +5149,7 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
 		ipt->error = -EINVAL;
 
 	spin_lock_irq(&ctx->completion_lock);
-	if (ipt->error)
+	if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
 		io_poll_remove_double(req);
 	if (likely(poll->head)) {
 		spin_lock(&poll->head->lock);
@@ -5196,7 +5221,6 @@ static int io_arm_poll_handler(struct io_kiocb *req)
 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
 					io_async_wake);
 	if (ret || ipt.error) {
-		io_poll_remove_double(req);
 		spin_unlock_irq(&ctx->completion_lock);
 		if (ret)
 			return IO_APOLL_READY;