@@ -43,6 +43,7 @@
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
+static DEFINE_MUTEX(blk_mq_cpuhp_lock);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_request_bypass_insert(struct request *rq,
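
Note on the new global lock: blk_mq_cpuhp_lock is a file-scope mutex that serializes cpuhp-callback registration and unregistration across all hw queues. The hunks below follow the usual locked-wrapper/__helper convention, sketched here in kernel style (illustrative names only, not a standalone program):

	static DEFINE_MUTEX(example_lock);

	/* __helper: caller must hold example_lock; lockdep WARNs otherwise */
	static void __do_update(void)
	{
		lockdep_assert_held(&example_lock);
		/* ... touch state protected by example_lock ... */
	}

	static void do_update(void)
	{
		mutex_lock(&example_lock);
		__do_update();
		mutex_unlock(&example_lock);
	}
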
@@ -3736,13 +3737,91 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 	return 0;
 }
 
-static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
+static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 {
-	if (!(hctx->flags & BLK_MQ_F_STACKING))
+	lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+	if (!(hctx->flags & BLK_MQ_F_STACKING) &&
+	    !hlist_unhashed(&hctx->cpuhp_online)) {
 		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
 						    &hctx->cpuhp_online);
-	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
-					    &hctx->cpuhp_dead);
+		INIT_HLIST_NODE(&hctx->cpuhp_online);
+	}
+
+	if (!hlist_unhashed(&hctx->cpuhp_dead)) {
+		cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+						    &hctx->cpuhp_dead);
+		INIT_HLIST_NODE(&hctx->cpuhp_dead);
+	}
+}
+
+static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
+{
+	mutex_lock(&blk_mq_cpuhp_lock);
+	__blk_mq_remove_cpuhp(hctx);
+	mutex_unlock(&blk_mq_cpuhp_lock);
+}
+
+static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)
+{
+	lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+	if (!(hctx->flags & BLK_MQ_F_STACKING) &&
+	    hlist_unhashed(&hctx->cpuhp_online))
+		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
+				&hctx->cpuhp_online);
+
+	if (hlist_unhashed(&hctx->cpuhp_dead))
+		cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+				&hctx->cpuhp_dead);
+}
+
+static void __blk_mq_remove_cpuhp_list(struct list_head *head)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+	list_for_each_entry(hctx, head, hctx_list)
+		__blk_mq_remove_cpuhp(hctx);
+}
+
+/*
+ * Unregister cpuhp callbacks from exited hw queues
+ *
+ * Safe to call if this `request_queue` is live
+ */
+static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
+{
+	LIST_HEAD(hctx_list);
+
+	spin_lock(&q->unused_hctx_lock);
+	list_splice_init(&q->unused_hctx_list, &hctx_list);
+	spin_unlock(&q->unused_hctx_lock);
+
+	mutex_lock(&blk_mq_cpuhp_lock);
+	__blk_mq_remove_cpuhp_list(&hctx_list);
+	mutex_unlock(&blk_mq_cpuhp_lock);
+
+	spin_lock(&q->unused_hctx_lock);
+	list_splice(&hctx_list, &q->unused_hctx_list);
+	spin_unlock(&q->unused_hctx_lock);
+}
+
+/*
+ * Register cpuhp callbacks from all hw queues
+ *
+ * Safe to call if this `request_queue` is live
+ */
+static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long i;
+
+	mutex_lock(&blk_mq_cpuhp_lock);
+	queue_for_each_hw_ctx(q, hctx, i)
+		__blk_mq_add_cpuhp(hctx);
+	mutex_unlock(&blk_mq_cpuhp_lock);
 }
 
 /*
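
A note on the shape of blk_mq_remove_hw_queues_cpuhp() above: a mutex must not be taken while holding a spinlock, so the function splices the spinlock-protected unused_hctx_list onto a private list (list_splice_init), does the mutex-protected work, then splices the entries back (list_splice). Below is a standalone userspace sketch of that splice-out/process/splice-back pattern; all names are illustrative, and a singly linked list stands in for the kernel's list_head. Build with gcc -pthread.

	#include <pthread.h>
	#include <stdio.h>

	struct node { struct node *next; int id; };

	static pthread_spinlock_t list_lock;	/* guards 'unused' */
	static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *unused;		/* spinlock-protected list */

	static void process_unused(void)
	{
		struct node *local, *n;

		/* 1) Detach the whole list under the spinlock: O(1), no sleeping. */
		pthread_spin_lock(&list_lock);
		local = unused;
		unused = NULL;
		pthread_spin_unlock(&list_lock);

		/* 2) Do the work that needs the mutex, with no spinlock held. */
		pthread_mutex_lock(&work_lock);
		for (n = local; n; n = n->next)
			printf("processing node %d\n", n->id);
		pthread_mutex_unlock(&work_lock);

		/* 3) Splice the entries back under the spinlock. */
		pthread_spin_lock(&list_lock);
		if (local) {
			for (n = local; n->next; n = n->next)
				;
			n->next = unused;
			unused = local;
		}
		pthread_spin_unlock(&list_lock);
	}

	int main(void)
	{
		static struct node b = { NULL, 2 };
		static struct node a = { &b, 1 };

		pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
		unused = &a;
		process_unused();
		return 0;
	}
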
@@ -3793,8 +3872,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
-	blk_mq_remove_cpuhp(hctx);
-
 	xa_erase(&q->hctx_table, hctx_idx);
 
 	spin_lock(&q->unused_hctx_lock);
@@ -3811,6 +3888,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (i == nr_queue)
 			break;
+		blk_mq_remove_cpuhp(hctx);
 		blk_mq_exit_hctx(q, set, hctx, i);
 	}
 }
@@ -3874,6 +3952,8 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
+	INIT_HLIST_NODE(&hctx->cpuhp_dead);
+	INIT_HLIST_NODE(&hctx->cpuhp_online);
 	hctx->queue = q;
 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
 
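
These two INIT_HLIST_NODE() calls at allocation time are what make the new helpers idempotent: the patch uses hlist_unhashed() as the "is this callback registered?" test. A freshly initialized hlist_node has a NULL pprev and so reads as unhashed, and __blk_mq_remove_cpuhp() re-initializes a node after unregistering it (in the kernel, removal poisons pprev, so a removed node would not otherwise read as unhashed). A standalone sketch of the idiom, with the kernel helpers re-implemented minimally for illustration:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Minimal re-implementation of the kernel's hlist helpers:
	 * "unhashed" simply means pprev is still NULL. */
	struct hlist_node { struct hlist_node *next, **pprev; };
	struct hlist_head { struct hlist_node *first; };

	static void INIT_HLIST_NODE(struct hlist_node *h)
	{
		h->next = NULL;
		h->pprev = NULL;
	}

	static bool hlist_unhashed(const struct hlist_node *h)
	{
		return !h->pprev;
	}

	static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
	{
		n->next = h->first;
		if (h->first)
			h->first->pprev = &n->next;
		h->first = n;
		n->pprev = &h->first;
	}

	int main(void)
	{
		struct hlist_head head = { NULL };
		struct hlist_node node;

		INIT_HLIST_NODE(&node);
		printf("after init:    unhashed=%d\n", hlist_unhashed(&node)); /* 1 */
		hlist_add_head(&node, &head);
		printf("after add:     unhashed=%d\n", hlist_unhashed(&node)); /* 0 */
		INIT_HLIST_NODE(&node); /* what __blk_mq_remove_cpuhp() does on removal */
		printf("after re-init: unhashed=%d\n", hlist_unhashed(&node)); /* 1 */
		return 0;
	}
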
@@ -4412,6 +4492,12 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	xa_for_each_start(&q->hctx_table, j, hctx, j)
 		blk_mq_exit_hctx(q, set, hctx, j);
 	mutex_unlock(&q->sysfs_lock);
+
+	/* unregister cpuhp callbacks for exited hctxs */
+	blk_mq_remove_hw_queues_cpuhp(q);
+
+	/* register cpuhp for new initialized hctxs */
+	blk_mq_add_hw_queues_cpuhp(q);
 }
 
 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
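
Ordering note for blk_mq_realloc_hw_ctxs(): the cpuhp update is deliberately placed after mutex_unlock(&q->sysfs_lock). The cpuhp_state_{add,remove}_instance_nocalls() helpers take cpus_read_lock() internally, so keeping them outside sysfs_lock avoids nesting cpu_hotplug_lock inside it; unregistering the just-exited hctxs (now parked on q->unused_hctx_list) before re-registering every live hctx keeps the hlist_unhashed() bookkeeping consistent.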