@@ -4462,7 +4462,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	unsigned long i, j;

 	/* protect against switching io scheduler */
-	mutex_lock(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_lock);
+
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
@@ -4495,7 +4496,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,

 	xa_for_each_start(&q->hctx_table, j, hctx, j)
 		blk_mq_exit_hctx(q, set, hctx, j);
-	mutex_unlock(&q->sysfs_lock);

 	/* unregister cpuhp callbacks for exited hctxs */
 	blk_mq_remove_hw_queues_cpuhp(q);
@@ -4527,10 +4527,14 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,

 	xa_init(&q->hctx_table);

+	mutex_lock(&q->sysfs_lock);
+
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;

+	mutex_unlock(&q->sysfs_lock);
+
 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

@@ -4549,6 +4553,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	return 0;

 err_hctxs:
+	mutex_unlock(&q->sysfs_lock);
 	blk_mq_release(q);
 err_exit:
 	q->mq_ops = NULL;
@@ -4929,12 +4934,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 		return false;

 	/* q->elevator needs protection from ->sysfs_lock */
-	mutex_lock(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_lock);

 	/* the check has to be done with holding sysfs_lock */
 	if (!q->elevator) {
 		kfree(qe);
-		goto unlock;
+		goto out;
 	}

 	INIT_LIST_HEAD(&qe->node);
@@ -4944,9 +4949,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 	__elevator_get(qe->type);
 	list_add(&qe->node, head);
 	elevator_disable(q);
-unlock:
-	mutex_unlock(&q->sysfs_lock);
-
+out:
 	return true;
 }

@@ -4975,11 +4978,9 @@ static void blk_mq_elv_switch_back(struct list_head *head,
 	list_del(&qe->node);
 	kfree(qe);

-	mutex_lock(&q->sysfs_lock);
 	elevator_switch(q, t);
 	/* drop the reference acquired in blk_mq_elv_switch_none */
 	elevator_put(t);
-	mutex_unlock(&q->sysfs_lock);
 }

 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
@@ -4999,8 +5000,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
 		return;

-	list_for_each_entry(q, &set->tag_list, tag_set_list)
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		mutex_lock(&q->sysfs_dir_lock);
+		mutex_lock(&q->sysfs_lock);
 		blk_mq_freeze_queue(q);
+	}
 	/*
 	 * Switch IO scheduler to 'none', cleaning up the data associated
 	 * with the previous scheduler. We will switch back once we are done
@@ -5056,8 +5060,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_elv_switch_back(&head, q);

-	list_for_each_entry(q, &set->tag_list, tag_set_list)
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_unfreeze_queue(q);
+		mutex_unlock(&q->sysfs_lock);
+		mutex_unlock(&q->sysfs_dir_lock);
+	}

 	/* Free the excess tags when nr_hw_queues shrink. */
 	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
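The recurring pattern in this patch is that the helpers (blk_mq_realloc_hw_ctxs() and the elevator switch helpers) no longer take q->sysfs_lock themselves; they only assert that it is held, and the callers own the lock around the whole update. A minimal standalone C sketch of that "caller locks, callee asserts" handover, using pthreads and a plain assert() in place of lockdep_assert_held() (all names below are illustrative, not taken from blk-mq), might look like:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for a request queue with a sysfs-style lock. */
struct queue {
	pthread_mutex_t sysfs_lock;
	int locked;		/* poor man's lockdep: set while the lock is held */
	int nr_hw_queues;
};

/* The helper no longer locks; it only checks that its caller does,
 * mirroring the mutex_lock() -> lockdep_assert_held() conversion. */
static void realloc_hw_ctxs(struct queue *q, int nr)
{
	assert(q->locked);	/* analogous to lockdep_assert_held() */
	q->nr_hw_queues = nr;
}

/* The caller owns the critical section around the whole update, the role
 * blk_mq_init_allocated_queue() and __blk_mq_update_nr_hw_queues() take on here. */
static void update_nr_hw_queues(struct queue *q, int nr)
{
	pthread_mutex_lock(&q->sysfs_lock);
	q->locked = 1;
	realloc_hw_ctxs(q, nr);
	q->locked = 0;
	pthread_mutex_unlock(&q->sysfs_lock);
}

int main(void)
{
	struct queue q = { .sysfs_lock = PTHREAD_MUTEX_INITIALIZER };

	update_nr_hw_queues(&q, 4);
	printf("nr_hw_queues = %d\n", q.nr_hw_queues);
	return 0;
}

Built with cc -pthread, the sketch only demonstrates the design choice: lock ownership moves up one level so a single critical section can cover several helpers without nested or repeated locking inside them.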