@@ -4453,7 +4453,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	unsigned long i, j;
 
 	/* protect against switching io scheduler */
-	mutex_lock(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_lock);
+
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
@@ -4486,7 +4487,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 	xa_for_each_start(&q->hctx_table, j, hctx, j)
 		blk_mq_exit_hctx(q, set, hctx, j);
-	mutex_unlock(&q->sysfs_lock);
 
 	/* unregister cpuhp callbacks for exited hctxs */
 	blk_mq_remove_hw_queues_cpuhp(q);
@@ -4518,10 +4518,14 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
 	xa_init(&q->hctx_table);
 
+	mutex_lock(&q->sysfs_lock);
+
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
 
+	mutex_unlock(&q->sysfs_lock);
+
 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -4540,6 +4544,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	return 0;
 
 err_hctxs:
+	mutex_unlock(&q->sysfs_lock);
 	blk_mq_release(q);
 err_exit:
 	q->mq_ops = NULL;
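
Note: the hunks above change the calling convention of blk_mq_realloc_hw_ctxs(). It no longer takes q->sysfs_lock itself; lockdep_assert_held() warns (when lockdep is enabled) if it is reached without the lock, so every caller must now acquire q->sysfs_lock around the call, as blk_mq_init_allocated_queue() does above. A minimal sketch of the expected caller pattern; the function name example_reinit_hw_ctxs is hypothetical and only illustrates the contract:

	/* Sketch: any caller must hold q->sysfs_lock across the reallocation. */
	static void example_reinit_hw_ctxs(struct blk_mq_tag_set *set,
					   struct request_queue *q)
	{
		mutex_lock(&q->sysfs_lock);
		blk_mq_realloc_hw_ctxs(set, q);	/* lockdep_assert_held() is satisfied */
		mutex_unlock(&q->sysfs_lock);
	}
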
@@ -4920,12 +4925,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 		return false;
 
 	/* q->elevator needs protection from ->sysfs_lock */
-	mutex_lock(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_lock);
 
 	/* the check has to be done with holding sysfs_lock */
 	if (!q->elevator) {
 		kfree(qe);
-		goto unlock;
+		goto out;
 	}
 
 	INIT_LIST_HEAD(&qe->node);
@@ -4935,9 +4940,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 	__elevator_get(qe->type);
 	list_add(&qe->node, head);
 	elevator_disable(q);
-unlock:
-	mutex_unlock(&q->sysfs_lock);
-
+out:
 	return true;
 }
 
@@ -4966,11 +4969,9 @@ static void blk_mq_elv_switch_back(struct list_head *head,
 	list_del(&qe->node);
 	kfree(qe);
 
-	mutex_lock(&q->sysfs_lock);
 	elevator_switch(q, t);
 	/* drop the reference acquired in blk_mq_elv_switch_none */
 	elevator_put(t);
-	mutex_unlock(&q->sysfs_lock);
 }
 
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
@@ -4990,8 +4991,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
 		return;
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		mutex_lock(&q->sysfs_dir_lock);
+		mutex_lock(&q->sysfs_lock);
 		blk_mq_freeze_queue(q);
+	}
 	/*
 	 * Switch IO scheduler to 'none', cleaning up the data associated
 	 * with the previous scheduler. We will switch back once we are done
@@ -5047,8 +5051,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_elv_switch_back(&head, q);
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_unfreeze_queue(q);
+		mutex_unlock(&q->sysfs_lock);
+		mutex_unlock(&q->sysfs_dir_lock);
+	}
 
 	/* Free the excess tags when nr_hw_queues shrink. */
 	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
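
For reference, the per-queue locking order that __blk_mq_update_nr_hw_queues() establishes with these hunks, summarized as a sketch condensed from the diff (not additional source):

	/* acquire, for each q on set->tag_list, before reconfiguring */
	mutex_lock(&q->sysfs_dir_lock);
	mutex_lock(&q->sysfs_lock);
	blk_mq_freeze_queue(q);

	/* ... switch elevator to none, update hw queues, switch back ... */

	/* release in the reverse order once the update is done */
	blk_mq_unfreeze_queue(q);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

With the locks held across the whole update, the helpers called from this path (blk_mq_elv_switch_none(), blk_mq_elv_switch_back(), blk_mq_realloc_hw_ctxs()) no longer take q->sysfs_lock themselves.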