@@ -1024,19 +1024,27 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 	if (!dev->cache.wq)
 		return;
 
-	cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
 	mutex_lock(&dev->cache.rb_lock);
+	dev->cache.disable = true;
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
 		xa_lock_irq(&ent->mkeys);
 		ent->disabled = true;
 		xa_unlock_irq(&ent->mkeys);
-		cancel_delayed_work_sync(&ent->dwork);
 	}
+	mutex_unlock(&dev->cache.rb_lock);
+
+	/*
+	 * After all entries are disabled and will not reschedule on WQ,
+	 * flush it and all async commands.
+	 */
+	flush_workqueue(dev->cache.wq);
 
 	mlx5_mkey_cache_debugfs_cleanup(dev);
 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
 
+	/* At this point all entries are disabled and have no concurrent work. */
+	mutex_lock(&dev->cache.rb_lock);
 	node = rb_first(root);
 	while (node) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
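
The ordering in this hunk is the point of the patch: the old code called cancel_delayed_work_sync() while holding rb_lock, which can deadlock if the work being cancelled needs rb_lock to finish. The new code instead marks the cache and every entry disabled under the lock, drops the lock, and only then flushes the whole workqueue, by which time nothing can re-arm. Below is a minimal, self-contained sketch of that pattern using hypothetical names (demo_cache, demo_dwork_fn; these are not the mlx5 structures), showing why the flush is then guaranteed to terminate:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_cache {
	struct mutex lock;		/* stands in for dev->cache.rb_lock */
	bool disable;			/* stands in for dev->cache.disable */
	struct workqueue_struct *wq;
	struct delayed_work dwork;
};

static void demo_dwork_fn(struct work_struct *work)
{
	struct demo_cache *cache =
		container_of(to_delayed_work(work), struct demo_cache, dwork);

	mutex_lock(&cache->lock);
	/*
	 * Re-arm only while live. Once ->disable is observed under the
	 * lock, this work never queues itself again, so a subsequent
	 * flush_workqueue() is guaranteed to terminate.
	 */
	if (!cache->disable)
		queue_delayed_work(cache->wq, &cache->dwork,
				   msecs_to_jiffies(500));
	mutex_unlock(&cache->lock);
}

static void demo_cache_cleanup(struct demo_cache *cache)
{
	mutex_lock(&cache->lock);
	cache->disable = true;		/* stop all future re-arming */
	mutex_unlock(&cache->lock);

	/*
	 * The lock is dropped before waiting, so a work item that is
	 * already running can still take cache->lock and finish; no
	 * cancel-under-lock deadlock is possible.
	 */
	flush_workqueue(cache->wq);
	destroy_workqueue(cache->wq);
}
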
@@ -1815,6 +1823,10 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
 	}
 
 	mutex_lock(&cache->rb_lock);
+	if (cache->disable) {
+		mutex_unlock(&cache->rb_lock);
+		return 0;
+	}
 	ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
 	if (ent) {
 		if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
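
This second hunk closes the producer side of the race: once cleanup has set cache->disable under rb_lock, cache_ent_find_and_store() bails out early rather than inserting a new cache entry (and potentially scheduling fresh work) behind cleanup's back. In the hypothetical demo terms of the sketch above, the same guard looks like this:

/* Producer side of the pattern above (hypothetical demo names). */
static int demo_cache_store(struct demo_cache *cache)
{
	mutex_lock(&cache->lock);
	if (cache->disable) {
		/* Cleanup already ran or is running: add nothing new. */
		mutex_unlock(&cache->lock);
		return 0;
	}
	/* ... insert the entry / queue work, still under the lock ... */
	mutex_unlock(&cache->lock);
	return 0;
}

Because the flag is checked under the same mutex that cleanup held while setting it, a store either completes before cleanup begins disabling entries or observes the flag and adds nothing.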