@@ -1025,19 +1025,27 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 	if (!dev->cache.wq)
 		return;
 
-	cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
 	mutex_lock(&dev->cache.rb_lock);
+	dev->cache.disable = true;
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
 		xa_lock_irq(&ent->mkeys);
 		ent->disabled = true;
 		xa_unlock_irq(&ent->mkeys);
-		cancel_delayed_work_sync(&ent->dwork);
 	}
+	mutex_unlock(&dev->cache.rb_lock);
+
+	/*
+	 * After all entries are disabled and will not reschedule on WQ,
+	 * flush it and all async commands.
+	 */
+	flush_workqueue(dev->cache.wq);
 
 	mlx5_mkey_cache_debugfs_cleanup(dev);
 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
 
+	/* At this point all entries are disabled and have no concurrent work. */
+	mutex_lock(&dev->cache.rb_lock);
 	node = rb_first(root);
 	while (node) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
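The hunk above reorders teardown to avoid a deadlock: the old code called cancel_delayed_work_sync() while holding dev->cache.rb_lock, which can deadlock if a pending work item needs rb_lock to complete. The fix instead marks every entry disabled while holding the lock, drops it, and only then flushes the whole workqueue. A minimal sketch of that ordering, using hypothetical my_cache/my_ent types rather than the mlx5 structures:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct my_ent {
	struct list_head list;
	bool disabled;
};

struct my_cache {
	struct mutex lock;	/* stands in for dev->cache.rb_lock */
	struct list_head ents;
	struct workqueue_struct *wq;
	bool disable;
};

static void my_cache_shutdown(struct my_cache *cache)
{
	struct my_ent *ent;

	/*
	 * Mark everything disabled while holding the lock, so no work
	 * item can reschedule itself and no new entries appear.
	 */
	mutex_lock(&cache->lock);
	cache->disable = true;
	list_for_each_entry(ent, &cache->ents, list)
		ent->disabled = true;
	mutex_unlock(&cache->lock);

	/*
	 * Flush only after dropping the lock: a pending work item may
	 * need cache->lock to finish, so flushing (or synchronously
	 * cancelling work) while holding it can deadlock.
	 */
	flush_workqueue(cache->wq);
}

The per-entry disabled flag is what lets the patch drop the cancel_delayed_work_sync() calls entirely: a disabled entry's work may run once more but never rearms, so a single flush_workqueue() after unlocking drains everything, as the patch's own comment notes.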
@@ -1822,6 +1830,10 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
 	}
 
 	mutex_lock(&cache->rb_lock);
+	if (cache->disable) {
+		mutex_unlock(&cache->rb_lock);
+		return 0;
+	}
 	ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
 	if (ent) {
 		if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
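The second hunk closes the other half of the race: once cleanup has set cache->disable under rb_lock, cache_ent_find_and_store() backs off instead of creating or repopulating entries behind the teardown path. A companion sketch in the same hypothetical cache as above (my_cache_store is an illustrative name, not the driver's API):

static int my_cache_store(struct my_cache *cache, struct my_ent *ent)
{
	mutex_lock(&cache->lock);
	if (cache->disable) {
		/* Shutdown already started: leave the tree alone. */
		mutex_unlock(&cache->lock);
		return 0;
	}
	list_add_tail(&ent->list, &cache->ents);
	mutex_unlock(&cache->lock);
	return 0;
}

Checking the flag under the same lock that shutdown takes is what makes the pattern race-free: any store either completes before cache->disable is set, or observes it and does nothing.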