@@ -185,20 +185,21 @@ static void health_care(struct work_struct *work)
 	struct mlx5_core_health *health;
 	struct mlx5_core_dev *dev;
 	struct mlx5_priv *priv;
+	unsigned long flags;
 
 	health = container_of(work, struct mlx5_core_health, work);
 	priv = container_of(health, struct mlx5_priv, health);
 	dev = container_of(priv, struct mlx5_core_dev, priv);
 	mlx5_core_warn(dev, "handling bad device here\n");
 	mlx5_handle_bad_state(dev);
 
-	spin_lock(&health->wq_lock);
+	spin_lock_irqsave(&health->wq_lock, flags);
 	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
 		schedule_delayed_work(&health->recover_work, recover_delay);
 	else
 		dev_err(&dev->pdev->dev,
 			"new health works are not permitted at this stage\n");
-	spin_unlock(&health->wq_lock);
+	spin_unlock_irqrestore(&health->wq_lock, flags);
 }
 
 static const char *hsynd_str(u8 synd)
@@ -269,6 +270,20 @@ static unsigned long get_next_poll_jiffies(void)
 	return next;
 }
 
+void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
+
+	spin_lock_irqsave(&health->wq_lock, flags);
+	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+		queue_work(health->wq, &health->work);
+	else
+		dev_err(&dev->pdev->dev,
+			"new health works are not permitted at this stage\n");
+	spin_unlock_irqrestore(&health->wq_lock, flags);
+}
+
 static void poll_health(unsigned long data)
 {
 	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
@@ -297,13 +312,7 @@ static void poll_health(unsigned long data)
 	if (in_fatal(dev) && !health->sick) {
 		health->sick = true;
 		print_health_info(dev);
-		spin_lock(&health->wq_lock);
-		if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
-			queue_work(health->wq, &health->work);
-		else
-			dev_err(&dev->pdev->dev,
-				"new health works are not permitted at this stage\n");
-		spin_unlock(&health->wq_lock);
+		mlx5_trigger_health_work(dev);
 	}
 }
 
@@ -333,10 +342,11 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
 
-	spin_lock(&health->wq_lock);
+	spin_lock_irqsave(&health->wq_lock, flags);
 	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
-	spin_unlock(&health->wq_lock);
+	spin_unlock_irqrestore(&health->wq_lock, flags);
 	cancel_delayed_work_sync(&health->recover_work);
 	cancel_work_sync(&health->work);
 }
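The common thread of the diff is the switch from plain spin_lock()/spin_unlock() on health->wq_lock to the irqsave variants, plus factoring the duplicated queueing logic into mlx5_trigger_health_work(). Below is a minimal, self-contained sketch of that locking pattern. The demo_* names are hypothetical stand-ins for the mlx5 structures, and the stated rationale (the lock may also be taken from the health poll timer, i.e. atomic context) is an inference from the diff, not from the commit message.

/*
 * Sketch only: hypothetical demo_* names, not the mlx5 API.
 * When a spinlock may be taken from timer/softirq context, holders in
 * other contexts use the irqsave form so they cannot be interrupted
 * while holding the lock and deadlocked against a second acquisition.
 */
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

enum { DEMO_DROP_NEW_WORK };	/* bit: refuse new health work */

struct demo_health {
	spinlock_t		wq_lock;	/* guards flags below */
	unsigned long		flags;
	struct work_struct	work;
};

/* Safe to call from both process and timer context. */
static void demo_trigger_work(struct demo_health *h)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&h->wq_lock, irq_flags);
	if (!test_bit(DEMO_DROP_NEW_WORK, &h->flags))
		schedule_work(&h->work);
	spin_unlock_irqrestore(&h->wq_lock, irq_flags);
}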