@@ -307,6 +307,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
 
 	edev->ops->get_vport_stats(edev->cdev, &stats);
 
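+	/* Serialize stats updates against concurrent readers */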
+	spin_lock(&edev->stats_lock);
+
 	p_common->no_buff_discards = stats.common.no_buff_discards;
 	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
 	p_common->ttl0_discard = stats.common.ttl0_discard;
@@ -404,6 +406,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
 		p_ah->tx_1519_to_max_byte_packets =
 			stats.ah.tx_1519_to_max_byte_packets;
 	}
+
+	spin_unlock(&edev->stats_lock);
 }
 
 static void qede_get_stats64(struct net_device *dev,
@@ -412,9 +416,10 @@ static void qede_get_stats64(struct net_device *dev,
 	struct qede_dev *edev = netdev_priv(dev);
 	struct qede_stats_common *p_common;
 
-	qede_fill_by_demand_stats(edev);
 	p_common = &edev->stats.common;
 
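+	/* Read a consistent snapshot of the cached stats */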
+	spin_lock(&edev->stats_lock);
+
 	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
 			    p_common->rx_bcast_pkts;
 	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
@@ -434,6 +439,8 @@ static void qede_get_stats64(struct net_device *dev,
 	stats->collisions = edev->stats.bb.tx_total_collisions;
 	stats->rx_crc_errors = p_common->rx_crc_errors;
 	stats->rx_frame_errors = p_common->rx_align_errors;
+
+	spin_unlock(&edev->stats_lock);
 }
 
 #ifdef CONFIG_QED_SRIOV
@@ -1063,6 +1070,23 @@ static void qede_unlock(struct qede_dev *edev)
 	rtnl_unlock();
 }
 
+static void qede_periodic_task(struct work_struct *work)
+{
+	struct qede_dev *edev = container_of(work, struct qede_dev,
+					     periodic_task.work);
+
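+	/* Refresh the cached stats and re-arm at the configured interval */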
+	qede_fill_by_demand_stats(edev);
+	schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
+}
+
+static void qede_init_periodic_task(struct qede_dev *edev)
+{
+	INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
+	spin_lock_init(&edev->stats_lock);
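+	/* Default interval: refresh stats once per second */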
+	edev->stats_coal_usecs = USEC_PER_SEC;
+	edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
+}
+
 static void qede_sp_task(struct work_struct *work)
 {
 	struct qede_dev *edev = container_of(work, struct qede_dev,
@@ -1082,6 +1106,7 @@ static void qede_sp_task(struct work_struct *work)
 	 */
 
 	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
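+		/* Stop the periodic stats task before the recovery teardown */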
+		cancel_delayed_work_sync(&edev->periodic_task);
 #ifdef CONFIG_QED_SRIOV
 		/* SRIOV must be disabled outside the lock to avoid a deadlock.
 		 * The recovery of the active VFs is currently not supported.
@@ -1272,6 +1297,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 	 */
 	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
 	mutex_init(&edev->qede_lock);
+	qede_init_periodic_task(edev);
 
 	rc = register_netdev(edev->ndev);
 	if (rc) {
@@ -1296,6 +1322,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
 
 	qede_log_probe(edev);
+
+	/* retain user config (for example - after recovery) */
+	if (edev->stats_coal_usecs)
+		schedule_delayed_work(&edev->periodic_task, 0);
+
 	return 0;
 
 err4:
@@ -1364,6 +1395,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 	unregister_netdev(ndev);
 
 	cancel_delayed_work_sync(&edev->sp_task);
+	cancel_delayed_work_sync(&edev->periodic_task);
 
 	edev->ops->common->set_power_state(cdev, PCI_D0);
 