@@ -329,40 +329,32 @@ void
 mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	 struct mt76_wcid *wcid, struct sk_buff *skb)
 {
-	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct mt76_queue *q;
-	int qid = skb_get_queue_mapping(skb);
 
 	if (mt76_testmode_enabled(phy)) {
 		ieee80211_free_txskb(phy->hw, skb);
 		return;
 	}
 
-	if (WARN_ON(qid >= MT_TXQ_PSD)) {
-		qid = MT_TXQ_BE;
-		skb_set_queue_mapping(skb, qid);
-	}
-
-	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
-	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
-	    !ieee80211_is_data(hdr->frame_control) &&
-	    !ieee80211_is_bufferable_mmpdu(skb)) {
-		qid = MT_TXQ_PSD;
-	}
+	if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
+		skb_set_queue_mapping(skb, MT_TXQ_BE);
 
 	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
 		ieee80211_get_tx_rates(info->control.vif, sta, skb,
 				       info->control.rates, 1);
 
 	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
-	q = phy->q_tx[qid];
 
-	spin_lock_bh(&q->lock);
-	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
-	dev->queue_ops->kick(dev, q);
-	spin_unlock_bh(&q->lock);
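+	/* Instead of writing the frame to the hardware queue directly,
+	 * park it on the wcid's pending queue for the tx worker to flush.
+	 */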
+	spin_lock_bh(&wcid->tx_pending.lock);
+	__skb_queue_tail(&wcid->tx_pending, skb);
+	spin_unlock_bh(&wcid->tx_pending.lock);
+
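+	/* Link the wcid into the phy's list of stations with pending
+	 * frames, unless it is already on it.
+	 */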
+	spin_lock_bh(&phy->tx_lock);
+	if (list_empty(&wcid->tx_list))
+		list_add_tail(&wcid->tx_list, &phy->tx_list);
+	spin_unlock_bh(&phy->tx_lock);
+
+	mt76_worker_schedule(&phy->dev->tx_worker);
 }
 EXPORT_SYMBOL_GPL(mt76_tx);
 
@@ -593,10 +585,86 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
 }
 EXPORT_SYMBOL_GPL(mt76_txq_schedule);
 
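+/* Flush the pending queue of a single wcid. Returns -1 if one of the
+ * needed hardware queues is stopped, leaving the remaining frames queued.
+ */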
+static int
+mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
+{
+	struct mt76_dev *dev = phy->dev;
+	struct ieee80211_sta *sta;
+	struct mt76_queue *q;
+	struct sk_buff *skb;
+	int ret = 0;
+
+	spin_lock(&wcid->tx_pending.lock);
+	while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		int qid = skb_get_queue_mapping(skb);
+
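+		/* With a dedicated hardware management queue, route
+		 * non-bufferable management frames to the PSD queue.
+		 */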
+		if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
+		    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+		    !ieee80211_is_data(hdr->frame_control) &&
+		    !ieee80211_is_bufferable_mmpdu(skb))
+			qid = MT_TXQ_PSD;
+
+		q = phy->q_tx[qid];
+		if (mt76_txq_stopped(q)) {
+			ret = -1;
+			break;
+		}
+
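+		/* Release the pending queue lock before taking the hardware
+		 * queue lock, so the two are never held at the same time.
+		 */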
+		__skb_unlink(skb, &wcid->tx_pending);
+		spin_unlock(&wcid->tx_pending.lock);
+
+		sta = wcid_to_sta(wcid);
+		spin_lock(&q->lock);
+		__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
+		dev->queue_ops->kick(dev, q);
+		spin_unlock(&q->lock);
+
+		spin_lock(&wcid->tx_pending.lock);
+	}
+	spin_unlock(&wcid->tx_pending.lock);
+
+	return ret;
+}
+
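+/* Service all stations with pending frames, requeueing a station and
+ * stopping early when a hardware queue fills up.
+ */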
+static void mt76_txq_schedule_pending(struct mt76_phy *phy)
+{
+	if (list_empty(&phy->tx_list))
+		return;
+
+	local_bh_disable();
+	rcu_read_lock();
+
+	spin_lock(&phy->tx_lock);
+	while (!list_empty(&phy->tx_list)) {
+		struct mt76_wcid *wcid = NULL;
+		int ret;
+
+		wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
+		list_del_init(&wcid->tx_list);
+
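+		/* Drop phy->tx_lock while this wcid is serviced, so that
+		 * mt76_tx() can keep adding stations to the list.
+		 */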
+		spin_unlock(&phy->tx_lock);
+		ret = mt76_txq_schedule_pending_wcid(phy, wcid);
+		spin_lock(&phy->tx_lock);
+
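+		/* A stopped hardware queue left frames pending: put the
+		 * wcid back on the list and retry on the next worker run.
+		 */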
+		if (ret) {
+			if (list_empty(&wcid->tx_list))
+				list_add_tail(&wcid->tx_list, &phy->tx_list);
+			break;
+		}
+	}
+	spin_unlock(&phy->tx_lock);
+
+	rcu_read_unlock();
+	local_bh_enable();
+}
+
 void mt76_txq_schedule_all(struct mt76_phy *phy)
 {
 	int i;
 
+	mt76_txq_schedule_pending(phy);
 	for (i = 0; i <= MT_TXQ_BK; i++)
 		mt76_txq_schedule(phy, i);
 }
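
For context, the hunks above rely on per-wcid and per-phy state introduced elsewhere in this commit. Below is a minimal sketch of those fields and their initialization, inferred from the usage in the diff: the field names match the accessors used above, but the exact struct layout, init sites, and the helper name mt76_tx_pending_init are assumptions, not part of this diff.

struct mt76_wcid {
	/* ... existing members ... */
	struct list_head tx_list;	/* link on phy->tx_list */
	struct sk_buff_head tx_pending;	/* frames queued by mt76_tx() */
};

struct mt76_phy {
	/* ... existing members ... */
	struct list_head tx_list;	/* wcids with pending frames */
	spinlock_t tx_lock;		/* protects phy->tx_list */
};

/* Illustrative init helper (hypothetical name). skb_queue_head_init()
 * also sets up the tx_pending.lock spinlock taken in the hunks above.
 */
static inline void mt76_tx_pending_init(struct mt76_phy *phy,
					struct mt76_wcid *wcid)
{
	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);
	INIT_LIST_HEAD(&wcid->tx_list);
	skb_queue_head_init(&wcid->tx_pending);
}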