@@ -59,13 +59,12 @@ u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
 
 static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
 {
-        /* We need to consider both queues that the net core sees as one */
-        struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+        /* We need to consider all queues that the net core sees as one */
         struct efx_nic *efx = txq1->efx;
+        struct efx_tx_queue *txq2;
         unsigned int fill_level;
 
-        fill_level = max(txq1->insert_count - txq1->old_read_count,
-                         txq2->insert_count - txq2->old_read_count);
+        fill_level = efx_channel_tx_old_fill_level(txq1->channel);
         if (likely(fill_level < efx->txq_stop_thresh))
                 return;
 
@@ -85,11 +84,10 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
          */
         netif_tx_stop_queue(txq1->core_txq);
         smp_mb();
-        txq1->old_read_count = READ_ONCE(txq1->read_count);
-        txq2->old_read_count = READ_ONCE(txq2->read_count);
+        efx_for_each_channel_tx_queue(txq2, txq1->channel)
+                txq2->old_read_count = READ_ONCE(txq2->read_count);
 
-        fill_level = max(txq1->insert_count - txq1->old_read_count,
-                         txq2->insert_count - txq2->old_read_count);
+        fill_level = efx_channel_tx_old_fill_level(txq1->channel);
         EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
         if (likely(fill_level < efx->txq_stop_thresh)) {
                 smp_mb();
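
The stop/restart decision now keys off a single per-channel fill level rather than a hand-rolled max() over a fixed queue pair. As an illustration only, the standalone C sketch below models what efx_channel_tx_old_fill_level() presumably computes: the worst-case fill across every TX queue the net core sees as one. The sketch_* structs and fields are simplified stand-ins, not the sfc definitions.

#include <stdio.h>

/* Simplified stand-ins for the driver structures; not the sfc definitions. */
struct sketch_tx_queue {
        unsigned int insert_count;   /* descriptors queued by the driver */
        unsigned int old_read_count; /* cached snapshot of hardware progress */
};

struct sketch_channel {
        struct sketch_tx_queue txq[4];
        unsigned int n_txq;
};

/* Worst-case fill level over every queue the net core sees as one;
 * unsigned subtraction tolerates counter wrap the same way the driver does.
 */
static unsigned int sketch_tx_old_fill_level(const struct sketch_channel *ch)
{
        unsigned int fill = 0, i;

        for (i = 0; i < ch->n_txq; i++) {
                unsigned int q_fill =
                        ch->txq[i].insert_count - ch->txq[i].old_read_count;

                if (q_fill > fill)
                        fill = q_fill;
        }
        return fill;
}

int main(void)
{
        struct sketch_channel ch = {
                .txq = { { 100, 90 }, { 250, 180 }, { 30, 30 } },
                .n_txq = 3,
        };

        /* Prints 70: the fullest queue governs whether the shared
         * core queue has to be stopped.
         */
        printf("fill level = %u\n", sketch_tx_old_fill_level(&ch));
        return 0;
}
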
@@ -266,8 +264,45 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
         ++tx_queue->insert_count;
         return 0;
 }
+
+/* Decide whether we can use TX PIO, ie. write packet data directly into
+ * a buffer on the device. This can reduce latency at the expense of
+ * throughput, so we only do this if both hardware and software TX rings
+ * are empty, including all queues for the channel. This also ensures that
+ * only one packet at a time can be using the PIO buffer. If the xmit_more
+ * flag is set then we don't use this - there'll be another packet along
+ * shortly and we want to hold off the doorbell.
+ */
+static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
+{
+        struct efx_channel *channel = tx_queue->channel;
+
+        if (!tx_queue->piobuf)
+                return false;
+
+        EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors);
+
+        efx_for_each_channel_tx_queue(tx_queue, channel)
+                if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
+                        return false;
+
+        return true;
+}
 #endif /* EFX_USE_PIO */
 
+/* Send any pending traffic for a channel. xmit_more is shared across all
+ * queues for a channel, so we must check all of them.
+ */
+static void efx_tx_send_pending(struct efx_channel *channel)
+{
+        struct efx_tx_queue *q;
+
+        efx_for_each_channel_tx_queue(q, channel) {
+                if (q->xmit_pending)
+                        efx_nic_push_buffers(q);
+        }
+}
+
 /*
  * Add a socket buffer to a TX queue
  *
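
The new xmit_pending flag replaces xmit_more_available: any queue that has written descriptors without ringing the doorbell is marked pending, and a single per-channel walk flushes all of them. The standalone sketch below is illustrative only; the sketch_* names and the stand-in for efx_nic_push_buffers() are assumptions, not the sfc API, but it shows the shape of that flush.

#include <stdbool.h>
#include <stdio.h>

struct sketch_txq {
        const char *name;
        bool xmit_pending;   /* descriptors written, doorbell not yet rung */
};

/* Stand-in for the doorbell write (efx_nic_push_buffers() in the driver). */
static void sketch_push_buffers(struct sketch_txq *q)
{
        printf("doorbell: %s\n", q->name);
        q->xmit_pending = false;
}

/* Same shape as the patch's efx_tx_send_pending(): walk every queue on
 * the channel and push only those with outstanding work.
 */
static void sketch_send_pending(struct sketch_txq *qs, int n)
{
        for (int i = 0; i < n; i++)
                if (qs[i].xmit_pending)
                        sketch_push_buffers(&qs[i]);
}

int main(void)
{
        struct sketch_txq qs[] = {
                { "txq0 (no checksum offload)", true  },
                { "txq1 (checksum offload)",    false },
        };

        /* Only txq0 gets a doorbell; txq1 had nothing deferred. */
        sketch_send_pending(qs, 2);
        return 0;
}
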
@@ -315,7 +350,7 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
                 goto err;
 #ifdef EFX_USE_PIO
         } else if (skb_len <= efx_piobuf_size && !xmit_more &&
-                   efx_nic_may_tx_pio(tx_queue)) {
+                   efx_tx_may_pio(tx_queue)) {
                 /* Use PIO for short packets with an empty queue. */
                 if (efx_enqueue_skb_pio(tx_queue, skb))
                         goto err;
@@ -336,21 +371,11 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
 
         efx_tx_maybe_stop_queue(tx_queue);
 
-        /* Pass off to hardware */
-        if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
-                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-
-                /* There could be packets left on the partner queue if
-                 * xmit_more was set. If we do not push those they
-                 * could be left for a long time and cause a netdev watchdog.
-                 */
-                if (txq2->xmit_more_available)
-                        efx_nic_push_buffers(txq2);
+        tx_queue->xmit_pending = true;
 
-                efx_nic_push_buffers(tx_queue);
-        } else {
-                tx_queue->xmit_more_available = xmit_more;
-        }
+        /* Pass off to hardware */
+        if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
+                efx_tx_send_pending(tx_queue->channel);
 
         if (segments) {
                 tx_queue->tso_bursts++;
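
With the queue marked xmit_pending up front, the doorbell is rung only when __netdev_tx_sent_queue() says so (roughly: xmit_more is clear, or the byte-queue limits want the queue kicked anyway). The sketch below is illustrative only; sketch_sent_queue() is a deliberately simplified stand-in for __netdev_tx_sent_queue(), not its real logic. It shows how several packets submitted with xmit_more collapse into a single doorbell write.

#include <stdbool.h>
#include <stdio.h>

struct sketch_txq {
        bool xmit_pending;
};

/* Simplified stand-in for __netdev_tx_sent_queue(): flush unless the
 * caller has promised more packets are arriving immediately.  (The real
 * helper also accounts bytes for BQL and can force a flush on its own.)
 */
static bool sketch_sent_queue(bool xmit_more)
{
        return !xmit_more;
}

static void sketch_enqueue(struct sketch_txq *q, int pkt, bool xmit_more)
{
        q->xmit_pending = true;   /* descriptors written, doorbell deferred */

        if (sketch_sent_queue(xmit_more)) {
                printf("pkt %d: doorbell rung\n", pkt);
                q->xmit_pending = false;
        } else {
                printf("pkt %d: deferred (xmit_more)\n", pkt);
        }
}

int main(void)
{
        struct sketch_txq q = { .xmit_pending = false };

        sketch_enqueue(&q, 1, true);    /* deferred */
        sketch_enqueue(&q, 2, true);    /* deferred */
        sketch_enqueue(&q, 3, false);   /* one doorbell covers all three */
        return 0;
}

The error path in the next hunk relies on the same per-channel flush, so a dropped skb submitted without xmit_more does not strand descriptors that earlier packets deferred.
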
@@ -371,14 +396,8 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
          * on this queue or a partner queue then we need to push here to get the
          * previous packets out.
          */
-        if (!xmit_more) {
-                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-
-                if (txq2->xmit_more_available)
-                        efx_nic_push_buffers(txq2);
-
-                efx_nic_push_buffers(tx_queue);
-        }
+        if (!xmit_more)
+                efx_tx_send_pending(tx_queue->channel);
 
         return NETDEV_TX_OK;
 }
@@ -489,18 +508,24 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 
         EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
 
-        /* PTP "event" packet */
-        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
-            unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
-                return efx_ptp_tx(efx, skb);
-        }
-
         index = skb_get_queue_mapping(skb);
         type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
         if (index >= efx->n_tx_channels) {
                 index -= efx->n_tx_channels;
                 type |= EFX_TXQ_TYPE_HIGHPRI;
         }
+
+        /* PTP "event" packet */
+        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
+            unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+                /* There may be existing transmits on the channel that are
+                 * waiting for this packet to trigger the doorbell write.
+                 * We need to send the packets at this point.
+                 */
+                efx_tx_send_pending(efx_get_tx_channel(efx, index));
+                return efx_ptp_tx(efx, skb);
+        }
+
         tx_queue = efx_get_tx_queue(efx, index, type);
 
         return __efx_enqueue_skb(tx_queue, skb);
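
Moving the PTP check after the queue-mapping calculation lets the driver flush the right channel before the skb leaves the normal TX path; otherwise a PTP packet could be the one that was supposed to trigger the doorbell for earlier deferred descriptors, leaving them stuck until the netdev watchdog fired. A minimal standalone sketch of that ordering (illustrative only, not driver code; names are made up):

#include <stdio.h>

struct sketch_channel {
        int pending_descs;   /* descriptors queued but doorbell not rung */
};

static void sketch_send_pending(struct sketch_channel *ch)
{
        if (ch->pending_descs) {
                printf("flushing %d deferred descriptors\n", ch->pending_descs);
                ch->pending_descs = 0;
        }
}

/* Mirrors the ordering in the hunk above: flush the channel first, then
 * divert the packet to the dedicated PTP transmit path.
 */
static void sketch_ptp_xmit(struct sketch_channel *ch)
{
        sketch_send_pending(ch);
        printf("PTP packet sent via the dedicated PTP path\n");
}

int main(void)
{
        struct sketch_channel ch = { .pending_descs = 3 };

        sketch_ptp_xmit(&ch);   /* earlier deferred work is not left behind */
        return 0;
}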