Skip to content

Commit 50ae066

Browse files
mfijalko authored and borkmann committed
ice, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full
When XSK pool uses need_wakeup feature, correlate -ENOBUFS that was returned from xdp_do_redirect() with a XSK Rx queue being full. In such case, terminate the Rx processing that is being done on the current HW Rx ring and let the user space consume descriptors from XSK Rx queue so that there is room that driver can use later on. Introduce new internal return code ICE_XDP_EXIT that will indicate case described above. Note that it does not affect Tx processing that is bound to the same NAPI context, nor the other Rx rings. Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Link: https://lore.kernel.org/bpf/20220413153015.453864-6-maciej.fijalkowski@intel.com
1 parent d090c88 commit 50ae066

File tree

2 files changed

+20
-10
lines changed

2 files changed

+20
-10
lines changed

drivers/net/ethernet/intel/ice/ice_txrx.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -133,6 +133,7 @@ static inline int ice_skb_pad(void)
133133
#define ICE_XDP_CONSUMED BIT(0)
134134
#define ICE_XDP_TX BIT(1)
135135
#define ICE_XDP_REDIR BIT(2)
136+
#define ICE_XDP_EXIT BIT(3)
136137

137138
#define ICE_RX_DMA_ATTR \
138139
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

drivers/net/ethernet/intel/ice/ice_xsk.c

Lines changed: 19 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -540,9 +540,13 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
540540

541541
if (likely(act == XDP_REDIRECT)) {
542542
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
543-
if (err)
544-
goto out_failure;
545-
return ICE_XDP_REDIR;
543+
if (!err)
544+
return ICE_XDP_REDIR;
545+
if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
546+
result = ICE_XDP_EXIT;
547+
else
548+
result = ICE_XDP_CONSUMED;
549+
goto out_failure;
546550
}
547551

548552
switch (act) {
@@ -553,15 +557,16 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
553557
if (result == ICE_XDP_CONSUMED)
554558
goto out_failure;
555559
break;
560+
case XDP_DROP:
561+
result = ICE_XDP_CONSUMED;
562+
break;
556563
default:
557564
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
558565
fallthrough;
559566
case XDP_ABORTED:
567+
result = ICE_XDP_CONSUMED;
560568
out_failure:
561569
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
562-
fallthrough;
563-
case XDP_DROP:
564-
result = ICE_XDP_CONSUMED;
565570
break;
566571
}
567572

@@ -629,12 +634,16 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
629634
xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
630635

631636
xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
632-
if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)))
637+
if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
633638
xdp_xmit |= xdp_res;
634-
else if (xdp_res == ICE_XDP_CONSUMED)
639+
} else if (xdp_res == ICE_XDP_EXIT) {
640+
failure = true;
641+
break;
642+
} else if (xdp_res == ICE_XDP_CONSUMED) {
635643
xsk_buff_free(xdp);
636-
else
644+
} else if (xdp_res == ICE_XDP_PASS) {
637645
goto construct_skb;
646+
}
638647

639648
total_rx_bytes += size;
640649
total_rx_packets++;
@@ -669,7 +678,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
669678
ice_receive_skb(rx_ring, skb, vlan_tag);
670679
}
671680

672-
failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
681+
failure |= !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
673682

674683
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
675684
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

0 commit comments

Comments (0)