iavf: Implement XDP_PASS path in AF_XDP processing
Construct an skb and fill in its fields when AF_XDP
is enabled on the ring and the XDP program returns XDP_PASS.
(will be fixed up).

Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
walking-machine authored and alobakin committed Mar 1, 2023
1 parent 6f252a5 commit 8862fe7
Showing 2 changed files with 79 additions and 2 deletions.
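
For context: the path this commit adds runs when the XDP program attached to the queue returns XDP_PASS while the ring operates in AF_XDP zero-copy mode. A minimal sketch of such a program (illustrative only, not part of the patch; the program name is made up):

/* SPDX-License-Identifier: GPL-2.0 */
/* Pass every frame up to the networking stack. On an AF_XDP
 * zero-copy ring, this verdict takes the construct_skb path
 * added by this commit.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";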
8 changes: 8 additions & 0 deletions drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -161,6 +161,14 @@ DEFINE_EVENT(

	TP_ARGS(ring, desc, skb));

DEFINE_EVENT(
	iavf_rx_template, iavf_clean_rx_irq_zc_rx,
	TP_PROTO(struct iavf_ring *ring,
		 union iavf_32byte_rx_desc *desc,
		 struct sk_buff *skb),

	TP_ARGS(ring, desc, skb));

DECLARE_EVENT_CLASS(
	iavf_xmit_template,

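For reference, the iavf_trace(clean_rx_irq_zc_rx, ...) call added to iavf_xsk.c below resolves to the trace_iavf_clean_rx_irq_zc_rx() tracepoint generated by the DEFINE_EVENT above. Assuming iavf_trace.h keeps the same wrapper macros as its i40e counterpart (a sketch, not shown in this diff):

#define _IAVF_TRACE_NAME(trace_name) (trace_ ## iavf ## _ ## trace_name)
#define IAVF_TRACE_NAME(trace_name) _IAVF_TRACE_NAME(trace_name)

/* iavf_trace(clean_rx_irq_zc_rx, ring, desc, skb) thus expands to
 * trace_iavf_clean_rx_irq_zc_rx(ring, desc, skb).
 */
#define iavf_trace(trace_name, args...) IAVF_TRACE_NAME(trace_name)(args)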
73 changes: 71 additions & 2 deletions drivers/net/ethernet/intel/iavf/iavf_xsk.c
@@ -1003,6 +1003,8 @@ iavf_run_xdp_zc(struct iavf_ring *rx_ring, struct xdp_buff *xdp,
	}

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		err = iavf_xmit_xdp_buff_zc(xdp, xdp_ring);
		if (unlikely(err))
@@ -1028,6 +1030,42 @@ iavf_run_xdp_zc(struct iavf_ring *rx_ring, struct xdp_buff *xdp,
	return xdp_act;
}

/**
 * iavf_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @xdp: Pointer to XDP buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
iavf_construct_skb_zc(struct iavf_ring *rx_ring, struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	xsk_buff_free(xdp);

	return skb;
}
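
An aside on the metasize branch above: metadata that an XDP program places in front of the payload is copied together with the frame and preserved via skb_metadata_set(). A hypothetical program that writes such metadata before returning XDP_PASS (a sketch; the program name and tag value are made up):

/* SPDX-License-Identifier: GPL-2.0 */
/* Reserve four bytes of XDP metadata in front of the frame and tag it
 * before returning XDP_PASS. iavf_construct_skb_zc() copies
 * [data_meta, data_end) and calls skb_metadata_set(), so the tag
 * survives into the skb handed to the stack.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tag_meta(struct xdp_md *ctx)
{
	void *data, *data_meta;
	__u32 *tag;

	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*tag)))
		return XDP_PASS;	/* no headroom for metadata */

	data_meta = (void *)(long)ctx->data_meta;
	data = (void *)(long)ctx->data;
	tag = data_meta;
	if ((void *)(tag + 1) > data)	/* bounds check for the verifier */
		return XDP_PASS;

	*tag = 0xeeee;			/* arbitrary mark */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";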

/**
 * iavf_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
@@ -1053,6 +1091,8 @@ int iavf_clean_rx_irq_zc(struct iavf_ring *rx_ring, int budget)
	while (likely(cleaned_count < budget)) {
		union iavf_rx_desc *rx_desc;
		struct xdp_buff *xdp;
		unsigned int xdp_act;
		struct sk_buff *skb;
		unsigned int size;
		u64 qword;

@@ -1087,8 +1127,10 @@ int iavf_clean_rx_irq_zc(struct iavf_ring *rx_ring, int budget)
		xsk_buff_set_size(xdp, size);
		xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);

-		iavf_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring,
-				&rxq_xdp_act);
+		xdp_act = iavf_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring,
+					  &rxq_xdp_act);
+		if (xdp_act == XDP_PASS)
+			goto construct_skb;

		if (unlikely(rxq_xdp_act & IAVF_RXQ_XDP_ACT_STOP_NOW)) {
			failure = true;
@@ -1102,6 +1144,33 @@ int iavf_clean_rx_irq_zc(struct iavf_ring *rx_ring, int budget)
		cleaned_count++;
		if (unlikely(++ntc == ring_size))
			ntc = 0;

		continue;

construct_skb:
		skb = iavf_construct_skb_zc(rx_ring, xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		cleaned_count++;
		if (unlikely(++ntc == ring_size))
			ntc = 0;

		prefetch(rx_desc);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		iavf_process_skb_fields(rx_ring, rx_desc, skb, qword);

		iavf_trace(clean_rx_irq_zc_rx, rx_ring, rx_desc, skb);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);

		total_rx_packets++;
	}

	rx_ring->next_to_clean = ntc;

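To observe the new path at runtime, the iavf_clean_rx_irq_zc_rx event added above can be enabled through tracefs. A small user-space sketch, assuming tracefs is mounted at /sys/kernel/tracing and the event group is named iavf:

/* SPDX-License-Identifier: GPL-2.0 */
/* Enable the new tracepoint and stream its records to stdout. */
#include <stdio.h>

int main(void)
{
	const char *en =
		"/sys/kernel/tracing/events/iavf/iavf_clean_rx_irq_zc_rx/enable";
	char line[4096];
	FILE *f;

	f = fopen(en, "w");
	if (!f) {
		perror("enable tracepoint");
		return 1;
	}
	fputs("1", f);
	fclose(f);

	f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!f) {
		perror("trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), f))	/* blocks until events arrive */
		fputs(line, stdout);
	fclose(f);
	return 0;
}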