iavf: Implement AF_XDP RX processing
Implement RX packet processing specific to AF_XDP ZC.
All actions except XDP_PASS are supported; the skb path will
be implemented in later patches.

Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
walking-machine authored and alobakin committed Mar 1, 2023
1 parent 585466d commit ad45877
Showing 7 changed files with 565 additions and 54 deletions.
27 changes: 21 additions & 6 deletions drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -775,14 +775,29 @@ void iavf_configure_rx_ring(struct iavf_adapter *adapter,
rx_ring->queue_index,
rx_ring->q_vector->napi.napi_id);

err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
rx_ring->pool);
if (err)
netdev_err(adapter->netdev, "Could not register XDP memory model for RX queue %u, error: %d\n",
queue_idx, err);
if (rx_ring->flags & IAVF_TXRX_FLAGS_XSK) {
err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
if (err)
netdev_err(adapter->netdev, "xdp_rxq_info_reg_mem_model returned %d\n",
err);

xsk_pool_set_rxq_info(rx_ring->xsk_pool, &rx_ring->xdp_rxq);

iavf_check_alloc_rx_buffers_zc(adapter, rx_ring);
} else {
err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
MEM_TYPE_PAGE_POOL,
rx_ring->pool);
if (err)
netdev_err(adapter->netdev, "Could not register XDP memory model for RX queue %u, error: %d\n",
queue_idx, err);

iavf_alloc_rx_pages(rx_ring);
}

RCU_INIT_POINTER(rx_ring->xdp_prog, adapter->xdp_prog);
iavf_alloc_rx_pages(rx_ring);
}

/**
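The XSK branch above defers buffer allocation to iavf_check_alloc_rx_buffers_zc(), which lives in the iavf_xsk.c hunks that did not render at the bottom of this page. Purely as a hedged illustration (the _sketch name and the descriptor details are assumptions, not the patch body), a ZC refill in this driver family looks roughly like this:

	/* Hedged sketch of a ZC refill, not the body of
	 * iavf_check_alloc_rx_buffers_zc(). Assumes the IAVF_RX_DESC()
	 * accessor and the iavf_release_rx_desc() tail bump seen
	 * elsewhere in this diff.
	 */
	static u32 iavf_alloc_rx_buffers_zc_sketch(struct iavf_ring *rx_ring,
						   u32 count)
	{
		u32 ntu = rx_ring->next_to_use, done = 0;

		while (done < count) {
			union iavf_32byte_rx_desc *rx_desc =
				IAVF_RX_DESC(rx_ring, ntu);
			struct xdp_buff *xdp = xsk_buff_alloc(rx_ring->xsk_pool);

			if (!xdp)	/* fill queue exhausted; retry next poll */
				break;

			rx_ring->xdp_buff[ntu] = xdp;
			rx_desc->read.pkt_addr =
				cpu_to_le64(xsk_buff_xdp_get_dma(xdp));
			rx_desc->wb.qword1.status_error_len = 0;

			if (++ntu == rx_ring->count)
				ntu = 0;
			done++;
		}

		if (rx_ring->next_to_use != ntu)
			iavf_release_rx_desc(rx_ring, ntu);

		return done;
	}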
8 changes: 8 additions & 0 deletions drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -145,6 +145,14 @@ DEFINE_EVENT(

TP_ARGS(ring, desc, skb));

DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq_zc,
TP_PROTO(struct iavf_ring *ring,
union iavf_32byte_rx_desc *desc,
struct sk_buff *skb),

TP_ARGS(ring, desc, skb));

DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq_rx,
TP_PROTO(struct iavf_ring *ring,
94 changes: 49 additions & 45 deletions drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -103,6 +103,11 @@ void iavf_free_tx_resources(struct iavf_ring *tx_ring)
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;

if (tx_ring->flags & IAVF_TXRX_FLAGS_XSK) {
tx_ring->dev = tx_ring->xsk_pool->dev;
tx_ring->flags &= ~IAVF_TXRX_FLAGS_XSK;
}

if (tx_ring->desc) {
dma_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring->desc, tx_ring->dma);
@@ -697,24 +702,10 @@ int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
return -ENOMEM;
}

/**
* iavf_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
**/
void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
static void iavf_clean_rx_pages(struct iavf_ring *rx_ring)
{
const struct page_pool_params *pp = &rx_ring->pool->p;

/* ring already cleared, nothing to do */
if (!rx_ring->rx_pages)
return;

if (rx_ring->skb) {
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
}

/* Free all the Rx ring sk_buffs */
for (u32 i = 0; i < rx_ring->count; i++) {
struct page *page = rx_ring->rx_pages[i];

@@ -731,6 +722,27 @@ void iavf_clean_rx_ring(struct iavf_ring *rx_ring)

page_pool_put_full_page(rx_ring->pool, page, false);
}
}

/**
* iavf_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
**/
void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
/* ring already cleared, nothing to do */
if (!rx_ring->rx_pages)
return;

if (rx_ring->skb) {
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
}

if (rx_ring->flags & IAVF_TXRX_FLAGS_XSK)
iavf_xsk_clean_rx_ring(rx_ring);
else
iavf_clean_rx_pages(rx_ring);

rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -744,7 +756,7 @@ void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
**/
void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
struct device *dev = rx_ring->pool->p.dev;
struct device *dev;

iavf_clean_rx_ring(rx_ring);
kfree(rx_ring->rx_pages);
@@ -754,7 +766,14 @@ void iavf_free_rx_resources(struct iavf_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

page_pool_destroy(rx_ring->pool);
if (rx_ring->flags & IAVF_TXRX_FLAGS_XSK) {
dev = rx_ring->xsk_pool->dev;
rx_ring->flags &= ~IAVF_TXRX_FLAGS_XSK;
} else {
dev = rx_ring->pool->p.dev;
page_pool_destroy(rx_ring->pool);
}

rx_ring->dev = dev;

if (rx_ring->desc) {
@@ -828,6 +847,8 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)

/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_pages);

/* Both iavf_ring::rx_pages and ::xdp_buff are arrays of pointers */
rx_ring->rx_pages = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_pages),
GFP_KERNEL);
if (!rx_ring->rx_pages)
@@ -847,9 +868,13 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
goto err;
}

ret = iavf_rx_page_pool_create(rx_ring);
if (ret)
goto err_free_dma;
iavf_xsk_setup_rx_ring(rx_ring);

if (!(rx_ring->flags & IAVF_TXRX_FLAGS_XSK)) {
ret = iavf_rx_page_pool_create(rx_ring);
if (ret)
goto err_free_dma;
}

rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -865,24 +890,6 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
return ret;
}

/**
* iavf_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump
* @val: new head index
**/
static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;

/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
writel(val, rx_ring->tail);
}

/**
* iavf_receive_skb - Send a completed packet up the stack
* @rx_ring: rx ring in play
@@ -1417,12 +1424,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
if (unlikely(to_refill >= IAVF_RX_BUFFER_WRITE))
cleaned_count = budget;

u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
iavf_update_rx_ring_stats(rx_ring, total_rx_bytes, total_rx_packets);

return cleaned_count;
}
@@ -1582,7 +1584,9 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
rcu_read_lock();

iavf_for_each_ring(ring, q_vector->rx) {
int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
int cleaned = !!(ring->flags & IAVF_TXRX_FLAGS_XSK) ?
iavf_clean_rx_irq_zc(ring, budget_per_ring) :
iavf_clean_rx_irq(ring, budget_per_ring);

work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
42 changes: 42 additions & 0 deletions drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -275,6 +275,7 @@ struct iavf_ring {
struct net_device *netdev; /* netdev ring maps to */
union {
struct iavf_tx_buffer *tx_bi;
struct xdp_buff **xdp_buff;
struct page **rx_pages;
};
DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
@@ -505,8 +506,49 @@ static inline void __iavf_update_tx_ring_stats(struct iavf_ring *tx_ring,
#define iavf_update_tx_ring_stats(r, p, b) \
__iavf_update_tx_ring_stats(r, &(r)->q_vector->tx, p, b)

/**
* __iavf_update_rx_ring_stats - update RX ring and ring container stats
* @rx_ring: ring to update
* @rc: RX ring container whose totals are bumped
* @rx_bytes: number of bytes processed since last update
* @rx_packets: number of packets processed since last update
**/
static inline void __iavf_update_rx_ring_stats(struct iavf_ring *rx_ring,
struct iavf_ring_container *rc,
u32 rx_bytes, u32 rx_packets)
{
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += rx_packets;
rx_ring->stats.bytes += rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
rc->total_packets += rx_packets;
rc->total_bytes += rx_bytes;
}

#define iavf_update_rx_ring_stats(r, b, p) \
__iavf_update_rx_ring_stats(r, &(r)->q_vector->rx, b, p)

/**
* iavf_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump
* @val: new head index
**/
static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;

/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
writel(val, rx_ring->tail);
}

#define IAVF_RXQ_XDP_ACT_FINALIZE_TX BIT(0)
#define IAVF_RXQ_XDP_ACT_FINALIZE_REDIR BIT(1)
#define IAVF_RXQ_XDP_ACT_STOP_NOW BIT(2)

/**
* iavf_finalize_xdp_rx - Finalize XDP actions once per RX ring clean
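The IAVF_RXQ_XDP_ACT_* bits above are OR'ed together by the RX clean loop and consumed once per NAPI poll by iavf_finalize_xdp_rx(), whose body is truncated in this view. As a hedged sketch of what such a finalization step usually does (the _sketch name, the xdp_ring parameter, and the tail-bump detail are assumptions, not the patch):

	/* Hedged sketch: act once per NAPI poll on the verdict bits
	 * accumulated by the RX clean loop. Not the exact iavf body.
	 */
	static void iavf_finalize_xdp_rx_sketch(struct iavf_ring *xdp_ring,
						u32 xdp_act)
	{
		if (xdp_act & IAVF_RXQ_XDP_ACT_FINALIZE_REDIR)
			xdp_do_flush();	/* flush frames bulked by xdp_do_redirect() */

		if (xdp_act & IAVF_RXQ_XDP_ACT_FINALIZE_TX) {
			/* one tail bump for all XDP_TX frames queued this poll */
			wmb();	/* descriptors must be visible before the tail */
			writel(xdp_ring->next_to_use, xdp_ring->tail);
		}
	}

Batching the flush and the tail write this way keeps the per-packet path free of MMIO and map flushes, which is the point of carrying verdicts as flags instead of acting on each frame.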
14 changes: 11 additions & 3 deletions drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -411,8 +411,8 @@ static void iavf_set_qp_config_info(struct virtchnl_queue_pair_info *vqpi,
bool xdp_pair)
{
struct iavf_ring *rxq = &adapter->rx_rings[queue_index];
const struct page_pool_params *pp = &rxq->pool->p;
struct iavf_ring *txq;
u32 hr, max_len;
int xdpq_idx;

if (xdp_pair) {
@@ -433,12 +433,20 @@ static void iavf_set_qp_config_info(struct virtchnl_queue_pair_info *vqpi,
return;
}

max_frame = min_not_zero(max_frame, IAVF_MAX_RX_FRAME_LEN(pp->offset));
if (rxq->flags & IAVF_TXRX_FLAGS_XSK) {
hr = xsk_pool_get_headroom(rxq->xsk_pool);
max_len = xsk_pool_get_rx_frame_size(rxq->xsk_pool);
} else {
hr = rxq->pool->p.offset;
max_len = rxq->pool->p.max_len;
}

max_frame = min_not_zero(max_frame, IAVF_MAX_RX_FRAME_LEN(hr));

vqpi->rxq.ring_len = rxq->count;
vqpi->rxq.dma_ring_addr = rxq->dma;
vqpi->rxq.max_pkt_size = max_frame;
vqpi->rxq.databuffer_size = pp->max_len;
vqpi->rxq.databuffer_size = max_len;
}

/**
[Diffs for the remaining two changed files, which add the iavf_clean_rx_irq_zc(), iavf_xsk_setup_rx_ring(), iavf_xsk_clean_rx_ring() and iavf_check_alloc_rx_buffers_zc() implementations referenced above, did not load in this view.]
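Since those hunks are missing, here is a minimal hedged sketch of how an AF_XDP ZC clean loop typically looks in this driver family, stitched together from the helpers visible in this diff (IAVF_RX_DESC(), iavf_update_rx_ring_stats(), the IAVF_RXQ_XDP_ACT_* bits) and standard XDP/XSK core APIs. Everything here, including the _sketch names and the XDP_PASS handling implied by the commit message, is an illustration, not the actual patch body:

	/* Hedged sketch, not the patch body: a minimal AF_XDP ZC clean
	 * loop in the iavf style. Descriptor handling mirrors the non-ZC
	 * iavf_clean_rx_irq() and sibling Intel drivers.
	 */
	static int iavf_clean_rx_irq_zc_sketch(struct iavf_ring *rx_ring,
					       int budget)
	{
		u32 total_rx_bytes = 0, total_rx_packets = 0;
		u32 ntc = rx_ring->next_to_clean;
		struct bpf_prog *prog;
		u32 xdp_act = 0;

		/* the NAPI poll loop already holds rcu_read_lock() */
		prog = rcu_dereference(rx_ring->xdp_prog);

		while (likely(total_rx_packets < (u32)budget)) {
			union iavf_32byte_rx_desc *rx_desc =
				IAVF_RX_DESC(rx_ring, ntc);
			struct xdp_buff *xdp;
			u32 size, act;
			u64 qword;

			qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
			if (!(qword & BIT_ULL(IAVF_RX_DESC_STATUS_DD_SHIFT)))
				break;

			/* read the descriptor only after the DD bit is seen */
			dma_rmb();

			size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
			       IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;

			xdp = rx_ring->xdp_buff[ntc];
			xsk_buff_set_size(xdp, size);
			xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);

			act = bpf_prog_run_xdp(prog, xdp);
			switch (act) {
			case XDP_REDIRECT:
				if (!xdp_do_redirect(rx_ring->netdev, xdp, prog))
					xdp_act |= IAVF_RXQ_XDP_ACT_FINALIZE_REDIR;
				else
					xsk_buff_free(xdp);
				break;
			case XDP_TX:
				/* queue xdp on the paired XDP Tx ring (elided),
				 * then request one tail bump per poll
				 */
				xdp_act |= IAVF_RXQ_XDP_ACT_FINALIZE_TX;
				break;
			default:
				bpf_warn_invalid_xdp_action(rx_ring->netdev,
							    prog, act);
				fallthrough;
			case XDP_ABORTED:
				trace_xdp_exception(rx_ring->netdev, prog, act);
				fallthrough;
			case XDP_PASS:	/* skb path arrives in a later patch */
			case XDP_DROP:
				xsk_buff_free(xdp);
				break;
			}

			total_rx_bytes += size;
			total_rx_packets++;
			if (++ntc == rx_ring->count)
				ntc = 0;
		}

		rx_ring->next_to_clean = ntc;

		/* refill from the XSK fill queue, run iavf_finalize_xdp_rx(),
		 * and handle the need_wakeup protocol here (all elided)
		 */
		iavf_update_rx_ring_stats(rx_ring, total_rx_bytes,
					  total_rx_packets);

		return total_rx_packets;
	}

Per the commit message, XDP_PASS cannot be honoured yet: passing a frame up the stack means copying the XSK buffer into an skb, and that path only lands in later patches, so the sketch treats it as a drop.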
