
Commit c465576

magnus-karlsson authored and borkmann committed
xsk: i40e: ice: ixgbe: mlx5: Rename xsk zero-copy driver interfaces
Rename the AF_XDP zero-copy driver interface functions to better reflect what they do after the replacement of umems with buffer pools in the previous commit. Mostly this means dropping the umem name from the function names in favour of xsk_buff, and having them take a buffer pool pointer instead of a umem. The various ring functions have also been renamed in the process so that they follow the same naming convention as the internal functions in xsk_queue.h, both to make clearer what they do and for consistency.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-3-git-send-email-magnus.karlsson@intel.com
1 parent 1742b3d commit c465576

File tree: 19 files changed, +179 −167 lines

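The per-driver hunks below all apply one mechanical mapping. Summarised from the changes shown in this commit (old umem-based call on the left, pool-based replacement on the right):

    xdp_get_xsk_pool_from_qid(dev, qid)               -> xsk_get_pool_from_qid(dev, qid)
    xsk_buff_dma_map(pool->umem, dev, attrs)          -> xsk_pool_dma_map(pool, dev, attrs)
    xsk_buff_dma_unmap(pool->umem, attrs)             -> xsk_pool_dma_unmap(pool, attrs)
    xsk_umem_get_rx_frame_size(pool->umem)            -> xsk_pool_get_rx_frame_size(pool)
    xsk_buff_set_rxq_info(pool->umem, rxq)            -> xsk_pool_set_rxq_info(pool, rxq)
    xsk_buff_alloc(pool->umem)                        -> xsk_buff_alloc(pool)
    xsk_buff_can_alloc(pool->umem, n)                 -> xsk_buff_can_alloc(pool, n)
    xsk_buff_raw_get_dma(pool->umem, addr)            -> xsk_buff_raw_get_dma(pool, addr)
    xsk_buff_raw_dma_sync_for_device(pool->umem, ...) -> xsk_buff_raw_dma_sync_for_device(pool, ...)
    xsk_umem_uses_need_wakeup(pool->umem)             -> xsk_uses_need_wakeup(pool)
    xsk_set_rx_need_wakeup(pool->umem)                -> xsk_set_rx_need_wakeup(pool)
    xsk_clear_rx_need_wakeup(pool->umem)              -> xsk_clear_rx_need_wakeup(pool)
    xsk_set_tx_need_wakeup(pool->umem)                -> xsk_set_tx_need_wakeup(pool)
    xsk_umem_consume_tx(pool->umem, &desc)            -> xsk_tx_peek_desc(pool, &desc)
    xsk_umem_consume_tx_done(pool->umem)              -> xsk_tx_release(pool)
    xsk_umem_complete_tx(pool->umem, n)               -> xsk_tx_completed(pool, n)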

drivers/net/ethernet/intel/i40e/i40e_main.c

Lines changed: 3 additions & 3 deletions
@@ -3138,7 +3138,7 @@ static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
 	if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
 		return NULL;
 
-	return xdp_get_xsk_pool_from_qid(ring->vsi->netdev, qid);
+	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
 }
 
 /**
@@ -3286,7 +3286,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 		if (ret)
 			return ret;
 		ring->rx_buf_len =
-			xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
+			xsk_pool_get_rx_frame_size(ring->xsk_pool);
 		/* For AF_XDP ZC, we disallow packets to span on
 		 * multiple buffers, thus letting us skip that
 		 * handling in the fast-path.
@@ -3370,7 +3370,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	writel(0, ring->tail);
 
 	if (ring->xsk_pool) {
-		xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
+		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
 		ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
 	} else {
 		ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
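A condensed sketch of the RX-ring setup pattern the i40e hunks above converge on. example_setup_rx_zc() and the elided error handling are illustrative only; the renamed helpers are declared in <net/xdp_sock_drv.h>.

#include <net/xdp_sock_drv.h>

static int example_setup_rx_zc(struct i40e_ring *ring, u16 qid)
{
	/* Pool lookup by queue id, with no umem indirection anymore. */
	ring->xsk_pool = xsk_get_pool_from_qid(ring->vsi->netdev, qid);
	if (!ring->xsk_pool)
		return -EINVAL;

	/* Frame size and rxq registration now take the pool directly. */
	ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
	xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
	return 0;
}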

drivers/net/ethernet/intel/i40e/i40e_xsk.c

Lines changed: 16 additions & 18 deletions
@@ -55,8 +55,7 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
 	    qid >= netdev->real_num_tx_queues)
 		return -EINVAL;
 
-	err = xsk_buff_dma_map(pool->umem, &vsi->back->pdev->dev,
-			       I40E_RX_DMA_ATTR);
+	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
 	if (err)
 		return err;
 
@@ -97,7 +96,7 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
 	bool if_running;
 	int err;
 
-	pool = xdp_get_xsk_pool_from_qid(netdev, qid);
+	pool = xsk_get_pool_from_qid(netdev, qid);
 	if (!pool)
 		return -EINVAL;
 
@@ -110,7 +109,7 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
 	}
 
 	clear_bit(qid, vsi->af_xdp_zc_qps);
-	xsk_buff_dma_unmap(pool->umem, I40E_RX_DMA_ATTR);
+	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
 
 	if (if_running) {
 		err = i40e_queue_pair_enable(vsi, qid);
@@ -196,7 +195,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
 	rx_desc = I40E_RX_DESC(rx_ring, ntu);
 	bi = i40e_rx_bi(rx_ring, ntu);
 	do {
-		xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem);
+		xdp = xsk_buff_alloc(rx_ring->xsk_pool);
 		if (!xdp) {
 			ok = false;
 			goto no_buffers;
@@ -363,11 +362,11 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
 	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
 
-	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) {
+	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
 		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
-			xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem);
+			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
 		else
-			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem);
+			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
 
 		return (int)total_rx_packets;
 	}
@@ -390,12 +389,11 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 	dma_addr_t dma;
 
 	while (budget-- > 0) {
-		if (!xsk_umem_consume_tx(xdp_ring->xsk_pool->umem, &desc))
+		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
 			break;
 
-		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool->umem,
-					   desc.addr);
-		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool->umem, dma,
+		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
 						 desc.len);
 
 		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
@@ -422,7 +420,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 			  I40E_TXD_QW1_CMD_SHIFT);
 		i40e_xdp_ring_update_tail(xdp_ring);
 
-		xsk_umem_consume_tx_done(xdp_ring->xsk_pool->umem);
+		xsk_tx_release(xdp_ring->xsk_pool);
 		i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
 	}
 
@@ -494,13 +492,13 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
 		tx_ring->next_to_clean -= tx_ring->count;
 
 	if (xsk_frames)
-		xsk_umem_complete_tx(bp->umem, xsk_frames);
+		xsk_tx_completed(bp, xsk_frames);
 
 	i40e_arm_wb(tx_ring, vsi, completed_frames);
 
 out_xmit:
-	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_pool->umem))
-		xsk_set_tx_need_wakeup(tx_ring->xsk_pool->umem);
+	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
 
 	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
 }
@@ -591,7 +589,7 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
 	}
 
 	if (xsk_frames)
-		xsk_umem_complete_tx(bp->umem, xsk_frames);
+		xsk_tx_completed(bp, xsk_frames);
 }
 
 /**
@@ -607,7 +605,7 @@ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
 	int i;
 
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		if (xdp_get_xsk_pool_from_qid(netdev, i))
+		if (xsk_get_pool_from_qid(netdev, i))
 			return true;
 	}
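A minimal sketch of the peek/sync/release TX cycle that the i40e, ice, and ixgbe hunks in this commit all share. example_xmit_zc() is illustrative, not a driver function; the hardware descriptor write is reduced to a comment.

static bool example_xmit_zc(struct xsk_buff_pool *pool, unsigned int budget)
{
	struct xdp_desc desc;
	dma_addr_t dma;
	bool work_done = false;

	while (budget-- > 0) {
		/* Was xsk_umem_consume_tx(pool->umem, &desc). */
		if (!xsk_tx_peek_desc(pool, &desc))
			break;

		/* The raw DMA helpers now take the pool as well. */
		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		/* ... fill a hardware TX descriptor with dma/desc.len ... */
		work_done = true;
	}

	/* Was xsk_umem_consume_tx_done(pool->umem). */
	xsk_tx_release(pool);
	return work_done;
}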

drivers/net/ethernet/intel/ice/ice_base.c

Lines changed: 3 additions & 3 deletions
@@ -313,7 +313,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 
 			ring->rx_buf_len =
-				xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
+				xsk_pool_get_rx_frame_size(ring->xsk_pool);
 			/* For AF_XDP ZC, we disallow packets to span on
 			 * multiple buffers, thus letting us skip that
 			 * handling in the fast-path.
@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 							 NULL);
 			if (err)
 				return err;
-			xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
+			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
 
 			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
 				 ring->q_index);
@@ -418,7 +418,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	writel(0, ring->tail);
 
 	if (ring->xsk_pool) {
-		if (!xsk_buff_can_alloc(ring->xsk_pool->umem, num_bufs)) {
+		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
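A sketch of the capacity check plus buffer fill that the ice changes use with the renamed allocator. example_fill_rx_zc() and the bare xdp_buff array are illustrative stand-ins for the driver's ring bookkeeping.

static bool example_fill_rx_zc(struct xsk_buff_pool *pool,
			       struct xdp_buff **bufs, u16 count)
{
	u16 i;

	/* xsk_buff_can_alloc() now takes the pool, not pool->umem. */
	if (!xsk_buff_can_alloc(pool, count))
		return false;

	for (i = 0; i < count; i++) {
		/* So does xsk_buff_alloc(). */
		bufs[i] = xsk_buff_alloc(pool);
		if (!bufs[i])
			return false;
	}
	return true;
}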

drivers/net/ethernet/intel/ice/ice_xsk.c

Lines changed: 14 additions & 14 deletions
@@ -311,7 +311,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
 	    !vsi->xsk_pools[qid])
 		return -EINVAL;
 
-	xsk_buff_dma_unmap(vsi->xsk_pools[qid]->umem, ICE_RX_DMA_ATTR);
+	xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
 	ice_xsk_remove_pool(vsi, qid);
 
 	return 0;
@@ -348,7 +348,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 	vsi->xsk_pools[qid] = pool;
 	vsi->num_xsk_pools_used++;
 
-	err = xsk_buff_dma_map(vsi->xsk_pools[qid]->umem, ice_pf_to_dev(vsi->back),
+	err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
 			       ICE_RX_DMA_ATTR);
 	if (err)
 		return err;
@@ -425,7 +425,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 	rx_buf = &rx_ring->rx_buf[ntu];
 
 	do {
-		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem);
+		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
 		if (!rx_buf->xdp) {
 			ret = true;
 			break;
@@ -645,11 +645,11 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
 	ice_finalize_xdp_rx(rx_ring, xdp_xmit);
 	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
 
-	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) {
+	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
 		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
-			xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem);
+			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
 		else
-			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem);
+			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
 
 		return (int)total_rx_packets;
 	}
@@ -682,11 +682,11 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
 
 		tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
 
-		if (!xsk_umem_consume_tx(xdp_ring->xsk_pool->umem, &desc))
+		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
 			break;
 
-		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool->umem, desc.addr);
-		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool->umem, dma,
+		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
 						 desc.len);
 
 		tx_buf->bytecount = desc.len;
@@ -703,7 +703,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
 
 	if (tx_desc) {
 		ice_xdp_ring_update_tail(xdp_ring);
-		xsk_umem_consume_tx_done(xdp_ring->xsk_pool->umem);
+		xsk_tx_release(xdp_ring->xsk_pool);
 	}
 
 	return budget > 0 && work_done;
@@ -777,10 +777,10 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
 	xdp_ring->next_to_clean = ntc;
 
 	if (xsk_frames)
-		xsk_umem_complete_tx(xdp_ring->xsk_pool->umem, xsk_frames);
+		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
 
-	if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_pool->umem))
-		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool->umem);
+	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
+		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
 
 	ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
 	xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
@@ -896,5 +896,5 @@ void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
 	}
 
 	if (xsk_frames)
-		xsk_umem_complete_tx(xdp_ring->xsk_pool->umem, xsk_frames);
+		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
 }
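The need_wakeup handling repeated across the RX cleanup paths above reduces to one pattern. A sketch, where example_rx_wakeup() is an illustrative helper and ring_empty stands in for the failure/next_to_clean == next_to_use test in the drivers:

static void example_rx_wakeup(struct xsk_buff_pool *pool, bool ring_empty)
{
	/* Was xsk_umem_uses_need_wakeup(pool->umem). */
	if (!xsk_uses_need_wakeup(pool))
		return;

	/* Only ask user space for a kick when we actually ran dry. */
	if (ring_empty)
		xsk_set_rx_need_wakeup(pool);
	else
		xsk_clear_rx_need_wakeup(pool);
}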

drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

Lines changed: 3 additions & 3 deletions
@@ -3714,7 +3714,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
 
 	/* configure the packet buffer length */
 	if (rx_ring->xsk_pool) {
-		u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_pool->umem);
+		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
 
 		/* If the MAC support setting RXDCTL.RLPML, the
 		 * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
@@ -4064,7 +4064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 						   MEM_TYPE_XSK_BUFF_POOL,
 						   NULL));
-		xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
+		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
 	} else {
 		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 						   MEM_TYPE_PAGE_SHARED, NULL));
@@ -4120,7 +4120,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 	}
 
 	if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
-		u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
+		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
 
 		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
 			    IXGBE_RXDCTL_RLPML_EN);

drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c

Lines changed: 16 additions & 16 deletions
@@ -17,7 +17,7 @@ struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
 	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
 		return NULL;
 
-	return xdp_get_xsk_pool_from_qid(adapter->netdev, qid);
+	return xsk_get_pool_from_qid(adapter->netdev, qid);
 }
 
 static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
@@ -35,7 +35,7 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
 	    qid >= netdev->real_num_tx_queues)
 		return -EINVAL;
 
-	err = xsk_buff_dma_map(pool->umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
+	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
 	if (err)
 		return err;
 
@@ -64,7 +64,7 @@ static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
 	struct xsk_buff_pool *pool;
 	bool if_running;
 
-	pool = xdp_get_xsk_pool_from_qid(adapter->netdev, qid);
+	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
 	if (!pool)
 		return -EINVAL;
 
@@ -75,7 +75,7 @@ static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
 	ixgbe_txrx_ring_disable(adapter, qid);
 
 	clear_bit(qid, adapter->af_xdp_zc_qps);
-	xsk_buff_dma_unmap(pool->umem, IXGBE_RX_DMA_ATTR);
+	xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
 
 	if (if_running)
 		ixgbe_txrx_ring_enable(adapter, qid);
@@ -150,7 +150,7 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
 	i -= rx_ring->count;
 
 	do {
-		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem);
+		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
 		if (!bi->xdp) {
 			ok = false;
 			break;
@@ -345,11 +345,11 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
 	q_vector->rx.total_packets += total_rx_packets;
 	q_vector->rx.total_bytes += total_rx_bytes;
 
-	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) {
+	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
 		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
-			xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem);
+			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
 		else
-			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem);
+			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
 
 		return (int)total_rx_packets;
 	}
@@ -389,11 +389,11 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
 			break;
 		}
 
-		if (!xsk_umem_consume_tx(pool->umem, &desc))
+		if (!xsk_tx_peek_desc(pool, &desc))
 			break;
 
-		dma = xsk_buff_raw_get_dma(pool->umem, desc.addr);
-		xsk_buff_raw_dma_sync_for_device(pool->umem, dma, desc.len);
+		dma = xsk_buff_raw_get_dma(pool, desc.addr);
+		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 
 		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
 		tx_bi->bytecount = desc.len;
@@ -419,7 +419,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
 
 	if (tx_desc) {
 		ixgbe_xdp_ring_update_tail(xdp_ring);
-		xsk_umem_consume_tx_done(pool->umem);
+		xsk_tx_release(pool);
 	}
 
 	return !!budget && work_done;
@@ -485,10 +485,10 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 	q_vector->tx.total_packets += total_packets;
 
 	if (xsk_frames)
-		xsk_umem_complete_tx(pool->umem, xsk_frames);
+		xsk_tx_completed(pool, xsk_frames);
 
-	if (xsk_umem_uses_need_wakeup(pool->umem))
-		xsk_set_tx_need_wakeup(pool->umem);
+	if (xsk_uses_need_wakeup(pool))
+		xsk_set_tx_need_wakeup(pool);
 
 	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
 }
@@ -547,5 +547,5 @@ void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
 	}
 
 	if (xsk_frames)
-		xsk_umem_complete_tx(pool->umem, xsk_frames);
+		xsk_tx_completed(pool, xsk_frames);
 }
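A sketch of the enable/disable halves of the pool lifecycle as the ixgbe hunks above shape them. The example_* names and the elided queue start/stop toggling are illustrative; only the xsk calls come from the diff.

static int example_pool_enable(struct xsk_buff_pool *pool,
			       struct device *dev, unsigned long attrs)
{
	/* Was xsk_buff_dma_map(pool->umem, dev, attrs). */
	return xsk_pool_dma_map(pool, dev, attrs);
}

static void example_pool_disable(struct net_device *netdev, u16 qid,
				 unsigned long attrs)
{
	/* Was xdp_get_xsk_pool_from_qid(netdev, qid). */
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(netdev, qid);

	if (!pool)
		return;
	/* Was xsk_buff_dma_unmap(pool->umem, attrs). */
	xsk_pool_dma_unmap(pool, attrs);
}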
