Commit 3106c58

magnus-karlsson authored and borkmann committed
i40e: Use batched xsk Tx interfaces to increase performance
Use the new batched xsk interfaces for the Tx path in the i40e driver to improve performance. On my machine, this yields a throughput increase of 4% for the l2fwd sample app in xdpsock. If we instead just look at the Tx part, this patch set increases Tx throughput by more than 20%.

Note that I had to explicitly unroll the inner loop to get to this performance level, by using a pragma. It is honored by both clang and gcc and should be ignored by versions that do not support it. Using the -funroll-loops compiler command line switch on the source file resulted in loop unrolling at a higher level that led to a performance decrease instead of an increase.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/1605525167-14450-6-git-send-email-magnus.karlsson@gmail.com
Parent: 9349eb3
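At the API level, the patch swaps per-descriptor peek/release calls for one batched dequeue. A condensed sketch of the before/after shapes, with driver details elided (the real code is in the i40e_xsk.c diff below):

        /* Before: dequeue one descriptor at a time from the xsk pool. */
        while (budget-- > 0) {
                if (!xsk_tx_peek_desc(pool, &desc))
                        break;
                /* ... post one hardware Tx descriptor ... */
        }
        xsk_tx_release(pool);

        /* After: a single call peeks and releases up to budget descriptors
         * into the ring's preallocated xsk_descs staging array.
         */
        nb_pkts = xsk_tx_peek_release_desc_batch(pool, descs, budget);
        /* ... post nb_pkts hardware Tx descriptors ... */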

File tree: 4 files changed, +112 -35 lines

drivers/net/ethernet/intel/i40e/i40e_txrx.c (11 additions, 0 deletions)

@@ -676,6 +676,8 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 	i40e_clean_tx_ring(tx_ring);
 	kfree(tx_ring->tx_bi);
 	tx_ring->tx_bi = NULL;
+	kfree(tx_ring->xsk_descs);
+	tx_ring->xsk_descs = NULL;
 
 	if (tx_ring->desc) {
 		dma_free_coherent(tx_ring->dev, tx_ring->size,
@@ -1277,6 +1279,13 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 	if (!tx_ring->tx_bi)
 		goto err;
 
+	if (ring_is_xdp(tx_ring)) {
+		tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
+					     GFP_KERNEL);
+		if (!tx_ring->xsk_descs)
+			goto err;
+	}
+
 	u64_stats_init(&tx_ring->syncp);
 
 	/* round up to nearest 4K */
@@ -1300,6 +1309,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 	return 0;
 
 err:
+	kfree(tx_ring->xsk_descs);
+	tx_ring->xsk_descs = NULL;
 	kfree(tx_ring->tx_bi);
 	tx_ring->tx_bi = NULL;
 	return -ENOMEM;
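One detail worth calling out in the setup path above: the err: label is shared by allocation failures both before and after the xsk_descs allocation, so it has to free both pointers. That is safe because kfree(NULL) is a no-op and the ring structure starts out zeroed. A condensed sketch of the pattern (hypothetical helper, not the driver code):

        /* Assumes *ring is zero-initialized, as i40e ring structures are
         * when allocated; a shared error label can then free everything
         * unconditionally.
         */
        static int setup_tx_arrays(struct i40e_ring *ring)
        {
                ring->tx_bi = kcalloc(ring->count, sizeof(*ring->tx_bi), GFP_KERNEL);
                if (!ring->tx_bi)
                        goto err;       /* xsk_descs is still NULL here */

                ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS,
                                          sizeof(*ring->xsk_descs), GFP_KERNEL);
                if (!ring->xsk_descs)
                        goto err;
                return 0;
        err:
                kfree(ring->xsk_descs); /* kfree(NULL) is a no-op */
                ring->xsk_descs = NULL;
                kfree(ring->tx_bi);
                ring->tx_bi = NULL;
                return -ENOMEM;
        }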

drivers/net/ethernet/intel/i40e/i40e_txrx.h (1 addition, 0 deletions)

@@ -389,6 +389,7 @@ struct i40e_ring {
 	struct i40e_channel *ch;
 	struct xdp_rxq_info xdp_rxq;
 	struct xsk_buff_pool *xsk_pool;
+	struct xdp_desc *xsk_descs;	/* For storing descriptors in the AF_XDP ZC path */
 } ____cacheline_internodealigned_in_smp;
 
 static inline bool ring_uses_build_skb(struct i40e_ring *ring)
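Note that the staging array is sized for the hardware maximum (I40E_MAX_NUM_DESCRIPTORS) rather than the ring's current count, presumably so it does not need reallocating if the ring is later resized via ethtool; that reading is mine, the patch does not state it.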

drivers/net/ethernet/intel/i40e/i40e_xsk.c (84 additions, 35 deletions)

@@ -2,6 +2,7 @@
 /* Copyright(c) 2018 Intel Corporation. */
 
 #include <linux/bpf_trace.h>
+#include <linux/stringify.h>
 #include <net/xdp_sock_drv.h>
 #include <net/xdp.h>
 
@@ -381,6 +382,69 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 	return failure ? budget : (int)total_rx_packets;
 }
 
+static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
+			  unsigned int *total_bytes)
+{
+	struct i40e_tx_desc *tx_desc;
+	dma_addr_t dma;
+
+	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
+	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
+
+	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
+	tx_desc->buffer_addr = cpu_to_le64(dma);
+	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
+						  0, desc->len, 0);
+
+	*total_bytes += desc->len;
+}
+
+static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
+				unsigned int *total_bytes)
+{
+	u16 ntu = xdp_ring->next_to_use;
+	struct i40e_tx_desc *tx_desc;
+	dma_addr_t dma;
+	u32 i;
+
+	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
+		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);
+
+		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
+		tx_desc->buffer_addr = cpu_to_le64(dma);
+		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
+							  I40E_TX_DESC_CMD_EOP,
+							  0, desc[i].len, 0);
+
+		*total_bytes += desc[i].len;
+	}
+
+	xdp_ring->next_to_use = ntu;
+}
+
+static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
+				 unsigned int *total_bytes)
+{
+	u32 batched, leftover, i;
+
+	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
+	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
+	for (i = 0; i < batched; i += PKTS_PER_BATCH)
+		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+	for (i = batched; i < batched + leftover; i++)
+		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+}
+
+static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
+{
+	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+	struct i40e_tx_desc *tx_desc;
+
+	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
+	tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
+}
+
 /**
  * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
  * @xdp_ring: XDP Tx ring
@@ -390,45 +454,30 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 **/
 static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 {
-	unsigned int sent_frames = 0, total_bytes = 0;
-	struct i40e_tx_desc *tx_desc = NULL;
-	struct xdp_desc desc;
-	dma_addr_t dma;
-
-	while (budget-- > 0) {
-		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
-			break;
-
-		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
-		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
-						 desc.len);
-
-		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
-		tx_desc->buffer_addr = cpu_to_le64(dma);
-		tx_desc->cmd_type_offset_bsz =
-			build_ctob(I40E_TX_DESC_CMD_ICRC
-				   | I40E_TX_DESC_CMD_EOP,
-				   0, desc.len, 0);
-
-		sent_frames++;
-		total_bytes += desc.len;
-
-		xdp_ring->next_to_use++;
-		if (xdp_ring->next_to_use == xdp_ring->count)
-			xdp_ring->next_to_use = 0;
+	struct xdp_desc *descs = xdp_ring->xsk_descs;
+	u32 nb_pkts, nb_processed = 0;
+	unsigned int total_bytes = 0;
+
+	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
+	if (!nb_pkts)
+		return false;
+
+	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
+		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
+		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+		xdp_ring->next_to_use = 0;
 	}
 
-	if (tx_desc) {
-		/* Request an interrupt for the last frame and bump tail ptr. */
-		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
-						 I40E_TXD_QW1_CMD_SHIFT);
-		i40e_xdp_ring_update_tail(xdp_ring);
+	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
+			     &total_bytes);
 
-		xsk_tx_release(xdp_ring->xsk_pool);
-		i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
-	}
+	/* Request an interrupt for the last frame and bump tail ptr. */
+	i40e_set_rs_bit(xdp_ring);
+	i40e_xdp_ring_update_tail(xdp_ring);
+
+	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
 
-	return !!budget;
+	return true;
 }
 
 /**
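Two observations on the Tx path above, with my own worked numbers rather than anything from the patch itself.

First, the masking in i40e_fill_tx_hw_ring relies on PKTS_PER_BATCH being a power of two: & ~(PKTS_PER_BATCH - 1) rounds nb_pkts down to a whole number of batches, and & (PKTS_PER_BATCH - 1) keeps the remainder. A standalone sanity check of the arithmetic:

        #include <assert.h>

        #define PKTS_PER_BATCH 4

        int main(void)
        {
                unsigned int nb_pkts = 11;
                unsigned int batched  = nb_pkts & ~(PKTS_PER_BATCH - 1); /* 11 & ~3 = 8 */
                unsigned int leftover = nb_pkts &  (PKTS_PER_BATCH - 1); /* 11 &  3 = 3 */

                assert(batched % PKTS_PER_BATCH == 0);  /* full batches only */
                assert(batched + leftover == nb_pkts);  /* nothing lost */
                return 0;
        }

Second, i40e_xmit_zc fills the ring in at most two contiguous segments. With, say, count = 512, next_to_use = 510 and nb_pkts = 5, the first i40e_fill_tx_hw_ring call writes 2 descriptors into slots 510-511 and next_to_use wraps to 0; the second call writes the remaining 3 into slots 0-2. This is also why i40e_xmit_pkt_batch can increment ntu without a bounds check: the caller guarantees a segment never crosses the end of the ring.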

drivers/net/ethernet/intel/i40e/i40e_xsk.h (16 additions, 0 deletions)

@@ -4,6 +4,22 @@
 #ifndef _I40E_XSK_H_
 #define _I40E_XSK_H_
 
+/* This value should match the pragma in the loop_unrolled_for
+ * macro. Why 4? It is strictly empirical. It seems to be a good
+ * compromise between the advantage of having simultaneous outstanding
+ * reads to the DMA array that can hide each others latency and the
+ * disadvantage of having a larger code path.
+ */
+#define PKTS_PER_BATCH 4
+
+#ifdef __clang__
+#define loop_unrolled_for _Pragma("clang loop unroll_count(4)") for
+#elif __GNUC__ >= 8
+#define loop_unrolled_for _Pragma("GCC unroll 4") for
+#else
+#define loop_unrolled_for for
+#endif
+
 struct i40e_vsi;
 struct xsk_buff_pool;
 struct zero_copy_allocator;
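A note on the compiler guards (my reading, not stated in the patch): "#pragma GCC unroll" only exists from GCC 8 onward, hence the __GNUC__ >= 8 test; clang gets its own loop pragma; every other compiler falls back to a plain for, so the code builds everywhere and merely loses the unroll hint. This matches the commit message's claim that unsupported versions ignore the pragma.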
