
Commit 9c8f21e

fengidri authored and borkmann committed
xsk: Build skb by page (aka generic zerocopy xmit)
This patch constructs the skb directly from pages to save memory copy overhead.

The function is implemented on top of IFF_TX_SKB_NO_LINEAR: only network cards whose priv_flags advertise IFF_TX_SKB_NO_LINEAR will have the skb built directly from pages. If the feature is not supported, the data still has to be copied into a freshly allocated skb.

---------------- Performance Testing ------------

The test environment is an Aliyun ECS server.

Test cmd:
```
xdpsock -i eth0 -t -S -s <msg size>
```

Test result data (percent = page / copy - 1):

size     64       512      1024     1500
copy     1916747  1775988  1600203  1440054
page     1974058  1953655  1945463  1904478
percent  3.0%     10.0%    21.58%   32.3%

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Signed-off-by: Alexander Lobakin <alobakin@pm.me>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20210218204908.5455-6-alobakin@pm.me
1 parent 3914d88 commit 9c8f21e
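For context, a driver opts into this path by setting the flag in its net_device's priv_flags, which is what the `dev->priv_flags & IFF_TX_SKB_NO_LINEAR` check in the patch tests. A minimal, hypothetical snippet (the driver name and setup hook are illustrative, not part of this patch):

```c
#include <linux/netdevice.h>

/* Hypothetical driver opt-in: IFF_TX_SKB_NO_LINEAR declares that the
 * driver can transmit skbs whose linear area is empty, with the whole
 * payload carried in page fragments. */
static void mydrv_netdev_setup(struct net_device *dev)
{
	dev->priv_flags |= IFF_TX_SKB_NO_LINEAR;
}
```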

File tree: 1 file changed (+96, -24)

net/xdp/xsk.c

Lines changed: 96 additions & 24 deletions
```diff
@@ -445,6 +445,97 @@ static void xsk_destruct_skb(struct sk_buff *skb)
 	sock_wfree(skb);
 }
 
+static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
+					      struct xdp_desc *desc)
+{
+	struct xsk_buff_pool *pool = xs->pool;
+	u32 hr, len, ts, offset, copy, copied;
+	struct sk_buff *skb;
+	struct page *page;
+	void *buffer;
+	int err, i;
+	u64 addr;
+
+	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
+
+	skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
+	if (unlikely(!skb))
+		return ERR_PTR(err);
+
+	skb_reserve(skb, hr);
+
+	addr = desc->addr;
+	len = desc->len;
+	ts = pool->unaligned ? len : pool->chunk_size;
+
+	buffer = xsk_buff_raw_get_data(pool, addr);
+	offset = offset_in_page(buffer);
+	addr = buffer - pool->addrs;
+
+	for (copied = 0, i = 0; copied < len; i++) {
+		page = pool->umem->pgs[addr >> PAGE_SHIFT];
+		get_page(page);
+
+		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
+		skb_fill_page_desc(skb, i, page, offset, copy);
+
+		copied += copy;
+		addr += copy;
+		offset = 0;
+	}
+
+	skb->len += len;
+	skb->data_len += len;
+	skb->truesize += ts;
+
+	refcount_add(ts, &xs->sk.sk_wmem_alloc);
+
+	return skb;
+}
+
+static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+				     struct xdp_desc *desc)
+{
+	struct net_device *dev = xs->dev;
+	struct sk_buff *skb;
+
+	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
+		skb = xsk_build_skb_zerocopy(xs, desc);
+		if (IS_ERR(skb))
+			return skb;
+	} else {
+		u32 hr, tr, len;
+		void *buffer;
+		int err;
+
+		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
+		tr = dev->needed_tailroom;
+		len = desc->len;
+
+		skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
+		if (unlikely(!skb))
+			return ERR_PTR(err);
+
+		skb_reserve(skb, hr);
+		skb_put(skb, len);
+
+		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
+		err = skb_store_bits(skb, 0, buffer, len);
+		if (unlikely(err)) {
+			kfree_skb(skb);
+			return ERR_PTR(err);
+		}
+	}
+
+	skb->dev = dev;
+	skb->priority = xs->sk.sk_priority;
+	skb->mark = xs->sk.sk_mark;
+	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
+	skb->destructor = xsk_destruct_skb;
+
+	return skb;
+}
+
 static int xsk_generic_xmit(struct sock *sk)
 {
 	struct xdp_sock *xs = xdp_sk(sk);
```
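The core of xsk_build_skb_zerocopy() is the loop that carves a single Tx descriptor across page boundaries: the first fragment is capped by the space left in its page, and every later fragment starts at offset 0. A standalone userspace sketch of just that arithmetic (PAGE_SIZE hardcoded for the demo, no kernel APIs):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u /* assumed page size for the demo */

/* Mirror of the fragment-splitting arithmetic in the loop above:
 * a buffer of `len` bytes starting `offset` bytes into a page. */
static void carve(uint32_t offset, uint32_t len)
{
	uint32_t copied = 0, copy;
	int i;

	for (i = 0; copied < len; i++) {
		copy = PAGE_SIZE - offset < len - copied ?
		       PAGE_SIZE - offset : len - copied;
		printf("frag %d: offset=%u len=%u\n", i, offset, copy);
		copied += copy;
		offset = 0; /* later fragments start at the page head */
	}
}

int main(void)
{
	carve(3000, 9000); /* splits into 1096 + 4096 + 3808 bytes */
	return 0;
}
```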
```diff
@@ -454,56 +545,37 @@ static int xsk_generic_xmit(struct sock *sk)
 	struct sk_buff *skb;
 	unsigned long flags;
 	int err = 0;
-	u32 hr, tr;
 
 	mutex_lock(&xs->mutex);
 
 	if (xs->queue_id >= xs->dev->real_num_tx_queues)
 		goto out;
 
-	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
-	tr = xs->dev->needed_tailroom;
-
 	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
-		char *buffer;
-		u64 addr;
-		u32 len;
-
 		if (max_batch-- == 0) {
 			err = -EAGAIN;
 			goto out;
 		}
 
-		len = desc.len;
-		skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
-		if (unlikely(!skb))
+		skb = xsk_build_skb(xs, &desc);
+		if (IS_ERR(skb)) {
+			err = PTR_ERR(skb);
 			goto out;
+		}
 
-		skb_reserve(skb, hr);
-		skb_put(skb, len);
-
-		addr = desc.addr;
-		buffer = xsk_buff_raw_get_data(xs->pool, addr);
-		err = skb_store_bits(skb, 0, buffer, len);
 		/* This is the backpressure mechanism for the Tx path.
 		 * Reserve space in the completion queue and only proceed
 		 * if there is space in it. This avoids having to implement
 		 * any buffering in the Tx path.
 		 */
 		spin_lock_irqsave(&xs->pool->cq_lock, flags);
-		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
+		if (xskq_prod_reserve(xs->pool->cq)) {
 			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 			kfree_skb(skb);
 			goto out;
 		}
 		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 
-		skb->dev = xs->dev;
-		skb->priority = sk->sk_priority;
-		skb->mark = sk->sk_mark;
-		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
-		skb->destructor = xsk_destruct_skb;
-
 		err = __dev_direct_xmit(skb, xs->queue_id);
 		if (err == NETDEV_TX_BUSY) {
 			/* Tell user-space to retry the send */
```
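The hunk above keeps the existing Tx backpressure intact: a completion-queue slot is reserved before every transmit, and the send is abandoned (skb freed, loop exited) when the queue is full. A toy model of that reserve-before-send pattern (illustrative names, not the kernel's xskq_* API):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CQ_SIZE 4u /* toy completion-queue capacity */

struct toy_cq {
	uint32_t prod; /* slots reserved by the producer */
	uint32_t cons; /* slots released by the consumer */
};

/* Analogous to xskq_prod_reserve(): claim a slot or report "full". */
static bool cq_reserve(struct toy_cq *q)
{
	if (q->prod - q->cons == CQ_SIZE)
		return false; /* no room: caller frees the skb and stops */
	q->prod++;
	return true;
}

int main(void)
{
	struct toy_cq q = { 0 };

	for (int i = 0; i < 6; i++)
		printf("send %d: %s\n", i,
		       cq_reserve(&q) ? "reserved" : "backpressure");
	return 0;
}
```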
