Commit 1dc4c55

CCX-Stingray authored and davem330 committed
bnxt: adding bnxt_xdp_build_skb to build skb from multibuffer xdp_buff
Since we have an xdp_buff with frags, there needs to be a way to convert
that into a valid sk_buff in the event that XDP_PASS is the resulting
operation.  This adds a new rx_skb_func used when the netdev has an MTU
that prevents the packets from sitting in a single page.

This also makes sure that GRO/LRO stay disabled even when using the
aggregation ring for large buffers.

v3: Use BNXT_PAGE_MODE_BUF_SIZE for build_skb

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
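For readers less familiar with multi-buffer XDP, the conversion this commit implements follows a general pattern: build an sk_buff over the head page, then hand the tail fragments already recorded in the xdp_buff's skb_shared_info over to the new skb via xdp_update_skb_shared_info(). The sketch below is illustrative only and not part of this commit; the helper name xdp_frags_to_skb() and the frag_size/headroom handling are assumptions, while the driver's actual code is bnxt_rx_multi_page_skb() and bnxt_xdp_build_skb() in the diff that follows.

#include <linux/skbuff.h>
#include <net/xdp.h>

/* Illustrative sketch, not from this commit: generic multi-buffer
 * XDP_PASS -> sk_buff conversion.  frag_size is assumed to cover
 * headroom + packet data + tailroom of the head buffer.
 */
static struct sk_buff *xdp_frags_to_skb(struct xdp_buff *xdp,
                                        unsigned int frag_size)
{
        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
        unsigned int headroom = xdp->data - xdp->data_hard_start;
        unsigned int len = xdp->data_end - xdp->data;
        struct sk_buff *skb;

        /* Wrap the head buffer in an skb. */
        skb = build_skb(xdp->data_hard_start, frag_size);
        if (!skb)
                return NULL;

        skb_reserve(skb, headroom);
        __skb_put(skb, len);

        /* Transfer the tail fragments collected in the xdp_buff. */
        if (xdp_buff_has_frags(xdp))
                xdp_update_skb_shared_info(skb, sinfo->nr_frags,
                                           sinfo->xdp_frags_size,
                                           PAGE_SIZE * sinfo->nr_frags,
                                           xdp_buff_is_frag_pfmemalloc(xdp));
        return skb;
}

In the bnxt version below, the truesize passed to xdp_update_skb_shared_info() is PAGE_SIZE * sinfo->nr_frags because each aggregation buffer occupies a full page, and the head skb is built with BNXT_PAGE_MODE_BUF_SIZE plus the ring's rx_dma_offset.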
1 parent: 9a6aa35

3 files changed, 85 insertions(+), 7 deletions(-)

drivers/net/ethernet/broadcom/bnxt/bnxt.c (+58, -7)

@@ -971,6 +971,39 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
         rxr->rx_sw_agg_prod = sw_prod;
 }
 
+static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+                                              struct bnxt_rx_ring_info *rxr,
+                                              u16 cons, void *data, u8 *data_ptr,
+                                              dma_addr_t dma_addr,
+                                              unsigned int offset_and_len)
+{
+        unsigned int len = offset_and_len & 0xffff;
+        struct page *page = data;
+        u16 prod = rxr->rx_prod;
+        struct sk_buff *skb;
+        int err;
+
+        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+        if (unlikely(err)) {
+                bnxt_reuse_rx_data(rxr, cons, data);
+                return NULL;
+        }
+        dma_addr -= bp->rx_dma_offset;
+        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+                             DMA_ATTR_WEAK_ORDERING);
+        skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
+                                            bp->rx_dma_offset);
+        if (!skb) {
+                __free_page(page);
+                return NULL;
+        }
+        skb_mark_for_recycle(skb);
+        skb_reserve(skb, bp->rx_dma_offset);
+        __skb_put(skb, len);
+
+        return skb;
+}
+
 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                         struct bnxt_rx_ring_info *rxr,
                                         u16 cons, void *data, u8 *data_ptr,
@@ -993,7 +1026,6 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
         dma_addr -= bp->rx_dma_offset;
         dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                              DMA_ATTR_WEAK_ORDERING);
-        page_pool_release_page(rxr->page_pool, page);
 
         if (unlikely(!payload))
                 payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1004,6 +1036,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                 return NULL;
         }
 
+        skb_mark_for_recycle(skb);
         off = (void *)data_ptr - page_address(page);
         skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
         memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
@@ -1949,6 +1982,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                 rc = -ENOMEM;
                                 goto next_rx;
                         }
+                } else {
+                        skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+                        if (!skb) {
+                                /* we should be able to free the old skb here */
+                                cpr->sw_stats.rx.rx_oom_discards += 1;
+                                rc = -ENOMEM;
+                                goto next_rx;
+                        }
                 }
         }
 
@@ -3964,14 +4005,21 @@ void bnxt_set_ring_params(struct bnxt *bp)
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 {
         if (page_mode) {
-                if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
-                        return -EOPNOTSUPP;
-                bp->dev->max_mtu =
-                        min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
-                bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+                bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+
+                if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+                        bp->flags |= BNXT_FLAG_JUMBO;
+                        bp->rx_skb_func = bnxt_rx_multi_page_skb;
+                        bp->dev->max_mtu =
+                                min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+                } else {
+                        bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+                        bp->rx_skb_func = bnxt_rx_page_skb;
+                        bp->dev->max_mtu =
+                                min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+                }
                 bp->rx_dir = DMA_BIDIRECTIONAL;
-                bp->rx_skb_func = bnxt_rx_page_skb;
                 /* Disable LRO or GRO_HW */
                 netdev_update_features(bp->dev);
         } else {
@@ -11121,6 +11169,9 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
 
+        if (!(bp->flags & BNXT_FLAG_TPA))
+                features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
         if (!(features & NETIF_F_GRO))
                 features &= ~NETIF_F_GRO_HW;
 
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c (+23)

@@ -361,3 +361,26 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
         }
         return rc;
 }
+
+struct sk_buff *
+bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
+                   struct page_pool *pool, struct xdp_buff *xdp,
+                   struct rx_cmp_ext *rxcmp1)
+{
+        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+
+        if (!skb)
+                return NULL;
+        skb_checksum_none_assert(skb);
+        if (RX_CMP_L4_CS_OK(rxcmp1)) {
+                if (bp->dev->features & NETIF_F_RXCSUM) {
+                        skb->ip_summed = CHECKSUM_UNNECESSARY;
+                        skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+                }
+        }
+        xdp_update_skb_shared_info(skb, num_frags,
+                                   sinfo->xdp_frags_size,
+                                   PAGE_SIZE * sinfo->nr_frags,
+                                   xdp_buff_is_frag_pfmemalloc(xdp));
+        return skb;
+}

drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h (+4)

@@ -28,4 +28,8 @@ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
 void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                         u16 cons, u8 **data_ptr, unsigned int *len,
                         struct xdp_buff *xdp);
+struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
+                                   u8 num_frags, struct page_pool *pool,
+                                   struct xdp_buff *xdp,
+                                   struct rx_cmp_ext *rxcmp1);
 #endif
