iavf: consolidate skb fields processing
For now, filling the skb fields on Rx is a bit scattered across the RQ
polling function. This makes it harder to reuse the code on the XSk Rx
path and also sometimes costs some CPU (e.g. looking up the decoded
packet type twice).

Make it consistent and do everything in iavf_process_skb_fields(). First
of all, get the packet type and decode it. Then, move on to the hash,
csum and VLAN tag; the VLAN handling is moved here as well.
iavf_receive_skb() then becomes the classic eth_type_trans() +
napi_gro_receive() pair.

Finally, make the field-processing function global and the skb receive
function static inline, so that they can be called from a different file
later on.

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
alobakin committed Mar 3, 2023
1 parent f9224f4 commit ab9664d
Showing 2 changed files with 51 additions and 59 deletions.
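
In short: iavf_process_skb_fields() now decodes the packet type once and
passes the result to the hash and checksum helpers, then handles the VLAN
tag and records the Rx queue. Condensed from the diff below (a summary
sketch, not the verbatim hunks):

void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
                             const union iavf_rx_desc *rx_desc,
                             struct sk_buff *skb, u64 qword)
{
        struct iavf_rx_ptype_decoded decoded;
        u32 ptype;

        /* one ptype lookup instead of one per helper */
        ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
        decoded = decode_rx_desc_ptype(ptype);

        iavf_rx_hash(rx_ring, rx_desc, skb, qword, decoded);
        iavf_rx_checksum(rx_ring->vsi, skb, qword, decoded);
        iavf_rx_vlan(rx_ring, rx_desc, skb, qword);

        skb_record_rx_queue(skb, rx_ring->queue_index);
}

The caller in iavf_clean_rx_irq() is then reduced to this call plus the
classic receive pair:

        iavf_process_skb_fields(rx_ring, rx_desc, skb, qword);
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
        napi_gro_receive(&rx_ring->q_vector->napi, skb);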
107 changes: 48 additions & 59 deletions drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -890,27 +890,6 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
 	return ret;
 }
 
-/**
- * iavf_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void iavf_receive_skb(struct iavf_ring *rx_ring,
-			     struct sk_buff *skb, u16 vlan_tag)
-{
-	struct iavf_q_vector *q_vector = rx_ring->q_vector;
-
-	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-	    (vlan_tag & VLAN_VID_MASK))
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-	else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
-		 vlan_tag & VLAN_VID_MASK)
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
-
-	napi_gro_receive(&q_vector->napi, skb);
-}
-
 /**
  * __iavf_alloc_rx_pages - Replace used receive pages
  * @rx_ring: ring to place buffers on
@@ -979,22 +958,18 @@ void iavf_alloc_rx_pages(struct iavf_ring *rxr)
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
  * @qword: `wb.qword1.status_error_len` from the descriptor
+ * @decoded: decoded Rx packet type
  **/
-static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
-				    struct sk_buff *skb,
-				    u64 qword)
+static void iavf_rx_checksum(struct iavf_vsi *vsi, struct sk_buff *skb,
+			     u64 qword, struct iavf_rx_ptype_decoded decoded)
 {
-	struct iavf_rx_ptype_decoded decoded;
 	u32 rx_error, rx_status;
 	bool ipv4, ipv6;
-	u8 ptype;
 
-	ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
 	rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
 		   IAVF_RXD_QW1_ERROR_SHIFT;
 	rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
 		    IAVF_RXD_QW1_STATUS_SHIFT;
-	decoded = decode_rx_desc_ptype(ptype);
 
 	skb->ip_summed = CHECKSUM_NONE;
 
@@ -1058,14 +1033,12 @@ static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
 
 /**
  * iavf_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
+ * @decoded: decoded Rx packet type
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline int iavf_ptype_to_htype(u8 ptype)
+static int iavf_ptype_to_htype(struct iavf_rx_ptype_decoded decoded)
 {
-	struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
-
 	if (!decoded.known)
 		return PKT_HASH_TYPE_NONE;
 
@@ -1085,27 +1058,49 @@ static inline int iavf_ptype_to_htype(u8 ptype)
  * @rx_desc: specific descriptor
  * @skb: skb currently being received and modified
  * @qword: `wb.qword1.status_error_len` from the descriptor
+ * @decoded: decoded Rx packet type
  **/
-static inline void iavf_rx_hash(struct iavf_ring *ring,
-				union iavf_rx_desc *rx_desc,
-				struct sk_buff *skb,
-				u64 qword)
+static void iavf_rx_hash(const struct iavf_ring *ring,
+			 const union iavf_rx_desc *rx_desc,
+			 struct sk_buff *skb, u64 qword,
+			 struct iavf_rx_ptype_decoded decoded)
 {
-	u32 hash;
 	const u64 rss_mask = (u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
 			     IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT;
-	u8 rx_ptype;
+	u32 hash;
 
 	if (!(ring->netdev->features & NETIF_F_RXHASH))
 		return;
 
 	if ((qword & rss_mask) == rss_mask) {
-		rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
 		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-		skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
+		skb_set_hash(skb, hash, iavf_ptype_to_htype(decoded));
 	}
 }
 
+static void iavf_rx_vlan(const struct iavf_ring *rx_ring,
+			 const union iavf_rx_desc *rx_desc,
+			 struct sk_buff *skb, u64 qword)
+{
+	u16 vlan_tag = 0;
+
+	if ((qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) &&
+	    (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1))
+		vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
+	if ((rx_desc->wb.qword2.ext_status &
+	     cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT))) &&
+	    (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2))
+		vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
+
+	if (!(vlan_tag & VLAN_VID_MASK))
+		return;
+
+	if (rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+	else if (rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
+}
+
 /**
  * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -1117,19 +1112,21 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
  * order to populate the hash, checksum, VLAN, protocol, and
  * other fields within the skb.
  **/
-static inline
-void iavf_process_skb_fields(struct iavf_ring *rx_ring,
-			     union iavf_rx_desc *rx_desc, struct sk_buff *skb,
-			     u64 qword)
+void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
+			     const union iavf_rx_desc *rx_desc,
+			     struct sk_buff *skb, u64 qword)
 {
-	iavf_rx_hash(rx_ring, rx_desc, skb, qword);
+	struct iavf_rx_ptype_decoded decoded;
+	u32 ptype;
 
-	iavf_rx_checksum(rx_ring->vsi, skb, qword);
+	ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
+	decoded = decode_rx_desc_ptype(ptype);
 
-	skb_record_rx_queue(skb, rx_ring->queue_index);
+	iavf_rx_hash(rx_ring, rx_desc, skb, qword, decoded);
+	iavf_rx_checksum(rx_ring->vsi, skb, qword, decoded);
+	iavf_rx_vlan(rx_ring, rx_desc, skb, qword);
 
-	/* modifies the skb - consumes the enet header */
-	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+	skb_record_rx_queue(skb, rx_ring->queue_index);
 }
 
 /**
@@ -1287,7 +1284,6 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		union iavf_rx_desc *rx_desc;
 		u32 size, put_size;
 		struct page *page;
-		u16 vlan_tag = 0;
 		u64 qword;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -1397,16 +1393,9 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		/* populate checksum, VLAN, and protocol */
 		iavf_process_skb_fields(rx_ring, rx_desc, skb, qword);
 
-		if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
-		    rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
-			vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
-		if (rx_desc->wb.qword2.ext_status &
-		    cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
-		    rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
-			vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
-
 		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
-		iavf_receive_skb(rx_ring, skb, vlan_tag);
+		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+		napi_gro_receive(&rx_ring->q_vector->napi, skb);
 		skb = NULL;
 
 		/* update budget accounting */
3 changes: 3 additions & 0 deletions drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -386,6 +386,9 @@ bool __iavf_chk_linearize(struct sk_buff *skb);
 
 DECLARE_STATIC_KEY_FALSE(iavf_xdp_locking_key);
 
+void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
+			     const union iavf_rx_desc *rx_desc,
+			     struct sk_buff *skb, u64 qword);
 int iavf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		  u32 flags);
 
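
With the prototype exported above, a different file can now reuse the
helper. A minimal sketch of what a future XSk Rx consumer might look
like — iavf_xsk_rx_one() is a hypothetical name for illustration, not
part of this commit:

static void iavf_xsk_rx_one(struct iavf_ring *rx_ring,
                            const union iavf_rx_desc *rx_desc,
                            struct sk_buff *skb)
{
        u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

        /* one call populates hash, csum, VLAN and the Rx queue */
        iavf_process_skb_fields(rx_ring, rx_desc, skb, qword);

        /* the classic eth_type_trans() + napi_gro_receive() pair */
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
        napi_gro_receive(&rx_ring->q_vector->napi, skb);
}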
