From 6f252a5d9ffb309709aca13943750dc718d8a86e Mon Sep 17 00:00:00 2001
From: Alexander Lobakin
Date: Thu, 23 Feb 2023 19:02:22 +0100
Subject: [PATCH] iavf: consolidate skb fields processing

For now, filling the skb fields on Rx is scattered across the Rx
polling function. This makes it harder to reuse the code on the XSk
Rx path and also sometimes costs CPU cycles (e.g. the packet type is
looked up and decoded twice).
Make it consistent and do everything in iavf_process_skb_fields():
first fetch and decode the packet type, then fill in the hash,
checksum and VLAN tag, with the VLAN handling moved here as well.
iavf_receive_skb() then collapses into the classic eth_type_trans() +
napi_gro_receive() pair and is removed.
Finally, make the fields processing function global in order to be
able to call it from a different file later on.

Signed-off-by: Alexander Lobakin
---
 drivers/net/ethernet/intel/iavf/iavf_txrx.c | 107 +++++++++-----------
 drivers/net/ethernet/intel/iavf/iavf_txrx.h |   3 +
 2 files changed, 51 insertions(+), 59 deletions(-)

diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 6c3f08af7a2107..e2968c4173e9dd 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -890,27 +890,6 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
 	return ret;
 }
 
-/**
- * iavf_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void iavf_receive_skb(struct iavf_ring *rx_ring,
-			     struct sk_buff *skb, u16 vlan_tag)
-{
-	struct iavf_q_vector *q_vector = rx_ring->q_vector;
-
-	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-	    (vlan_tag & VLAN_VID_MASK))
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-	else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
-		 vlan_tag & VLAN_VID_MASK)
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
-
-	napi_gro_receive(&q_vector->napi, skb);
-}
-
 /**
  * __iavf_alloc_rx_pages - Replace used receive pages
  * @rx_ring: ring to place buffers on
@@ -979,22 +958,18 @@ void iavf_alloc_rx_pages(struct iavf_ring *rxr)
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
  * @qword: `wb.qword1.status_error_len` from the descriptor
+ * @decoded: decoded Rx packet type
  **/
-static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
-				    struct sk_buff *skb,
-				    u64 qword)
+static void iavf_rx_checksum(struct iavf_vsi *vsi, struct sk_buff *skb,
+			     u64 qword, struct iavf_rx_ptype_decoded decoded)
 {
-	struct iavf_rx_ptype_decoded decoded;
 	u32 rx_error, rx_status;
 	bool ipv4, ipv6;
-	u8 ptype;
 
-	ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
 	rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
 		   IAVF_RXD_QW1_ERROR_SHIFT;
 	rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
 		    IAVF_RXD_QW1_STATUS_SHIFT;
 
-	decoded = decode_rx_desc_ptype(ptype);
 
 	skb->ip_summed = CHECKSUM_NONE;
 
@@ -1058,14 +1033,12 @@ static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
 
 /**
  * iavf_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
+ * @decoded: decoded Rx packet type
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline int iavf_ptype_to_htype(u8 ptype)
+static int iavf_ptype_to_htype(struct iavf_rx_ptype_decoded decoded)
 {
-	struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
-
 	if (!decoded.known)
 		return PKT_HASH_TYPE_NONE;
 
@@ -1085,27 +1058,49 @@ static inline int iavf_ptype_to_htype(u8 ptype)
  * @rx_desc: specific descriptor
  * @skb: skb currently being received and modified
  * @qword: `wb.qword1.status_error_len` from the descriptor
+ * @decoded: decoded Rx packet type
  **/
-static inline void iavf_rx_hash(struct iavf_ring *ring,
-				union iavf_rx_desc *rx_desc,
-				struct sk_buff *skb,
-				u64 qword)
+static void iavf_rx_hash(const struct iavf_ring *ring,
+			 const union iavf_rx_desc *rx_desc,
+			 struct sk_buff *skb, u64 qword,
+			 struct iavf_rx_ptype_decoded decoded)
 {
-	u32 hash;
 	const u64 rss_mask = (u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
 			     IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT;
-	u8 rx_ptype;
+	u32 hash;
 
 	if (!(ring->netdev->features & NETIF_F_RXHASH))
 		return;
 
 	if ((qword & rss_mask) == rss_mask) {
-		rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
 		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-		skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
+		skb_set_hash(skb, hash, iavf_ptype_to_htype(decoded));
 	}
 }
 
+static void iavf_rx_vlan(const struct iavf_ring *rx_ring,
+			 const union iavf_rx_desc *rx_desc,
+			 struct sk_buff *skb, u64 qword)
+{
+	u16 vlan_tag = 0;
+
+	if ((qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) &&
+	    (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1))
+		vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
+	if ((rx_desc->wb.qword2.ext_status &
+	     cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT))) &&
+	    (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2))
+		vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
+
+	if (!(vlan_tag & VLAN_VID_MASK))
+		return;
+
+	if (rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+	else if (rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
+}
+
 /**
  * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -1117,19 +1112,21 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
  * order to populate the hash, checksum, VLAN, protocol, and
  * other fields within the skb.
  **/
-static inline
-void iavf_process_skb_fields(struct iavf_ring *rx_ring,
-			     union iavf_rx_desc *rx_desc, struct sk_buff *skb,
-			     u64 qword)
+void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
+			     const union iavf_rx_desc *rx_desc,
+			     struct sk_buff *skb, u64 qword)
 {
-	iavf_rx_hash(rx_ring, rx_desc, skb, qword);
+	struct iavf_rx_ptype_decoded decoded;
+	u32 ptype;
 
-	iavf_rx_checksum(rx_ring->vsi, skb, qword);
+	ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
+	decoded = decode_rx_desc_ptype(ptype);
 
-	skb_record_rx_queue(skb, rx_ring->queue_index);
+	iavf_rx_hash(rx_ring, rx_desc, skb, qword, decoded);
+	iavf_rx_checksum(rx_ring->vsi, skb, qword, decoded);
+	iavf_rx_vlan(rx_ring, rx_desc, skb, qword);
 
-	/* modifies the skb - consumes the enet header */
-	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+	skb_record_rx_queue(skb, rx_ring->queue_index);
 }
 
 /**
@@ -1287,7 +1284,6 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 	union iavf_rx_desc *rx_desc;
 	u32 size, put_size;
 	struct page *page;
-	u16 vlan_tag = 0;
 	u64 qword;
 
 	/* return some buffers to hardware, one at a time is too slow */
@@ -1397,16 +1393,9 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		/* populate checksum, VLAN, and protocol */
 		iavf_process_skb_fields(rx_ring, rx_desc, skb, qword);
 
-		if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
-		    rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
-			vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
-		if (rx_desc->wb.qword2.ext_status &
-		    cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
-		    rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
-			vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
-
 		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
-		iavf_receive_skb(rx_ring, skb, vlan_tag);
+		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+		napi_gro_receive(&rx_ring->q_vector->napi, skb);
 		skb = NULL;
 
 		/* update budget accounting */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 9f4d2aef11f0c6..79aa855aba812e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -386,6 +386,9 @@ bool __iavf_chk_linearize(struct sk_buff *skb);
 
 DECLARE_STATIC_KEY_FALSE(iavf_xdp_locking_key);
 
+void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
+			     const union iavf_rx_desc *rx_desc,
+			     struct sk_buff *skb, u64 qword);
 int iavf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		  u32 flags);
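
To illustrate why iavf_process_skb_fields() is made global: a future XSk/ZC
Rx completion path can reuse it unchanged. Below is a minimal sketch of such
a caller, not part of this patch; iavf_build_skb_zc() is a hypothetical
helper name, while every other call is taken from the patch itself.

	/* Hypothetical ZC Rx completion loop body, assuming an
	 * iavf_build_skb_zc() helper that builds an skb from an XSk
	 * buffer. The packet type is decoded exactly once, inside
	 * iavf_process_skb_fields().
	 */
	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

	skb = iavf_build_skb_zc(rx_ring, xdp);
	if (unlikely(!skb))
		break;

	/* hash, checksum, VLAN tag and Rx queue, all in one place */
	iavf_process_skb_fields(rx_ring, rx_desc, skb, qword);

	/* the classic pair formerly hidden in iavf_receive_skb() */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);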