@@ -24,36 +24,91 @@ DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
 
 struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
 				   struct bnxt_tx_ring_info *txr,
-				   dma_addr_t mapping, u32 len)
+				   dma_addr_t mapping, u32 len,
+				   struct xdp_buff *xdp)
 {
-	struct bnxt_sw_tx_bd *tx_buf;
+	struct skb_shared_info *sinfo;
+	struct bnxt_sw_tx_bd *tx_buf, *first_buf;
 	struct tx_bd *txbd;
+	int num_frags = 0;
 	u32 flags;
 	u16 prod;
+	int i;
+
+	if (xdp && xdp_buff_has_frags(xdp)) {
+		sinfo = xdp_get_shared_info_from_buff(xdp);
+		num_frags = sinfo->nr_frags;
+	}
 
+	/* fill up the first buffer */
 	prod = txr->tx_prod;
 	tx_buf = &txr->tx_buf_ring[prod];
+	first_buf = tx_buf;
+	tx_buf->nr_frags = num_frags;
+	if (xdp)
+		tx_buf->page = virt_to_head_page(xdp->data);
 
 	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-	flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
-		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+	flags = (len << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT);
 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
 	txbd->tx_bd_opaque = prod;
 	txbd->tx_bd_haddr = cpu_to_le64(mapping);
 
+	/* now let us fill up the frags into the next buffers */
+	for (i = 0; i < num_frags; i++) {
+		skb_frag_t *frag = &sinfo->frags[i];
+		struct bnxt_sw_tx_bd *frag_tx_buf;
+		struct pci_dev *pdev = bp->pdev;
+		dma_addr_t frag_mapping;
+		int frag_len;
+
+		prod = NEXT_TX(prod);
+		txr->tx_prod = prod;
+
+		/* fill up the next buffer */
+		frag_tx_buf = &txr->tx_buf_ring[prod];
+		frag_tx_buf->page = skb_frag_page(frag);
+
+		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+		frag_len = skb_frag_size(frag);
+		frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
+						frag_len, DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
+			return NULL;
+
+		dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
+
+		flags = frag_len << TX_BD_LEN_SHIFT;
+		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+		txbd->tx_bd_opaque = prod;
+		txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
+
+		len = frag_len;
+	}
+
+	flags &= ~TX_BD_LEN;
+	txbd->tx_bd_len_flags_type = cpu_to_le32((len << TX_BD_LEN_SHIFT) | flags |
+						 TX_BD_FLAGS_PACKET_END);
+	/* Sync TX BD */
+	wmb();
 	prod = NEXT_TX(prod);
 	txr->tx_prod = prod;
-	return tx_buf;
+
+	return first_buf;
 }
 
 static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
-			    dma_addr_t mapping, u32 len, u16 rx_prod)
+			    dma_addr_t mapping, u32 len, u16 rx_prod,
+			    struct xdp_buff *xdp)
 {
 	struct bnxt_sw_tx_bd *tx_buf;
 
-	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
 	tx_buf->rx_prod = rx_prod;
 	tx_buf->action = XDP_TX;
+
 }
 
 static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
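For context, and not part of the patch: bnxt_xmit_bd() now sizes the BD chain from the xdp_buff's frag count, using the multi-buffer helpers xdp_buff_has_frags() and xdp_get_shared_info_from_buff() from include/net/xdp.h. A minimal sketch of that accounting pattern; the helper name xdp_extra_bd_count() is hypothetical, for illustration only:

/* How many BDs beyond the head BD a given xdp_buff consumes; this
 * mirrors the num_frags accounting in bnxt_xmit_bd() above.
 */
static int xdp_extra_bd_count(struct xdp_buff *xdp)
{
	/* single-buffer packets need only the head BD */
	if (!xdp || !xdp_buff_has_frags(xdp))
		return 0;

	/* frag metadata lives in the skb_shared_info at the tail of the
	 * head buffer, just as it does for an skb
	 */
	return xdp_get_shared_info_from_buff(xdp)->nr_frags;
}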
@@ -63,7 +118,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
 {
 	struct bnxt_sw_tx_bd *tx_buf;
 
-	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
 	tx_buf->action = XDP_REDIRECT;
 	tx_buf->xdpf = xdpf;
 	dma_unmap_addr_set(tx_buf, mapping, mapping);
@@ -78,7 +133,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 	struct bnxt_sw_tx_bd *tx_buf;
 	u16 tx_cons = txr->tx_cons;
 	u16 last_tx_cons = tx_cons;
-	int i;
+	int i, j, frags;
 
 	for (i = 0; i < nr_pkts; i++) {
 		tx_buf = &txr->tx_buf_ring[tx_cons];
@@ -96,13 +151,21 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 		} else if (tx_buf->action == XDP_TX) {
 			rx_doorbell_needed = true;
 			last_tx_cons = tx_cons;
+
+			frags = tx_buf->nr_frags;
+			for (j = 0; j < frags; j++) {
+				tx_cons = NEXT_TX(tx_cons);
+				tx_buf = &txr->tx_buf_ring[tx_cons];
+				page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
+			}
 		}
 		tx_cons = NEXT_TX(tx_cons);
 	}
 	txr->tx_cons = tx_cons;
 	if (rx_doorbell_needed) {
 		tx_buf = &txr->tx_buf_ring[last_tx_cons];
 		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
+
 	}
 }
 
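An editorial aside, not part of the patch: each completed XDP_TX packet now occupies 1 + nr_frags consumer slots, and every frag slot holds a page borrowed from the RX page pool. A sketch of that walk as a standalone helper, assuming the same bnxt types; the name bnxt_recycle_tx_frags() is hypothetical:

/* Advance past the frag BDs of one completed XDP_TX packet and hand
 * their pages back to the RX page pool they came from.
 */
static u16 bnxt_recycle_tx_frags(struct bnxt_tx_ring_info *txr,
				 struct bnxt_rx_ring_info *rxr,
				 struct bnxt_sw_tx_bd *head, u16 tx_cons)
{
	int j;

	for (j = 0; j < head->nr_frags; j++) {
		tx_cons = NEXT_TX(tx_cons);
		page_pool_recycle_direct(rxr->page_pool,
					 txr->tx_buf_ring[tx_cons].page);
	}
	return tx_cons;	/* now points at the last frag slot */
}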
@@ -133,6 +196,23 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
 }
 
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+			      struct xdp_buff *xdp)
+{
+	struct skb_shared_info *shinfo;
+	int i;
+
+	if (!xdp || !xdp_buff_has_frags(xdp))
+		return;
+	shinfo = xdp_get_shared_info_from_buff(xdp);
+	for (i = 0; i < shinfo->nr_frags; i++) {
+		struct page *page = skb_frag_page(&shinfo->frags[i]);
+
+		page_pool_recycle_direct(rxr->page_pool, page);
+	}
+	shinfo->nr_frags = 0;
+}
+
 /* returns the following:
  * true    - packet consumed by XDP and new buffer is allocated.
  * false   - packet should be passed to the stack.
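A usage note, not part of the patch: every path below that gives up on a packet without queueing it for transmit pairs this new helper with the existing reuse call, so the frag pages return to the pool before the head buffer is recycled. The call sequence the later hunks repeat:

	/* frag pages are page_pool pages: give them back first, then
	 * reuse the head buffer for a fresh RX descriptor
	 */
	bnxt_xdp_buff_frags_free(rxr, &xdp);
	bnxt_reuse_rx_data(rxr, cons, page);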
@@ -145,6 +225,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	struct bnxt_sw_rx_bd *rx_buf;
 	struct pci_dev *pdev;
 	dma_addr_t mapping;
+	u32 tx_needed = 1;
 	void *orig_data;
 	u32 tx_avail;
 	u32 offset;
@@ -180,18 +261,28 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	case XDP_TX:
 		rx_buf = &rxr->rx_buf_ring[cons];
 		mapping = rx_buf->mapping - bp->rx_dma_offset;
+		*event = 0;
+
+		if (unlikely(xdp_buff_has_frags(&xdp))) {
+			struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
 
-		if (tx_avail < 1) {
+			tx_needed += sinfo->nr_frags;
+			*event = BNXT_AGG_EVENT;
+		}
+
+		if (tx_avail < tx_needed) {
 			trace_xdp_exception(bp->dev, xdp_prog, act);
+			bnxt_xdp_buff_frags_free(rxr, &xdp);
 			bnxt_reuse_rx_data(rxr, cons, page);
 			return true;
 		}
 
-		*event = BNXT_TX_EVENT;
 		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
 					   bp->rx_dir);
+
+		*event |= BNXT_TX_EVENT;
 		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
-				NEXT_RX(rxr->rx_prod));
+				NEXT_RX(rxr->rx_prod), &xdp);
 		bnxt_reuse_rx_data(rxr, cons, page);
 		return true;
 	case XDP_REDIRECT:
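A worked example of the new ring-space check, not from the patch: a packet whose xdp_buff carries three frags needs tx_needed = 1 + 3 = 4 free BDs (head plus one per frag). If tx_avail is less than 4, the packet is dropped whole and its frag pages recycled via bnxt_xdp_buff_frags_free(), since a BD chain cannot be queued partially. Note also that *event is now built up with |= so a multi-frag XDP_TX packet reports both BNXT_AGG_EVENT and BNXT_TX_EVENT to the caller.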
@@ -208,6 +299,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	/* if we are unable to allocate a new buffer, abort and reuse */
 	if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
 		trace_xdp_exception(bp->dev, xdp_prog, act);
+		bnxt_xdp_buff_frags_free(rxr, &xdp);
 		bnxt_reuse_rx_data(rxr, cons, page);
 		return true;
 	}
@@ -227,6 +319,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		trace_xdp_exception(bp->dev, xdp_prog, act);
 		fallthrough;
 	case XDP_DROP:
+		bnxt_xdp_buff_frags_free(rxr, &xdp);
 		bnxt_reuse_rx_data(rxr, cons, page);
 		break;
 	}