@@ -235,19 +235,18 @@ static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
 	return false;
 }
 
-static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
-					unsigned int start_index,
-					unsigned int num)
+static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
+					   unsigned int start_index,
+					   unsigned int num)
 {
 	unsigned int i, index;
 
 	for (i = 0; i < num; i++) {
 		index = (i + start_index) % gq->ring_size;
-		if (gq->skbs[index])
+		if (gq->rx_bufs[index])
 			continue;
-		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
-							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
-		if (!gq->skbs[index])
+		gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
+		if (!gq->rx_bufs[index])
 			goto err;
 	}
 
@@ -256,8 +255,8 @@ static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
 err:
 	for (; i-- > 0; ) {
 		index = (i + start_index) % gq->ring_size;
-		dev_kfree_skb(gq->skbs[index]);
-		gq->skbs[index] = NULL;
+		skb_free_frag(gq->rx_bufs[index]);
+		gq->rx_bufs[index] = NULL;
 	}
 
 	return -ENOMEM;
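As a side note, netdev_alloc_frag() hands back a bare page-fragment pointer rather than a struct sk_buff, so the matching release on the error path is skb_free_frag(). A minimal sketch of that pairing, with helper names made up purely for illustration:

#include <linux/skbuff.h>

/* Illustrative only: allocate and release one page-fragment RX buffer. */
static void *example_alloc_rx_buf(unsigned int buf_size)
{
	/* Carved from the per-CPU netdev page-fragment cache. */
	return netdev_alloc_frag(buf_size);
}

static void example_free_rx_buf(void *buf)
{
	/* For fragments that were never wrapped by build_skb(). */
	skb_free_frag(buf);
}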
@@ -275,16 +274,17 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
 		gq->rx_ring = NULL;
 
 		for (i = 0; i < gq->ring_size; i++)
-			dev_kfree_skb(gq->skbs[i]);
+			skb_free_frag(gq->rx_bufs[i]);
+		kfree(gq->rx_bufs);
+		gq->rx_bufs = NULL;
 	} else {
 		dma_free_coherent(ndev->dev.parent,
 				  sizeof(struct rswitch_ext_desc) *
 				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
 		gq->tx_ring = NULL;
+		kfree(gq->skbs);
+		gq->skbs = NULL;
 	}
-
-	kfree(gq->skbs);
-	gq->skbs = NULL;
 }
 
 static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
@@ -308,17 +308,20 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
 	gq->ring_size = ring_size;
 	gq->ndev = ndev;
 
-	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
-	if (!gq->skbs)
-		return -ENOMEM;
-
 	if (!dir_tx) {
-		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
+		gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
+		if (!gq->rx_bufs)
+			return -ENOMEM;
+		if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
+			goto out;
 
 		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
 						 sizeof(struct rswitch_ext_ts_desc) *
 						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
 	} else {
+		gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
+		if (!gq->skbs)
+			return -ENOMEM;
 		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
 						 sizeof(struct rswitch_ext_desc) *
 						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
@@ -367,12 +370,13 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
 		if (!gq->dir_tx) {
 			dma_addr = dma_map_single(ndev->dev.parent,
-						  gq->skbs[i]->data, PKT_BUF_SZ,
+						  gq->rx_bufs[i] + RSWITCH_HEADROOM,
+						  RSWITCH_MAP_BUF_SIZE,
 						  DMA_FROM_DEVICE);
 			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 				goto err;
 
-			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
 			rswitch_desc_set_dptr(&desc->desc, dma_addr);
 			desc->desc.die_dt = DT_FEMPTY | DIE;
 		} else {
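The RSWITCH_HEADROOM, RSWITCH_MAP_BUF_SIZE, RSWITCH_DESC_BUF_SIZE and RSWITCH_BUF_SIZE macros live in rswitch.h and are not visible in this hunk. A plausible layout they would encode, written here with made-up EXAMPLE_* names as an assumption rather than a copy of the header:

#include <linux/skbuff.h>

/* Assumed layout, for illustration only: a build_skb()-ready fragment
 * needs stack headroom in front of the DMA data area and room for
 * struct skb_shared_info behind it.
 */
#define EXAMPLE_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
#define EXAMPLE_DATA_SIZE	2048	/* largest payload offered to the HW */
#define EXAMPLE_TAILROOM	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* Size handed to netdev_alloc_frag() and later to build_skb(). */
#define EXAMPLE_BUF_SIZE	(EXAMPLE_HEADROOM + EXAMPLE_DATA_SIZE + EXAMPLE_TAILROOM)
/* Only the data area after the headroom is DMA-mapped for the device. */
#define EXAMPLE_MAP_SIZE	EXAMPLE_DATA_SIZE

Under that scheme the device is pointed at buf + headroom, the descriptor advertises the data area size, and the full fragment size is what build_skb() later consumes.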
@@ -395,8 +399,8 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 	if (!gq->dir_tx) {
 		for (desc = gq->tx_ring; i-- > 0; desc++) {
 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
-			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
-					 DMA_FROM_DEVICE);
+			dma_unmap_single(ndev->dev.parent, dma_addr,
+					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
 		}
 	}
 
@@ -433,12 +437,13 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
 		desc = &gq->rx_ring[index];
 		if (!gq->dir_tx) {
 			dma_addr = dma_map_single(ndev->dev.parent,
-						  gq->skbs[index]->data, PKT_BUF_SZ,
+						  gq->rx_bufs[index] + RSWITCH_HEADROOM,
+						  RSWITCH_MAP_BUF_SIZE,
 						  DMA_FROM_DEVICE);
 			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 				goto err;
 
-			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
 			rswitch_desc_set_dptr(&desc->desc, dma_addr);
 			dma_wmb();
 			desc->desc.die_dt = DT_FEMPTY | DIE;
@@ -456,8 +461,8 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
 			index = (i + start_index) % gq->ring_size;
 			desc = &gq->rx_ring[index];
 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
-			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
-					 DMA_FROM_DEVICE);
+			dma_unmap_single(ndev->dev.parent, dma_addr,
+					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
 		}
 	}
 
@@ -724,10 +729,15 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
 		dma_rmb();
 		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
-		skb = gq->skbs[gq->cur];
-		gq->skbs[gq->cur] = NULL;
 		dma_addr = rswitch_desc_get_dptr(&desc->desc);
-		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
+		dma_unmap_single(ndev->dev.parent, dma_addr,
+				 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
+		skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
+		if (!skb)
+			goto out;
+		skb_reserve(skb, RSWITCH_HEADROOM);
+		skb_put(skb, pkt_len);
+
 		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
 		if (get_ts) {
 			struct skb_shared_hwtstamps *shhwtstamps;
@@ -739,12 +749,13 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
 			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
 		}
-		skb_put(skb, pkt_len);
 		skb->protocol = eth_type_trans(skb, ndev);
 		napi_gro_receive(&rdev->napi, skb);
 		rdev->ndev->stats.rx_packets++;
 		rdev->ndev->stats.rx_bytes += pkt_len;
 
+out:
+		gq->rx_bufs[gq->cur] = NULL;
 		gq->cur = rswitch_next_queue_index(gq, true, 1);
 		desc = &gq->rx_ring[gq->cur];
 
@@ -753,7 +764,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 	}
 
 	num = rswitch_get_num_cur_queues(gq);
-	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
+	ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
 	if (ret < 0)
 		goto err;
 	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
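Taken together, the RX path now follows the usual build_skb() recipe: unmap the fragment, wrap it in an skb without copying, reserve the headroom the device skipped, then mark the received length before handing the skb to NAPI. A compressed sketch of that sequence, simplified rather than a verbatim copy of rswitch_rx(), with all parameter names chosen only for illustration:

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative helper, not part of the driver: turn one completed RX
 * page fragment into an skb and pass it up through NAPI.
 */
static void example_rx_complete(struct napi_struct *napi, struct net_device *ndev,
				struct device *dma_dev, void *buf, dma_addr_t dma_addr,
				unsigned int map_size, unsigned int buf_size,
				unsigned int headroom, unsigned int pkt_len)
{
	struct sk_buff *skb;

	dma_unmap_single(dma_dev, dma_addr, map_size, DMA_FROM_DEVICE);

	/* build_skb() wraps the existing fragment; the payload is not copied. */
	skb = build_skb(buf, buf_size);
	if (!skb) {
		/* No skb owns the fragment, so release it directly. */
		skb_free_frag(buf);
		return;
	}

	skb_reserve(skb, headroom);	/* data starts after the reserved headroom */
	skb_put(skb, pkt_len);		/* expose only the bytes the HW wrote */
	skb->protocol = eth_type_trans(skb, ndev);
	napi_gro_receive(napi, skb);
}

Because ownership of the fragment moves to the skb, the ring slot is cleared after completion and the refill path (rswitch_gwca_queue_alloc_rx_buf() above) allocates a fresh fragment for it.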