@@ -141,49 +141,6 @@ static void cpu_map_kthread_stop(struct work_struct *work)
 	kthread_stop(rcpu->kthread);
 }
 
-static struct sk_buff *cpu_map_build_skb(struct xdp_frame *xdpf,
-					 struct sk_buff *skb)
-{
-	unsigned int hard_start_headroom;
-	unsigned int frame_size;
-	void *pkt_data_start;
-
-	/* Part of headroom was reserved to xdpf */
-	hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;
-
-	/* Memory size backing xdp_frame data already have reserved
-	 * room for build_skb to place skb_shared_info in tailroom.
-	 */
-	frame_size = xdpf->frame_sz;
-
-	pkt_data_start = xdpf->data - hard_start_headroom;
-	skb = build_skb_around(skb, pkt_data_start, frame_size);
-	if (unlikely(!skb))
-		return NULL;
-
-	skb_reserve(skb, hard_start_headroom);
-	__skb_put(skb, xdpf->len);
-	if (xdpf->metasize)
-		skb_metadata_set(skb, xdpf->metasize);
-
-	/* Essential SKB info: protocol and skb->dev */
-	skb->protocol = eth_type_trans(skb, xdpf->dev_rx);
-
-	/* Optional SKB info, currently missing:
-	 * - HW checksum info (skb->ip_summed)
-	 * - HW RX hash (skb_set_hash)
-	 * - RX ring dev queue index (skb_record_rx_queue)
-	 */
-
-	/* Until page_pool get SKB return path, release DMA here */
-	xdp_release_frame(xdpf);
-
-	/* Allow SKB to reuse area used by xdp_frame */
-	xdp_scrub_frame(xdpf);
-
-	return skb;
-}
-
 static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
 {
 	/* The tear-down procedure should have made sure that queue is
@@ -350,7 +307,8 @@ static int cpu_map_kthread_run(void *data)
 		struct sk_buff *skb = skbs[i];
 		int ret;
 
-		skb = cpu_map_build_skb(xdpf, skb);
+		skb = __xdp_build_skb_from_frame(xdpf, skb,
+						 xdpf->dev_rx);
 		if (!skb) {
 			xdp_return_frame(xdpf);
 			continue;
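
For context: this diff deletes the cpumap-local skb builder and switches the kthread's frame loop to a shared helper in the core XDP code. Below is a sketch of what __xdp_build_skb_from_frame() plausibly looks like in net/core/xdp.c, assuming it carries the removed cpu_map_build_skb() body over essentially unchanged, with the RX device generalized into a dev parameter (matching the xdpf->dev_rx argument at the call site above); treat it as illustrative, not verbatim upstream source.

/* Sketch (assumed, not verbatim upstream): generic xdp_frame -> sk_buff
 * conversion, parameterized on the receiving net_device.
 */
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	unsigned int headroom, frame_size;
	void *hard_start;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Until page_pool gets an SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}

With the device passed explicitly instead of read from xdpf->dev_rx inside the helper, other xdp_frame-to-skb conversion sites (veth, generic XDP, and the like) can in principle reuse the same routine rather than open-coding build_skb_around() around an xdp_frame.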