@@ -104,6 +104,25 @@ void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
 }
 EXPORT_SYMBOL(xp_set_rxq_info);
 
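+/* Tell the driver to disable zero-copy for this queue, if it was enabled. */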
+static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
+{
+        struct netdev_bpf bpf;
+        int err;
+
+        ASSERT_RTNL();
+
+        if (pool->umem->zc) {
+                bpf.command = XDP_SETUP_XSK_POOL;
+                bpf.xsk.pool = NULL;
+                bpf.xsk.queue_id = pool->queue_id;
+
+                err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);
+
+                if (err)
+                        WARN(1, "Failed to disable zero-copy!\n");
+        }
+}
+
 int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
                   u16 queue_id, u16 flags)
 {
@@ -122,6 +141,8 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
         if (xsk_get_pool_from_qid(netdev, queue_id))
                 return -EBUSY;
 
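+        /* Set these early: the driver's pool setup (xp_dma_map()) and the
+         * error path (xp_disable_drv_zc()) both rely on them.
+         */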
+        pool->netdev = netdev;
+        pool->queue_id = queue_id;
         err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
         if (err)
                 return err;
@@ -155,11 +176,15 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
         if (err)
                 goto err_unreg_pool;
 
-        pool->netdev = netdev;
-        pool->queue_id = queue_id;
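+        /* A zero-copy capable driver must have DMA mapped the umem by now. */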
+        if (!pool->dma_pages) {
+                WARN(1, "Driver did not DMA map zero-copy buffers");
+                err = -EINVAL;
+                goto err_unreg_xsk;
+        }
         pool->umem->zc = true;
         return 0;
 
+err_unreg_xsk:
+        xp_disable_drv_zc(pool);
 err_unreg_pool:
         if (!force_zc)
                 err = 0; /* fallback to copy mode */
@@ -170,25 +195,10 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
 
 void xp_clear_dev(struct xsk_buff_pool *pool)
 {
-        struct netdev_bpf bpf;
-        int err;
-
-        ASSERT_RTNL();
-
         if (!pool->netdev)
                 return;
 
-        if (pool->umem->zc) {
-                bpf.command = XDP_SETUP_XSK_POOL;
-                bpf.xsk.pool = NULL;
-                bpf.xsk.queue_id = pool->queue_id;
-
-                err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);
-
-                if (err)
-                        WARN(1, "Failed to disable zero-copy!\n");
-        }
-
+        xp_disable_drv_zc(pool);
         xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
         dev_put(pool->netdev);
         pool->netdev = NULL;
@@ -233,70 +243,159 @@ void xp_put_pool(struct xsk_buff_pool *pool)
         }
 }
 
-void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
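+/* Look up an existing DMA mapping of this umem for the pool's netdev, so
+ * that sockets sharing the umem can reuse it instead of remapping the pages.
+ */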
+static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
+{
+        struct xsk_dma_map *dma_map;
+
+        list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
+                if (dma_map->netdev == pool->netdev)
+                        return dma_map;
+        }
+
+        return NULL;
+}
+
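+/* Allocate a new DMA mapping entry and link it into the umem's list. */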
+static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
+                                             u32 nr_pages, struct xdp_umem *umem)
+{
+        struct xsk_dma_map *dma_map;
+
+        dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
+        if (!dma_map)
+                return NULL;
+
+        dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
+        if (!dma_map->dma_pages) {
+                kfree(dma_map);
+                return NULL;
+        }
+
+        dma_map->netdev = netdev;
+        dma_map->dev = dev;
+        dma_map->dma_need_sync = false;
+        dma_map->dma_pages_cnt = nr_pages;
+        refcount_set(&dma_map->users, 0);
+        list_add(&dma_map->list, &umem->xsk_dma_list);
+        return dma_map;
+}
+
+static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
+{
+        list_del(&dma_map->list);
+        kvfree(dma_map->dma_pages);
+        kfree(dma_map);
+}
+
+static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
 {
         dma_addr_t *dma;
         u32 i;
 
-        if (pool->dma_pages_cnt == 0)
-                return;
-
-        for (i = 0; i < pool->dma_pages_cnt; i++) {
-                dma = &pool->dma_pages[i];
+        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
+                dma = &dma_map->dma_pages[i];
                 if (*dma) {
-                        dma_unmap_page_attrs(pool->dev, *dma, PAGE_SIZE,
+                        dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
                                              DMA_BIDIRECTIONAL, attrs);
                         *dma = 0;
                 }
         }
 
+        xp_destroy_dma_map(dma_map);
+}
+
+void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
+{
+        struct xsk_dma_map *dma_map;
+
+        if (pool->dma_pages_cnt == 0)
+                return;
+
+        dma_map = xp_find_dma_map(pool);
+        if (!dma_map) {
+                WARN(1, "Could not find dma_map for device");
+                return;
+        }
+
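+        /* The mapping may be shared with other pools; only tear it down
+         * when the last user drops its reference.
+         */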
+        if (!refcount_dec_and_test(&dma_map->users))
+                return;
+
+        __xp_dma_unmap(dma_map, attrs);
         kvfree(pool->dma_pages);
         pool->dma_pages_cnt = 0;
         pool->dev = NULL;
 }
 EXPORT_SYMBOL(xp_dma_unmap);
 
-static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
+static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
 {
         u32 i;
 
-        for (i = 0; i < pool->dma_pages_cnt - 1; i++) {
-                if (pool->dma_pages[i] + PAGE_SIZE == pool->dma_pages[i + 1])
-                        pool->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
+        for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
+                if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
+                        dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
                 else
-                        pool->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
+                        dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
         }
 }
 
+static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
+{
+        pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
+        if (!pool->dma_pages)
+                return -ENOMEM;
+
+        pool->dev = dma_map->dev;
+        pool->dma_pages_cnt = dma_map->dma_pages_cnt;
+        pool->dma_need_sync = dma_map->dma_need_sync;
+        refcount_inc(&dma_map->users);
+        memcpy(pool->dma_pages, dma_map->dma_pages,
+               pool->dma_pages_cnt * sizeof(*pool->dma_pages));
+
+        return 0;
+}
+
 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
                unsigned long attrs, struct page **pages, u32 nr_pages)
 {
+        struct xsk_dma_map *dma_map;
         dma_addr_t dma;
+        int err;
         u32 i;
 
-        pool->dma_pages = kvcalloc(nr_pages, sizeof(*pool->dma_pages),
-                                   GFP_KERNEL);
-        if (!pool->dma_pages)
-                return -ENOMEM;
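+        /* If another socket sharing this umem has already mapped it for this
+         * netdev, reuse that mapping.
+         */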
+        dma_map = xp_find_dma_map(pool);
+        if (dma_map) {
+                err = xp_init_dma_info(pool, dma_map);
+                if (err)
+                        return err;
 
-        pool->dev = dev;
-        pool->dma_pages_cnt = nr_pages;
-        pool->dma_need_sync = false;
+                return 0;
+        }
 
-        for (i = 0; i < pool->dma_pages_cnt; i++) {
+        dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
+        if (!dma_map)
+                return -ENOMEM;
+
+        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                 dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
                                          DMA_BIDIRECTIONAL, attrs);
                 if (dma_mapping_error(dev, dma)) {
-                        xp_dma_unmap(pool, attrs);
+                        __xp_dma_unmap(dma_map, attrs);
                         return -ENOMEM;
                 }
                 if (dma_need_sync(dev, dma))
-                        pool->dma_need_sync = true;
-                pool->dma_pages[i] = dma;
+                        dma_map->dma_need_sync = true;
+                dma_map->dma_pages[i] = dma;
         }
 
         if (pool->unaligned)
-                xp_check_dma_contiguity(pool);
+                xp_check_dma_contiguity(dma_map);
+
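+        /* Publish the new mapping to the pool and take the first reference. */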
+        err = xp_init_dma_info(pool, dma_map);
+        if (err) {
+                __xp_dma_unmap(dma_map, attrs);
+                return err;
+        }
+
         return 0;
 }
 EXPORT_SYMBOL(xp_dma_map);