@@ -47,160 +47,41 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
 }
 
-/* The umem is stored both in the _rx struct and the _tx struct as we do
- * not know if the device has more tx queues than rx, or the opposite.
- * This might also change during run time.
- */
-static int xsk_reg_pool_at_qid(struct net_device *dev,
-			       struct xsk_buff_pool *pool,
-			       u16 queue_id)
-{
-	if (queue_id >= max_t(unsigned int,
-			      dev->real_num_rx_queues,
-			      dev->real_num_tx_queues))
-		return -EINVAL;
-
-	if (queue_id < dev->real_num_rx_queues)
-		dev->_rx[queue_id].pool = pool;
-	if (queue_id < dev->real_num_tx_queues)
-		dev->_tx[queue_id].pool = pool;
-
-	return 0;
-}
-
-struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
-					    u16 queue_id)
+static void xdp_umem_unpin_pages(struct xdp_umem *umem)
 {
-	if (queue_id < dev->real_num_rx_queues)
-		return dev->_rx[queue_id].pool;
-	if (queue_id < dev->real_num_tx_queues)
-		return dev->_tx[queue_id].pool;
+	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
 
-	return NULL;
+	kfree(umem->pgs);
+	umem->pgs = NULL;
 }
-EXPORT_SYMBOL(xsk_get_pool_from_qid);
 
-static void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
+static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
 {
-	if (queue_id < dev->real_num_rx_queues)
-		dev->_rx[queue_id].pool = NULL;
-	if (queue_id < dev->real_num_tx_queues)
-		dev->_tx[queue_id].pool = NULL;
+	if (umem->user) {
+		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
+		free_uid(umem->user);
+	}
 }
 
-int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
-			u16 queue_id, u16 flags)
+void xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
+			 u16 queue_id)
 {
-	bool force_zc, force_copy;
-	struct netdev_bpf bpf;
-	int err = 0;
-
-	ASSERT_RTNL();
-
-	force_zc = flags & XDP_ZEROCOPY;
-	force_copy = flags & XDP_COPY;
-
-	if (force_zc && force_copy)
-		return -EINVAL;
-
-	if (xsk_get_pool_from_qid(dev, queue_id))
-		return -EBUSY;
-
-	err = xsk_reg_pool_at_qid(dev, umem->pool, queue_id);
-	if (err)
-		return err;
-
 	umem->dev = dev;
 	umem->queue_id = queue_id;
 
-	if (flags & XDP_USE_NEED_WAKEUP) {
-		umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
-		/* Tx needs to be explicitly woken up the first time.
-		 * Also for supporting drivers that do not implement this
-		 * feature. They will always have to call sendto().
-		 */
-		xsk_set_tx_need_wakeup(umem->pool);
-	}
-
 	dev_hold(dev);
-
-	if (force_copy)
-		/* For copy-mode, we are done. */
-		return 0;
-
-	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_wakeup) {
-		err = -EOPNOTSUPP;
-		goto err_unreg_umem;
-	}
-
-	bpf.command = XDP_SETUP_XSK_POOL;
-	bpf.xsk.pool = umem->pool;
-	bpf.xsk.queue_id = queue_id;
-
-	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
-	if (err)
-		goto err_unreg_umem;
-
-	umem->zc = true;
-	return 0;
-
-err_unreg_umem:
-	if (!force_zc)
-		err = 0; /* fallback to copy mode */
-	if (err)
-		xsk_clear_pool_at_qid(dev, queue_id);
-	return err;
 }
 
 void xdp_umem_clear_dev(struct xdp_umem *umem)
 {
-	struct netdev_bpf bpf;
-	int err;
-
-	ASSERT_RTNL();
-
-	if (!umem->dev)
-		return;
-
-	if (umem->zc) {
-		bpf.command = XDP_SETUP_XSK_POOL;
-		bpf.xsk.pool = NULL;
-		bpf.xsk.queue_id = umem->queue_id;
-
-		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
-
-		if (err)
-			WARN(1, "failed to disable umem!\n");
-	}
-
-	xsk_clear_pool_at_qid(umem->dev, umem->queue_id);
-
 	dev_put(umem->dev);
 	umem->dev = NULL;
 	umem->zc = false;
 }
 
-static void xdp_umem_unpin_pages(struct xdp_umem *umem)
-{
-	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
-
-	kfree(umem->pgs);
-	umem->pgs = NULL;
-}
-
-static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
-{
-	if (umem->user) {
-		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
-		free_uid(umem->user);
-	}
-}
-
 static void xdp_umem_release(struct xdp_umem *umem)
 {
-	rtnl_lock();
 	xdp_umem_clear_dev(umem);
-	rtnl_unlock();
 
 	ida_simple_remove(&umem_ida, umem->id);
 
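After this hunk, xdp_umem_assign_dev() is pure bookkeeping: it records the device and queue id and takes a device reference, while queue registration and the XDP_SETUP_XSK_POOL driver handshake move out of the umem code entirely. Below is a minimal sketch of what a caller might look like once the buffer pool owns that negotiation; xp_assign_dev() and the call ordering are assumptions inferred from this diff, not part of it.

/* Hypothetical bind path after the split: the pool, not the umem,
 * registers at the queue and negotiates zero-copy with the driver.
 */
static int xsk_bind_sketch(struct xsk_buff_pool *pool, struct xdp_umem *umem,
			   struct net_device *dev, u16 queue_id, u16 flags)
{
	int err;

	/* (assumed) pool-side helper doing what the removed
	 * xsk_reg_pool_at_qid() + ndo_bpf sequence used to do here
	 */
	err = xp_assign_dev(pool, dev, queue_id, flags);
	if (err)
		return err;

	/* umem side is now infallible bookkeeping */
	xdp_umem_assign_dev(umem, dev, queue_id);
	return 0;
}
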
@@ -214,20 +95,12 @@ static void xdp_umem_release(struct xdp_umem *umem)
 		umem->cq = NULL;
 	}
 
-	xp_destroy(umem->pool);
 	xdp_umem_unpin_pages(umem);
 
 	xdp_umem_unaccount_pages(umem);
 	kfree(umem);
 }
 
-static void xdp_umem_release_deferred(struct work_struct *work)
-{
-	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);
-
-	xdp_umem_release(umem);
-}
-
 void xdp_get_umem(struct xdp_umem *umem)
 {
 	refcount_inc(&umem->users);
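The deferred-release worker removed above existed only because xdp_umem_release() used to take rtnl_lock(), which cannot safely be taken from every context the final xdp_put_umem() might run in. With the RTNL-protected driver teardown gone from the umem path, release no longer needs to be bounced to a workqueue. One consequence, sketched below as an assumption rather than a rule stated in the diff: the final put now unpins pages synchronously, so it must come from a context that may sleep.

/* Sketch: the release path can now block in the caller's context
 * (unpin_user_pages_dirty_lock() may sleep), so the final put must
 * happen in process context. might_sleep() documents that contract.
 */
static void xdp_put_umem_sketch(struct xdp_umem *umem)
{
	might_sleep();
	xdp_put_umem(umem);
}
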
@@ -238,10 +111,8 @@ void xdp_put_umem(struct xdp_umem *umem)
 	if (!umem)
 		return;
 
-	if (refcount_dec_and_test(&umem->users)) {
-		INIT_WORK(&umem->work, xdp_umem_release_deferred);
-		schedule_work(&umem->work);
-	}
+	if (refcount_dec_and_test(&umem->users))
+		xdp_umem_release(umem);
 }
 
 static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
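With the workqueue indirection gone, the umem's lifetime is a plain refcount: whoever needs it to stay around takes a reference with xdp_get_umem(), and the last xdp_put_umem() frees it on the spot. A sketch of how a buffer pool might pin its backing umem under that rule follows; the helper names are invented, only the get/put pairing is taken from this file.

/* A pool pins the umem it was built on and drops it on destroy; the
 * drop may free the umem immediately, with no deferred work involved.
 */
static struct xsk_buff_pool *xp_create_sketch(struct xdp_umem *umem)
{
	struct xsk_buff_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	xdp_get_umem(umem);	/* pool holds its own reference */
	pool->umem = umem;
	return pool;
}

static void xp_destroy_sketch(struct xsk_buff_pool *pool)
{
	xdp_put_umem(pool->umem);	/* may free the umem right here */
	kfree(pool);
}
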
@@ -357,6 +228,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->size = size;
 	umem->headroom = headroom;
 	umem->chunk_size = chunk_size;
+	umem->chunks = chunks;
 	umem->npgs = (u32)npgs;
 	umem->pgs = NULL;
 	umem->user = NULL;
@@ -374,16 +246,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	if (err)
 		goto out_account;
 
-	umem->pool = xp_create(umem, chunks, chunk_size, headroom, size,
-			       unaligned_chunks);
-	if (!umem->pool) {
-		err = -ENOMEM;
-		goto out_pin;
-	}
 	return 0;
 
-out_pin:
-	xdp_umem_unpin_pages(umem);
 out_account:
 	xdp_umem_unaccount_pages(umem);
 	return err;
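In xdp_umem_reg(), the pool is no longer created as a side effect of registering the umem: the function now only validates and records the chunk geometry, including the newly stored chunks count, so a pool can be built from it separately later. A small illustration of using that recorded geometry; the helper is hypothetical, while the field names come from this hunk.

/* For aligned chunks, chunk i starts at i * chunk_size; umem->chunks
 * (stored above) bounds the valid index range.
 */
static int example_chunk_addr(const struct xdp_umem *umem, u32 i, u64 *addr)
{
	if (i >= umem->chunks)
		return -EINVAL;

	*addr = (u64)i * umem->chunk_size;
	return 0;
}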