
Commit 6bd0c76

Daniel Borkmann says:

====================
pull-request: bpf 2022-03-18

We've added 2 non-merge commits during the last 18 day(s) which contain
a total of 2 files changed, 50 insertions(+), 20 deletions(-).

The main changes are:

1) Fix a race in XSK socket teardown code that can lead to a NULL pointer
   dereference, from Magnus.

2) Small MAINTAINERS doc update to remove Lorenz from sockmap, from Lorenz.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  xsk: Fix race at socket teardown
  bpf: Remove Lorenz Bauer from L7 BPF maintainers
====================

Link: https://lore.kernel.org/r/20220318152418.28638-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 parents: 9905eed + 18b1ab7
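
For context on change 1): before this fix, the sendmsg/recvmsg/poll paths checked the socket's bound state with no protection against a concurrent teardown, which could clear xs->dev between the check and its use. A condensed sketch of the race and of the repaired pattern, reusing names from the net/xdp/xsk.c diff below (kernel-style C for illustration, not a standalone program):

/*
 * The race being fixed:
 *
 *   CPU 0 (xsk_poll/sendmsg/recvmsg)      CPU 1 (xsk_unbind_dev)
 *   --------------------------------      ----------------------
 *   if (xsk_is_bound(xs))   // true
 *                                         xs->state = XSK_UNBOUND;
 *                                         xs->dev = NULL;
 *   xs->dev->...            // NULL pointer dereference
 *
 * The fix wraps each syscall entry point in an RCU read-side critical
 * section, re-checks the bound state inside it, and keeps xs->dev valid
 * during teardown until synchronize_net() has waited out all readers.
 */
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
        int ret;

        rcu_read_lock();                         /* pin the bound state */
        ret = __xsk_sendmsg(sock, m, total_len); /* re-checks xsk_is_bound() */
        rcu_read_unlock();

        return ret;
}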

2 files changed, 50 insertions(+), 20 deletions(-)

MAINTAINERS

Lines changed: 0 additions & 1 deletion

@@ -10765,7 +10765,6 @@ L7 BPF FRAMEWORK
 M:     John Fastabend <john.fastabend@gmail.com>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     Jakub Sitnicki <jakub@cloudflare.com>
-M:     Lorenz Bauer <lmb@cloudflare.com>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Maintained

net/xdp/xsk.c

Lines changed: 50 additions & 19 deletions

@@ -403,18 +403,8 @@ EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
 {
        struct net_device *dev = xs->dev;
-       int err;
-
-       rcu_read_lock();
-       err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
-       rcu_read_unlock();
-
-       return err;
-}
 
-static int xsk_zc_xmit(struct xdp_sock *xs)
-{
-       return xsk_wakeup(xs, XDP_WAKEUP_TX);
+       return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
 }
 
 static void xsk_destruct_skb(struct sk_buff *skb)
@@ -533,6 +523,12 @@ static int xsk_generic_xmit(struct sock *sk)
 
        mutex_lock(&xs->mutex);
 
+       /* Since we dropped the RCU read lock, the socket state might have changed. */
+       if (unlikely(!xsk_is_bound(xs))) {
+               err = -ENXIO;
+               goto out;
+       }
+
        if (xs->queue_id >= xs->dev->real_num_tx_queues)
                goto out;
 
@@ -596,16 +592,26 @@ static int xsk_generic_xmit(struct sock *sk)
        return err;
 }
 
-static int __xsk_sendmsg(struct sock *sk)
+static int xsk_xmit(struct sock *sk)
 {
        struct xdp_sock *xs = xdp_sk(sk);
+       int ret;
 
        if (unlikely(!(xs->dev->flags & IFF_UP)))
                return -ENETDOWN;
        if (unlikely(!xs->tx))
                return -ENOBUFS;
 
-       return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
+       if (xs->zc)
+               return xsk_wakeup(xs, XDP_WAKEUP_TX);
+
+       /* Drop the RCU lock since the SKB path might sleep. */
+       rcu_read_unlock();
+       ret = xsk_generic_xmit(sk);
+       /* Reaquire RCU lock before going into common code. */
+       rcu_read_lock();
+
+       return ret;
 }
 
 static bool xsk_no_wakeup(struct sock *sk)
@@ -619,7 +625,7 @@ static bool xsk_no_wakeup(struct sock *sk)
 #endif
 }
 
-static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
+static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 {
        bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
        struct sock *sk = sock->sk;
@@ -639,11 +645,22 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 
        pool = xs->pool;
        if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
-               return __xsk_sendmsg(sk);
+               return xsk_xmit(sk);
        return 0;
 }
 
-static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
+static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
+{
+       int ret;
+
+       rcu_read_lock();
+       ret = __xsk_sendmsg(sock, m, total_len);
+       rcu_read_unlock();
+
+       return ret;
+}
+
+static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
 {
        bool need_wait = !(flags & MSG_DONTWAIT);
        struct sock *sk = sock->sk;
@@ -669,6 +686,17 @@ static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
        return 0;
 }
 
+static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
+{
+       int ret;
+
+       rcu_read_lock();
+       ret = __xsk_recvmsg(sock, m, len, flags);
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static __poll_t xsk_poll(struct file *file, struct socket *sock,
                         struct poll_table_struct *wait)
 {
@@ -679,8 +707,11 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 
        sock_poll_wait(file, sock, wait);
 
-       if (unlikely(!xsk_is_bound(xs)))
+       rcu_read_lock();
+       if (unlikely(!xsk_is_bound(xs))) {
+               rcu_read_unlock();
                return mask;
+       }
 
        pool = xs->pool;
 
@@ -689,14 +720,15 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
                        xsk_wakeup(xs, pool->cached_need_wakeup);
                else
                        /* Poll needs to drive Tx also in copy mode */
-                       __xsk_sendmsg(sk);
+                       xsk_xmit(sk);
        }
 
        if (xs->rx && !xskq_prod_is_empty(xs->rx))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (xs->tx && xsk_tx_writeable(xs))
                mask |= EPOLLOUT | EPOLLWRNORM;
 
+       rcu_read_unlock();
        return mask;
 }
 
@@ -728,7 +760,6 @@ static void xsk_unbind_dev(struct xdp_sock *xs)
 
        /* Wait for driver to stop using the xdp socket. */
        xp_del_xsk(xs->pool, xs);
-       xs->dev = NULL;
        synchronize_net();
        dev_put(dev);
 }
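
Two details of the xsk.c change are worth calling out. First, the copy-mode TX path can sleep (skb allocation), which is illegal inside an RCU read-side critical section, so xsk_xmit() drops the RCU lock around xsk_generic_xmit(), and the new check at the top of xsk_generic_xmit() re-validates the bound state under xs->mutex, returning -ENXIO if teardown won the race. Second, since xs->dev is no longer cleared before the grace period, the reader/writer contract becomes simple. A condensed sketch, assuming the kernel's RCU semantics; use_device() is a hypothetical stand-in for any dereference of the bound device:

/* Reader: any syscall path (sendmsg/recvmsg/poll) */
rcu_read_lock();
if (xsk_is_bound(xs))           /* XSK_BOUND => xs->dev stays valid... */
        use_device(xs->dev);    /* ...until the matching unlock */
rcu_read_unlock();

/* Writer: teardown unpublishes the state, then waits for readers */
xs->state = XSK_UNBOUND;        /* new readers will bail out */
synchronize_net();              /* all pre-existing readers have drained */
dev_put(xs->dev);               /* safe: no reader can still hold the dev */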
