Skip to content

Commit

Permalink
soreuseport: fix initialization race
Browse files Browse the repository at this point in the history
[ Upstream commit 1b5f962 ]

Syzkaller stumbled upon a way to trigger
WARNING: CPU: 1 PID: 13881 at net/core/sock_reuseport.c:41
reuseport_alloc+0x306/0x3b0 net/core/sock_reuseport.c:39

There are two initialization paths for the sock_reuseport structure in a
socket: Through the udp/tcp bind paths of SO_REUSEPORT sockets or through
SO_ATTACH_REUSEPORT_[CE]BPF before bind.  The existing implementation
assumed that the socket lock protected both of these paths when it actually
only protects the SO_ATTACH_REUSEPORT path.  Syzkaller triggered this
double allocation by running these paths concurrently.

This patch moves the check for double allocation into the reuseport_alloc
function which is protected by a global spin lock.

Fixes: e32ea7e ("soreuseport: fast reuseport UDP socket selection")
Fixes: c125e80 ("soreuseport: fast reuseport TCP socket selection")
Signed-off-by: Craig Gallek <kraig@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  • Loading branch information
kraigatgoog authored and gregkh committed Nov 18, 2017
1 parent 57ffb0e commit 3b0b4d2
Show file tree
Hide file tree
Showing 3 changed files with 11 additions and 11 deletions.
12 changes: 9 additions & 3 deletions net/core/sock_reuseport.c
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
* soft irq of receive path or setsockopt from process context
*/
spin_lock_bh(&reuseport_lock);
WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
lockdep_is_held(&reuseport_lock)),
"multiple allocations for the same socket");

/* Allocation attempts can occur concurrently via the setsockopt path
* and the bind/hash path. Nothing to do when we lose the race.
*/
if (rcu_dereference_protected(sk->sk_reuseport_cb,
lockdep_is_held(&reuseport_lock)))
goto out;

reuse = __reuseport_alloc(INIT_SOCKS);
if (!reuse) {
spin_unlock_bh(&reuseport_lock);
Expand All @@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
reuse->num_socks = 1;
rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
spin_unlock_bh(&reuseport_lock);

return 0;
Expand Down
5 changes: 1 addition & 4 deletions net/ipv4/inet_hashtables.c
Original file line number Diff line number Diff line change
Expand Up @@ -455,10 +455,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
return reuseport_add_sock(sk, sk2);
}

/* Initial allocation may have already happened via setsockopt */
if (!rcu_access_pointer(sk->sk_reuseport_cb))
return reuseport_alloc(sk);
return 0;
return reuseport_alloc(sk);
}

int __inet_hash(struct sock *sk, struct sock *osk,
Expand Down
5 changes: 1 addition & 4 deletions net/ipv4/udp.c
Original file line number Diff line number Diff line change
Expand Up @@ -222,10 +222,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
}
}

/* Initial allocation may have already happened via setsockopt */
if (!rcu_access_pointer(sk->sk_reuseport_cb))
return reuseport_alloc(sk);
return 0;
return reuseport_alloc(sk);
}

/**
Expand Down

0 comments on commit 3b0b4d2

Please sign in to comment.