Skip to content

Commit f8c423d

Browse files
author
Martin KaFai Lau
committed
Merge branch 'selftests/bpf: Add sockaddr tests for kernel networking'
Jordan Rife says: ==================== This patch series adds test coverage for BPF sockaddr hooks and their interactions with kernel socket functions (i.e. kernel_bind(), kernel_connect(), kernel_sendmsg(), sock_sendmsg(), kernel_getpeername(), and kernel_getsockname()) while also rounding out IPv4 and IPv6 sockaddr hook coverage in prog_tests/sock_addr.c. As with v1 of this patch series, we add regression coverage for the issues addressed by these patches, - commit 0bdf399("net: Avoid address overwrite in kernel_connect") - commit 86a7e0b("net: prevent rewrite of msg_name in sock_sendmsg()") - commit c889a99("net: prevent address rewrite in kernel_bind()") - commit 01b2885("net: Save and restore msg_namelen in sock_sendmsg") but broaden the focus a bit. In order to extend prog_tests/sock_addr.c to test these kernel functions, we add a set of new kfuncs that wrap individual socket operations to bpf_testmod and invoke them through a set of corresponding SYSCALL programs (progs/sock_addr_kern.c). Each test case can be configured to use a different set of "sock_ops" depending on whether it is testing kernel calls (kernel_bind(), kernel_connect(), etc.) or system calls (bind(), connect(), etc.). ======= Patches ======= * Patch 1 fixes the sock_addr bind test program to work for big endian architectures such as s390x. * Patch 2 introduces the new kfuncs to bpf_testmod. * Patch 3 introduces the BPF program which allows us to invoke these kfuncs individually from the test program. * Patch 4 lays the groundwork for IPv4 and IPv6 sockaddr hook coverage by migrating much of the environment setup logic from bpf/test_sock_addr.sh into prog_tests/sock_addr.c and moves test cases to cover bind4/6, connect4/6, sendmsg4/6 and recvmsg4/6 hooks. * Patch 5 makes the set of socket operations for each test case configurable, laying the groundwork for Patch 6. * Patch 6 introduces two sets of sock_ops that invoke the kernel equivalents of connect(), bind(), etc. 
and uses these to add coverage for the kernel socket functions. ======= Changes ======= v2->v3 ------ * Renamed bind helpers. Dropped "_ntoh" suffix. * Added guards to kfuncs to make sure addrlen and msglen do not exceed the buffer capacity. * Added KF_SLEEPABLE flag to kfuncs. * Added a mutex (sock_lock) to kfuncs to serialize access to sock. * Added NULL check for sock to each kfunc. * Use the "sock_addr" networking namespace for all network interface setup and testing. * Use "nodad" when calling "ip -6 addr add" during interface setup to avoid delays and remove ping loop. * Removed test cases from test_sock_addr.c to make it clear what remains to be migrated. * Removed unused parameter (expect_change) from sock_addr_op(). Link: https://lore.kernel.org/bpf/20240412165230.2009746-1-jrife@google.com/T/#u v1->v2 ------ * Dropped test_progs/sock_addr_kern.c and the sock_addr_kern test module in favor of simply expanding bpf_testmod and test_progs/sock_addr.c. * Migrated environment setup logic from bpf/test_sock_addr.sh into prog_tests/sock_addr.c rather than invoking the script from the test program. * Added kfuncs to bpf_testmod as well as the sock_addr_kern BPF program to enable us to invoke kernel socket functions from test_progs/sock_addr.c. * Added test coverage for kernel socket functions to test_progs/sock_addr.c. Link: https://lore.kernel.org/bpf/20240329191907.1808635-1-jrife@google.com/T/#u ==================== Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
2 parents 08e90da + e0c8a7e commit f8c423d

File tree

8 files changed

+1193
-341
lines changed

8 files changed

+1193
-341
lines changed

tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c

Lines changed: 255 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,18 +10,30 @@
1010
#include <linux/percpu-defs.h>
1111
#include <linux/sysfs.h>
1212
#include <linux/tracepoint.h>
13+
#include <linux/net.h>
14+
#include <linux/socket.h>
15+
#include <linux/nsproxy.h>
16+
#include <linux/inet.h>
17+
#include <linux/in.h>
18+
#include <linux/in6.h>
19+
#include <linux/un.h>
20+
#include <net/sock.h>
1321
#include "bpf_testmod.h"
1422
#include "bpf_testmod_kfunc.h"
1523

1624
#define CREATE_TRACE_POINTS
1725
#include "bpf_testmod-events.h"
1826

27+
#define CONNECT_TIMEOUT_SEC 1
28+
1929
typedef int (*func_proto_typedef)(long);
2030
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
2131
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);
2232

2333
DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
2434
long bpf_testmod_test_struct_arg_result;
35+
static DEFINE_MUTEX(sock_lock);
36+
static struct socket *sock;
2537

2638
struct bpf_testmod_struct_arg_1 {
2739
int a;
@@ -498,6 +510,237 @@ __bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
498510
{
499511
}
500512

513+
/*
 * Create the shared kernel socket used by the sock_addr kfunc tests.
 *
 * args->af selects the address family (AF_INET, AF_INET6, or AF_UNIX) and
 * args->type the socket type (e.g. SOCK_STREAM or SOCK_DGRAM).  The socket
 * is created in the calling task's network namespace and stored in the
 * file-scope 'sock' pointer; all access to 'sock' is serialized by
 * sock_lock.
 *
 * Returns 0 on success, -EPERM if a socket already exists (callers must
 * release it first with bpf_kfunc_close_sock()), -EINVAL for an
 * unsupported address family, or the error from sock_create_kern().
 */
__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
{
	int proto;
	int err;

	mutex_lock(&sock_lock);

	if (sock) {
		pr_err("%s called without releasing old sock", __func__);
		err = -EPERM;
		goto out;
	}

	switch (args->af) {
	case AF_INET:
	case AF_INET6:
		/* For IP sockets, derive the protocol from the socket type. */
		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
		break;
	case AF_UNIX:
		proto = PF_UNIX;
		break;
	default:
		pr_err("invalid address family %d\n", args->af);
		err = -EINVAL;
		goto out;
	}

	/* Create the socket in the current task's network namespace. */
	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
			       proto, &sock);

	if (!err)
		/* Set timeout for call to kernel_connect() to prevent it from hanging,
		 * and consider the connection attempt failed if it returns
		 * -EINPROGRESS.
		 */
		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
out:
	mutex_unlock(&sock_lock);

	return err;
}
554+
555+
__bpf_kfunc void bpf_kfunc_close_sock(void)
556+
{
557+
mutex_lock(&sock_lock);
558+
559+
if (sock) {
560+
sock_release(sock);
561+
sock = NULL;
562+
}
563+
564+
mutex_unlock(&sock_lock);
565+
}
566+
567+
__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
568+
{
569+
int err;
570+
571+
if (args->addrlen > sizeof(args->addr))
572+
return -EINVAL;
573+
574+
mutex_lock(&sock_lock);
575+
576+
if (!sock) {
577+
pr_err("%s called without initializing sock", __func__);
578+
err = -EPERM;
579+
goto out;
580+
}
581+
582+
err = kernel_connect(sock, (struct sockaddr *)&args->addr,
583+
args->addrlen, 0);
584+
out:
585+
mutex_unlock(&sock_lock);
586+
587+
return err;
588+
}
589+
590+
__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
591+
{
592+
int err;
593+
594+
if (args->addrlen > sizeof(args->addr))
595+
return -EINVAL;
596+
597+
mutex_lock(&sock_lock);
598+
599+
if (!sock) {
600+
pr_err("%s called without initializing sock", __func__);
601+
err = -EPERM;
602+
goto out;
603+
}
604+
605+
err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
606+
out:
607+
mutex_unlock(&sock_lock);
608+
609+
return err;
610+
}
611+
612+
__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
613+
{
614+
int err;
615+
616+
mutex_lock(&sock_lock);
617+
618+
if (!sock) {
619+
pr_err("%s called without initializing sock", __func__);
620+
err = -EPERM;
621+
goto out;
622+
}
623+
624+
err = kernel_listen(sock, 128);
625+
out:
626+
mutex_unlock(&sock_lock);
627+
628+
return err;
629+
}
630+
631+
/*
 * Send args->msg (args->msglen bytes) on the shared test socket via
 * kernel_sendmsg(), addressed to args->addr.addr.  On return,
 * args->addr.addrlen is overwritten with msg.msg_namelen so the test
 * program can observe whether the kernel rewrote the name length.
 *
 * Returns the number of bytes sent (>= 0) on success, -EINVAL if the
 * address or message exceeds its buffer, -EPERM if no socket has been
 * initialized, or the error from kernel_sendmsg().
 */
__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name = &args->addr.addr,
		.msg_namelen = args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	/* Guard against reading past the fixed-size buffers. */
	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len = args->msglen;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
	/* Expose any change the kernel made to msg_namelen to the caller. */
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}
662+
663+
/*
 * Send args->msg (args->msglen bytes) on the shared test socket via
 * sock_sendmsg(), addressed to args->addr.addr.  Unlike kernel_sendmsg(),
 * sock_sendmsg() expects the caller to have initialized msg.msg_iter, so
 * iov_iter_kvec() is called here first.  On return, args->addr.addrlen is
 * overwritten with msg.msg_namelen so the test program can observe
 * whether the kernel rewrote the name length.
 *
 * Returns the number of bytes sent (>= 0) on success, -EINVAL if the
 * address or message exceeds its buffer, -EPERM if no socket has been
 * initialized, or the error from sock_sendmsg().
 */
__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name = &args->addr.addr,
		.msg_namelen = args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	/* Guard against reading past the fixed-size buffers. */
	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len = args->msglen;

	/* Set up the payload iterator before taking sock_lock. */
	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = sock_sendmsg(sock, &msg);
	/* Expose any change the kernel made to msg_namelen to the caller. */
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}
695+
696+
__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
697+
{
698+
int err;
699+
700+
mutex_lock(&sock_lock);
701+
702+
if (!sock) {
703+
pr_err("%s called without initializing sock", __func__);
704+
err = -EPERM;
705+
goto out;
706+
}
707+
708+
err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
709+
if (err < 0)
710+
goto out;
711+
712+
args->addrlen = err;
713+
err = 0;
714+
out:
715+
mutex_unlock(&sock_lock);
716+
717+
return err;
718+
}
719+
720+
__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
721+
{
722+
int err;
723+
724+
mutex_lock(&sock_lock);
725+
726+
if (!sock) {
727+
pr_err("%s called without initializing sock", __func__);
728+
err = -EPERM;
729+
goto out;
730+
}
731+
732+
err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
733+
if (err < 0)
734+
goto out;
735+
736+
args->addrlen = err;
737+
err = 0;
738+
out:
739+
mutex_unlock(&sock_lock);
740+
741+
return err;
742+
}
743+
501744
BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
502745
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
503746
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
@@ -525,6 +768,15 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
525768
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
526769
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
527770
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
771+
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
772+
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
773+
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
774+
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
775+
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
776+
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
777+
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
778+
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
779+
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
528780
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
529781

530782
static int bpf_testmod_ops_init(struct btf *btf)
@@ -655,6 +907,8 @@ static int bpf_testmod_init(void)
655907
return ret;
656908
if (bpf_fentry_test1(0) < 0)
657909
return -EINVAL;
910+
sock = NULL;
911+
mutex_init(&sock_lock);
658912
return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
659913
}
660914

@@ -668,6 +922,7 @@ static void bpf_testmod_exit(void)
668922
while (refcount_read(&prog_test_struct.cnt) > 1)
669923
msleep(20);
670924

925+
bpf_kfunc_close_sock();
671926
sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
672927
}
673928

tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,22 @@ struct prog_test_fail3 {
6464
char arr2[];
6565
};
6666

67+
/* Parameters for bpf_kfunc_init_sock(): what kind of socket to create. */
struct init_sock_args {
	int af;   /* address family: AF_INET, AF_INET6, or AF_UNIX */
	int type; /* socket type, e.g. SOCK_STREAM or SOCK_DGRAM */
};
71+
72+
/* A raw socket address plus its length, passed to/from the addr kfuncs. */
struct addr_args {
	char addr[sizeof(struct __kernel_sockaddr_storage)]; /* sockaddr bytes */
	int addrlen; /* number of valid bytes in addr */
};
76+
77+
/* Destination address and payload for the sendmsg-style kfuncs. */
struct sendmsg_args {
	struct addr_args addr; /* destination address and its length */
	char msg[10];          /* message payload buffer */
	int msglen;            /* number of valid bytes in msg */
};
82+
6783
struct prog_test_ref_kfunc *
6884
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym;
6985
void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
@@ -107,4 +123,15 @@ void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p);
107123
void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len);
108124

109125
void bpf_kfunc_common_test(void) __ksym;
126+
127+
/* Kfuncs exported by bpf_testmod for driving kernel socket calls
 * (kernel_bind(), kernel_connect(), etc.) from BPF SYSCALL programs.
 */
int bpf_kfunc_init_sock(struct init_sock_args *args) __ksym;
void bpf_kfunc_close_sock(void) __ksym;
int bpf_kfunc_call_kernel_connect(struct addr_args *args) __ksym;
int bpf_kfunc_call_kernel_bind(struct addr_args *args) __ksym;
int bpf_kfunc_call_kernel_listen(void) __ksym;
int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args) __ksym;
int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args) __ksym;
int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) __ksym;
int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) __ksym;
136+
110137
#endif /* _BPF_TESTMOD_KFUNC_H */

0 commit comments

Comments
 (0)