selftests/bpf: Test ldsx with more complex cases
The following ldsx cases are tested:
  - signed readonly map value
  - read/write map value
  - probed memory
  - not-narrowed ctx field access
  - narrowed ctx field access.

Without the proper verifier/jit handling from the previous patches, the test will fail.

If cpuv4 is not supported by either the compiler or the jit,
the test will be skipped.
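
For reference, a minimal sketch (editorial, not part of this patch) of the
kind of sign-extending load these programs exercise, assuming clang >= 18
with -mcpu=v4; the helper name below is illustrative only:

  /* With cpuv4, the signed narrow load is expected to compile into a single
   * sign-extending load (BPF_MEMSX), e.g. "r0 = *(s16 *)(r1 + 0)", instead
   * of an unsigned load followed by a pair of shifts.
   */
  __attribute__((noinline)) long long read_s16(const short *p)
  {
          return *p; /* 16-bit load, sign-extended to 64 bits */
  }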

  # ./test_progs -t ldsx_insn
  #113/1   ldsx_insn/map_val and probed_memory:SKIP
  #113/2   ldsx_insn/ctx_member_sign_ext:SKIP
  #113/3   ldsx_insn/ctx_member_narrow_sign_ext:SKIP
  #113     ldsx_insn:SKIP
  Summary: 1/0 PASSED, 3 SKIPPED, 0 FAILED

Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230728011336.3723434-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Yonghong Song authored and Alexei Starovoitov committed Jul 28, 2023
1 parent 613dad4 commit 0c60657
Showing 3 changed files with 265 additions and 1 deletion.
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c (8 additions, 1 deletion)
@@ -98,6 +98,12 @@ bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
return bpf_testmod_test_struct_arg_result;
}

+noinline int
+bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
+bpf_testmod_test_struct_arg_result = a->a;
+return bpf_testmod_test_struct_arg_result;
+}
+
__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
@@ -240,7 +246,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
.off = off,
.len = len,
};
-struct bpf_testmod_struct_arg_1 struct_arg1 = {10};
+struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
struct bpf_testmod_struct_arg_3 *struct_arg3;
struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
@@ -259,6 +265,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
(void *)20, struct_arg4, 23);

+(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
sizeof(int)), GFP_KERNEL);
tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c (139 additions, 0 deletions)
@@ -0,0 +1,139 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.*/

#include <test_progs.h>
#include <network_helpers.h>
#include "test_ldsx_insn.skel.h"

static void test_map_val_and_probed_memory(void)
{
struct test_ldsx_insn *skel;
int err;

skel = test_ldsx_insn__open();
if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
return;

if (skel->rodata->skip) {
test__skip();
goto out;
}

bpf_program__set_autoload(skel->progs.rdonly_map_prog, true);
bpf_program__set_autoload(skel->progs.map_val_prog, true);
bpf_program__set_autoload(skel->progs.test_ptr_struct_arg, true);

err = test_ldsx_insn__load(skel);
if (!ASSERT_OK(err, "test_ldsx_insn__load"))
goto out;

err = test_ldsx_insn__attach(skel);
if (!ASSERT_OK(err, "test_ldsx_insn__attach"))
goto out;

ASSERT_OK(trigger_module_test_read(256), "trigger_read");

ASSERT_EQ(skel->bss->done1, 1, "done1");
ASSERT_EQ(skel->bss->ret1, 1, "ret1");
ASSERT_EQ(skel->bss->done2, 1, "done2");
ASSERT_EQ(skel->bss->ret2, 1, "ret2");
ASSERT_EQ(skel->bss->int_member, -1, "int_member");

out:
test_ldsx_insn__destroy(skel);
}

static void test_ctx_member_sign_ext(void)
{
struct test_ldsx_insn *skel;
int err, fd, cgroup_fd;
char buf[16] = {0};
socklen_t optlen;

cgroup_fd = test__join_cgroup("/ldsx_test");
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /ldsx_test"))
return;

skel = test_ldsx_insn__open();
if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
goto close_cgroup_fd;

if (skel->rodata->skip) {
test__skip();
goto destroy_skel;
}

bpf_program__set_autoload(skel->progs._getsockopt, true);

err = test_ldsx_insn__load(skel);
if (!ASSERT_OK(err, "test_ldsx_insn__load"))
goto destroy_skel;

skel->links._getsockopt =
bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link"))
goto destroy_skel;

fd = socket(AF_INET, SOCK_STREAM, 0);
if (!ASSERT_GE(fd, 0, "socket"))
goto destroy_skel;

optlen = sizeof(buf);
(void)getsockopt(fd, SOL_IP, IP_TTL, buf, &optlen);

ASSERT_EQ(skel->bss->set_optlen, -1, "optlen");
ASSERT_EQ(skel->bss->set_retval, -1, "retval");

close(fd);
destroy_skel:
test_ldsx_insn__destroy(skel);
close_cgroup_fd:
close(cgroup_fd);
}

static void test_ctx_member_narrow_sign_ext(void)
{
struct test_ldsx_insn *skel;
struct __sk_buff skb = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
);
int err, prog_fd;

skel = test_ldsx_insn__open();
if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
return;

if (skel->rodata->skip) {
test__skip();
goto out;
}

bpf_program__set_autoload(skel->progs._tc, true);

err = test_ldsx_insn__load(skel);
if (!ASSERT_OK(err, "test_ldsx_insn__load"))
goto out;

prog_fd = bpf_program__fd(skel->progs._tc);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");

ASSERT_EQ(skel->bss->set_mark, -2, "set_mark");

out:
test_ldsx_insn__destroy(skel);
}

void test_ldsx_insn(void)
{
if (test__start_subtest("map_val and probed_memory"))
test_map_val_and_probed_memory();
if (test__start_subtest("ctx_member_sign_ext"))
test_ctx_member_sign_ext();
if (test__start_subtest("ctx_member_narrow_sign_ext"))
test_ctx_member_narrow_sign_ext();
}
tools/testing/selftests/bpf/progs/test_ldsx_insn.c (118 additions, 0 deletions)
@@ -0,0 +1,118 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18
const volatile int skip = 0;
#else
const volatile int skip = 1;
#endif

volatile const short val1 = -1;
volatile const int val2 = -1;
short val3 = -1;
int val4 = -1;
int done1, done2, ret1, ret2;

SEC("?raw_tp/sys_enter")
int rdonly_map_prog(const void *ctx)
{
if (done1)
return 0;

done1 = 1;
/* val1/val2 readonly map */
if (val1 == val2)
ret1 = 1;
return 0;

}

SEC("?raw_tp/sys_enter")
int map_val_prog(const void *ctx)
{
if (done2)
return 0;

done2 = 1;
/* val1/val2 regular read/write map */
if (val3 == val4)
ret2 = 1;
return 0;

}

struct bpf_testmod_struct_arg_1 {
int a;
};

long long int_member;

SEC("?fentry/bpf_testmod_test_arg_ptr_to_struct")
int BPF_PROG2(test_ptr_struct_arg, struct bpf_testmod_struct_arg_1 *, p)
{
/* probed memory access */
int_member = p->a;
return 0;
}

long long set_optlen, set_retval;

SEC("?cgroup/getsockopt")
int _getsockopt(volatile struct bpf_sockopt *ctx)
{
int old_optlen, old_retval;

old_optlen = ctx->optlen;
old_retval = ctx->retval;

ctx->optlen = -1;
ctx->retval = -1;

/* sign extension for ctx member */
set_optlen = ctx->optlen;
set_retval = ctx->retval;

ctx->optlen = old_optlen;
ctx->retval = old_retval;

return 0;
}

long long set_mark;

SEC("?tc")
int _tc(volatile struct __sk_buff *skb)
{
long long tmp_mark;
int old_mark;

old_mark = skb->mark;

skb->mark = 0xf6fe;

/* narrowed sign extension for ctx member */
#if __clang_major__ >= 18
/* force narrow one-byte signed load. Otherwise, compiler may
* generate a 32-bit unsigned load followed by an s8 movsx.
*/
asm volatile ("r1 = *(s8 *)(%[ctx] + %[off_mark])\n\t"
"%[tmp_mark] = r1"
: [tmp_mark]"=r"(tmp_mark)
: [ctx]"r"(skb),
[off_mark]"i"(offsetof(struct __sk_buff, mark))
: "r1");
#else
tmp_mark = (char)skb->mark;
#endif
set_mark = tmp_mark;

skb->mark = old_mark;

return 0;
}

char _license[] SEC("license") = "GPL";
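
A note on the expected value checked above (editorial, not part of the
commit): the _tc program stores 0xf6fe in skb->mark and reads the field back
through a one-byte signed load, so only the low byte is seen and it is
sign-extended:

  0xf6fe & 0xff       /* = 0xfe, the low byte of the stored mark         */
  (signed char)0xfe   /* = -2 after sign extension, hence set_mark == -2 */

Similarly, _getsockopt stores -1 in ctx->optlen and ctx->retval and reads
them back through sign-extending loads, so set_optlen and set_retval are
checked against -1.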
