Merge branch 'fix-libbpf-s-bpf_object-and-bpf-subskel-interoperability'
Andrii Nakryiko says:

====================
Fix libbpf's bpf_object and BPF subskel interoperability

Fix libbpf's global data map mmap()'ing logic to make BPF objects loaded
through the generic bpf_object__load() API interoperable with BPF
subskeletons instantiated from such BPF objects. The issue is in the
re-mmap()'ing of global data maps after the BPF object is loaded into the
kernel, which is currently done in BPF skeleton-specific code and should
instead be done in the generic and common bpf_object_load() logic.

See patch #2 for the fix and patch #3 for the selftests. Patch #1 is a
preliminary fix for the existing spin_lock selftests, which currently work
by accident.
====================

Link: https://lore.kernel.org/r/20241023043908.3834423-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Alexei Starovoitov committed Oct 24, 2024
2 parents c94ffb3 + 80a5456 commit 39b8ab1
Showing 3 changed files with 117 additions and 46 deletions.
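
For context, the interop scenario being fixed looks roughly like this from
the user side -- a minimal sketch, assuming a subskeleton header generated
by bpftool gen subskeleton; the object path, the test_subskeleton_lib__*
names, and the lib_result global are illustrative, not taken from the patch:

#include <errno.h>
#include <bpf/libbpf.h>
/* hypothetical header produced by: bpftool gen subskeleton */
#include "test_subskeleton_lib.subskel.h"

int main(void)
{
    struct test_subskeleton_lib *lib = NULL;
    struct bpf_object *obj;
    int err;

    /* open and load through the generic bpf_object API, no skeleton */
    obj = bpf_object__open_file("test_subskeleton.bpf.o", NULL);
    if (!obj)
        return 1;
    err = bpf_object__load(obj);
    if (err)
        goto out;

    /* instantiate a subskeleton from the already-loaded object; before
     * this fix, its global-data pointers could reference the stale
     * pre-load image instead of the BPF map-backed memory
     */
    lib = test_subskeleton_lib__open(obj);
    if (!lib) {
        err = -errno;
        goto out;
    }

    /* reads/writes now hit the actual map contents (lib_result is a
     * hypothetical global in the object's .bss)
     */
    *lib->bss.lib_result = 42;

out:
    if (lib)
        test_subskeleton_lib__destroy(lib);
    bpf_object__close(obj);
    return err ? 1 : 0;
}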
83 changes: 40 additions & 43 deletions tools/lib/bpf/libbpf.c
@@ -5122,6 +5122,7 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
     enum libbpf_map_type map_type = map->libbpf_type;
     char *cp, errmsg[STRERR_BUFSIZE];
     int err, zero = 0;
+    size_t mmap_sz;
 
     if (obj->gen_loader) {
         bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
@@ -5135,8 +5136,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
     if (err) {
         err = -errno;
         cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
-        pr_warn("Error setting initial map(%s) contents: %s\n",
-            map->name, cp);
+        pr_warn("map '%s': failed to set initial contents: %s\n",
+            bpf_map__name(map), cp);
         return err;
     }
 
@@ -5146,11 +5147,43 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
         if (err) {
             err = -errno;
             cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
-            pr_warn("Error freezing map(%s) as read-only: %s\n",
-                map->name, cp);
+            pr_warn("map '%s': failed to freeze as read-only: %s\n",
+                bpf_map__name(map), cp);
             return err;
         }
     }
+
+    /* Remap anonymous mmap()-ed "map initialization image" as
+     * a BPF map-backed mmap()-ed memory, but preserving the same
+     * memory address. This will cause kernel to change process'
+     * page table to point to a different piece of kernel memory,
+     * but from userspace point of view memory address (and its
+     * contents, being identical at this point) will stay the
+     * same. This mapping will be released by bpf_object__close()
+     * as per normal clean up procedure.
+     */
+    mmap_sz = bpf_map_mmap_sz(map);
+    if (map->def.map_flags & BPF_F_MMAPABLE) {
+        void *mmaped;
+        int prot;
+
+        if (map->def.map_flags & BPF_F_RDONLY_PROG)
+            prot = PROT_READ;
+        else
+            prot = PROT_READ | PROT_WRITE;
+        mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
+        if (mmaped == MAP_FAILED) {
+            err = -errno;
+            pr_warn("map '%s': failed to re-mmap() contents: %d\n",
+                bpf_map__name(map), err);
+            return err;
+        }
+        map->mmaped = mmaped;
+    } else if (map->mmaped) {
+        munmap(map->mmaped, mmap_sz);
+        map->mmaped = NULL;
+    }
+
     return 0;
 }

@@ -5467,8 +5500,7 @@ bpf_object__create_maps(struct bpf_object *obj)
             err = bpf_object__populate_internal_map(obj, map);
             if (err < 0)
                 goto err_out;
-        }
-        if (map->def.type == BPF_MAP_TYPE_ARENA) {
+        } else if (map->def.type == BPF_MAP_TYPE_ARENA) {
             map->mmaped = mmap((void *)(long)map->map_extra,
                                bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
                                map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
@@ -13916,46 +13948,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
     for (i = 0; i < s->map_cnt; i++) {
         struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
         struct bpf_map *map = *map_skel->map;
-        size_t mmap_sz = bpf_map_mmap_sz(map);
-        int prot, map_fd = map->fd;
-        void **mmaped = map_skel->mmaped;
 
-        if (!mmaped)
+        if (!map_skel->mmaped)
             continue;
 
-        if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
-            *mmaped = NULL;
-            continue;
-        }
-
-        if (map->def.type == BPF_MAP_TYPE_ARENA) {
-            *mmaped = map->mmaped;
-            continue;
-        }
-
-        if (map->def.map_flags & BPF_F_RDONLY_PROG)
-            prot = PROT_READ;
-        else
-            prot = PROT_READ | PROT_WRITE;
-
-        /* Remap anonymous mmap()-ed "map initialization image" as
-         * a BPF map-backed mmap()-ed memory, but preserving the same
-         * memory address. This will cause kernel to change process'
-         * page table to point to a different piece of kernel memory,
-         * but from userspace point of view memory address (and its
-         * contents, being identical at this point) will stay the
-         * same. This mapping will be released by bpf_object__close()
-         * as per normal clean up procedure, so we don't need to worry
-         * about it from skeleton's clean up perspective.
-         */
-        *mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
-        if (*mmaped == MAP_FAILED) {
-            err = -errno;
-            *mmaped = NULL;
-            pr_warn("failed to re-mmap() map '%s': %d\n",
-                bpf_map__name(map), err);
-            return libbpf_err(err);
-        }
+        *map_skel->mmaped = map->mmaped;
     }
 
     return 0;
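
The comment in the new bpf_object__populate_internal_map() code above
describes the core trick: mmap() with MAP_FIXED replaces the anonymous
"initialization image" pages with BPF map-backed pages at the same virtual
address. Below is a standalone sketch of that technique, not code from the
patch; it assumes a kernel with mmapable BPF array maps and sufficient
privileges (root or CAP_BPF):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int main(void)
{
    LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
    size_t sz = sysconf(_SC_PAGE_SIZE);
    int map_fd, zero = 0;
    void *mem, *remapped;

    /* stage 1: anonymous mapping acting as the "initialization image" */
    mem = mmap(NULL, sz, PROT_READ | PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (mem == MAP_FAILED)
        return 1;
    strcpy(mem, "initial data");

    /* single-entry, page-sized array map that supports mmap() */
    map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo", sizeof(int), sz, 1, &opts);
    if (map_fd < 0)
        return 1;

    /* seed the map from the image, as bpf_object__populate_internal_map()
     * does with bpf_map_update_elem()
     */
    if (bpf_map_update_elem(map_fd, &zero, mem, 0))
        return 1;

    /* stage 2: MAP_FIXED swaps in map-backed pages at the same address;
     * the pointer and its contents stay valid from userspace's view
     */
    remapped = mmap(mem, sz, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_FIXED, map_fd, 0);
    if (remapped == MAP_FAILED)
        return 1;

    printf("%s\n", (char *)remapped); /* prints "initial data" */
    munmap(remapped, sz);
    return 0;
}

Because the address never changes, any pointer the application took before
the swap keeps working afterwards -- which is exactly why subskeleton
global-data pointers stay valid once this remap happens in the generic load
path.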
76 changes: 75 additions & 1 deletion tools/testing/selftests/bpf/prog_tests/subskeleton.c
@@ -46,7 +46,8 @@ static int subskeleton_lib_subresult(struct bpf_object *obj)
     return result;
 }
 
-void test_subskeleton(void)
+/* initialize and load through skeleton, then instantiate subskeleton out of it */
+static void subtest_skel_subskeleton(void)
 {
     int err, result;
     struct test_subskeleton *skel;
@@ -76,3 +77,76 @@ void test_subskeleton(void)
 cleanup:
     test_subskeleton__destroy(skel);
 }
+
+/* initialize and load through generic bpf_object API, then instantiate subskeleton out of it */
+static void subtest_obj_subskeleton(void)
+{
+    int err, result;
+    const void *elf_bytes;
+    size_t elf_bytes_sz = 0, rodata_sz = 0, bss_sz = 0;
+    struct bpf_object *obj;
+    const struct bpf_map *map;
+    const struct bpf_program *prog;
+    struct bpf_link *link = NULL;
+    struct test_subskeleton__rodata *rodata;
+    struct test_subskeleton__bss *bss;
+
+    elf_bytes = test_subskeleton__elf_bytes(&elf_bytes_sz);
+    if (!ASSERT_OK_PTR(elf_bytes, "elf_bytes"))
+        return;
+
+    obj = bpf_object__open_mem(elf_bytes, elf_bytes_sz, NULL);
+    if (!ASSERT_OK_PTR(obj, "obj_open_mem"))
+        return;
+
+    map = bpf_object__find_map_by_name(obj, ".rodata");
+    if (!ASSERT_OK_PTR(map, "rodata_map_by_name"))
+        goto cleanup;
+
+    rodata = bpf_map__initial_value(map, &rodata_sz);
+    if (!ASSERT_OK_PTR(rodata, "rodata_get"))
+        goto cleanup;
+
+    rodata->rovar1 = 10;
+    rodata->var1 = 1;
+    subskeleton_lib_setup(obj);
+
+    err = bpf_object__load(obj);
+    if (!ASSERT_OK(err, "obj_load"))
+        goto cleanup;
+
+    prog = bpf_object__find_program_by_name(obj, "handler1");
+    if (!ASSERT_OK_PTR(prog, "prog_by_name"))
+        goto cleanup;
+
+    link = bpf_program__attach(prog);
+    if (!ASSERT_OK_PTR(link, "prog_attach"))
+        goto cleanup;
+
+    /* trigger tracepoint */
+    usleep(1);
+
+    map = bpf_object__find_map_by_name(obj, ".bss");
+    if (!ASSERT_OK_PTR(map, "bss_map_by_name"))
+        goto cleanup;
+
+    bss = bpf_map__initial_value(map, &bss_sz);
+    if (!ASSERT_OK_PTR(bss, "bss_get"))
+        goto cleanup;
+
+    result = subskeleton_lib_subresult(obj) * 10;
+    ASSERT_EQ(bss->out1, result, "out1");
+
+cleanup:
+    bpf_link__destroy(link);
+    bpf_object__close(obj);
+}
+
+void test_subskeleton(void)
+{
+    if (test__start_subtest("skel_subskel"))
+        subtest_skel_subskeleton();
+    if (test__start_subtest("obj_subskel"))
+        subtest_obj_subskeleton();
+}
4 changes: 2 additions & 2 deletions tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
@@ -28,8 +28,8 @@ struct {
     },
 };
 
-SEC(".data.A") struct bpf_spin_lock lockA;
-SEC(".data.B") struct bpf_spin_lock lockB;
+static struct bpf_spin_lock lockA SEC(".data.A");
+static struct bpf_spin_lock lockB SEC(".data.B");
 
 SEC("?tc")
 int lock_id_kptr_preserve(void *ctx)
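
On why patch #1 is needed (my reading of the series): libbpf treats a
global-data map as mmapable whenever its data section contains a non-static
variable, while the kernel refuses to mmap() a map whose value holds a
struct bpf_spin_lock. Making the locks static keeps the .data.A and .data.B
maps out of the re-mmap() path that the libbpf.c change above now runs for
every loaded object. A sketch of the convention, with hypothetical names:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* a special-typed global should be static: its backing map cannot be
 * mmap()'ed, so libbpf must not consider the data section mmapable
 */
static struct bpf_spin_lock demo_lock SEC(".data.demo_lock");

/* an ordinary global may stay non-static: its map remains mmapable
 * and is re-mmap()'ed after load by the libbpf.c change above
 */
int demo_counter SEC(".data.demo_counter");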
