// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Yafang Shao <laoar.shao@gmail.com> */

#include <test_progs.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#include "test_for_each_cpu.skel.h"

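/*
 * Attach the psi_cgroup iterator prog to the cgroup @fd, read the iterator
 * output ("nr_running <n> ret <r>") and verify it: the reported result must
 * equal @res, and nr_running must be at least @running (exactly 0 when
 * @running is 0).
 */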
static void verify_percpu_psi_value(struct test_for_each_cpu *skel, int fd, __u32 running, int res)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int len, iter_fd, result;
	struct bpf_link *link;
	static char buf[128];
	__u32 nr_running;
	size_t left;
	char *p;

	memset(&linfo, 0, sizeof(linfo));
	linfo.cgroup.cgroup_fd = fd;
	linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(skel->progs.psi_cgroup, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "iter_fd"))
		goto free_link;

	memset(buf, 0, sizeof(buf));
	left = ARRAY_SIZE(buf);
	p = buf;
	while ((len = read(iter_fd, p, left)) > 0) {
		p += len;
		left -= len;
	}

	ASSERT_EQ(sscanf(buf, "nr_running %u ret %d\n", &nr_running, &result), 2, "seq_format");
	ASSERT_EQ(result, res, "for_each_cpu_result");
	if (running)
		ASSERT_GE(nr_running, running, "nr_running");
	else
		ASSERT_EQ(nr_running, running, "nr_running");

	/* read() after iter finishes should be ok. */
	if (len == 0)
		ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read");
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
}

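/*
 * Run the iterator against the root cgroup with CPU_MASK_POSSIBLE: the
 * iteration is expected to cover every possible CPU (result == nr_cpus),
 * and at least the current task must be reported as running.
 */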
static void test_root_cgroup(struct test_for_each_cpu *skel)
{
	int cgrp_fd, nr_cpus;

	cgrp_fd = get_root_cgroup();
	if (!ASSERT_GE(cgrp_fd, 0, "get_root_cgroup"))
		return;

	skel->bss->cpu_mask = CPU_MASK_POSSIBLE;
	skel->bss->pid = 0;
	nr_cpus = bpf_num_possible_cpus();
	/* At least current is running */
	verify_percpu_psi_value(skel, cgrp_fd, 1, nr_cpus);
	close(cgrp_fd);
}

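/*
 * Same check against a freshly created, empty child cgroup: the iteration
 * still covers every possible CPU, but no task in the cgroup is running.
 */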
static void test_child_cgroup(struct test_for_each_cpu *skel)
{
	int cgrp_fd, nr_cpus;

	cgrp_fd = create_and_get_cgroup("for_each_cpu");
	if (!ASSERT_GE(cgrp_fd, 0, "create cgrp"))
		return;

	skel->bss->cpu_mask = CPU_MASK_POSSIBLE;
	skel->bss->pid = 0;
	nr_cpus = bpf_num_possible_cpus();
	/* No tasks in the cgroup */
	verify_percpu_psi_value(skel, cgrp_fd, 0, nr_cpus);
	close(cgrp_fd);
	remove_cgroup("for_each_cpu");
}

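/*
 * Invalid cpumask/pid combinations must be rejected by the BPF prog with
 * -EINVAL, which is reported back through the "ret" field of the iterator
 * output.
 */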
static void verify_invalid_cpumask(struct test_for_each_cpu *skel, int fd, __u32 cpumask, __u32 pid)
{
	skel->bss->cpu_mask = cpumask;
	skel->bss->pid = pid;
	verify_percpu_psi_value(skel, fd, 0, -EINVAL);
}

static void test_invalid_cpumask(struct test_for_each_cpu *skel)
{
	int cgrp_fd;

	cgrp_fd = create_and_get_cgroup("for_each_cpu");
	if (!ASSERT_GE(cgrp_fd, 0, "create cgrp"))
		return;

	verify_invalid_cpumask(skel, cgrp_fd, CPU_MASK_POSSIBLE, 1);
	verify_invalid_cpumask(skel, cgrp_fd, CPU_MASK_PRESENT, 1);
	verify_invalid_cpumask(skel, cgrp_fd, CPU_MASK_ONLINE, 1);
	verify_invalid_cpumask(skel, cgrp_fd, CPU_MASK_TASK, 0);
	verify_invalid_cpumask(skel, cgrp_fd, -1, 0);
	verify_invalid_cpumask(skel, cgrp_fd, -1, 1);
	close(cgrp_fd);
	remove_cgroup("for_each_cpu");
}

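/*
 * Entry point: exercise the iterator against the root cgroup, an empty
 * child cgroup, and invalid cpumask/pid combinations.
 */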
void test_for_each_cpu(void)
{
	struct test_for_each_cpu *skel = NULL;

	skel = test_for_each_cpu__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_for_each_cpu__open_and_load"))
		return;

	if (!ASSERT_OK(setup_cgroup_environment(), "setup_cgroup_environment")) {
		test_for_each_cpu__destroy(skel);
		return;
	}

	if (test__start_subtest("psi_system"))
		test_root_cgroup(skel);
	if (test__start_subtest("psi_cgroup"))
		test_child_cgroup(skel);
	if (test__start_subtest("invalid_cpumask"))
		test_invalid_cpumask(skel);

	test_for_each_cpu__destroy(skel);
	cleanup_cgroup_environment();
}