// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */

#include <string.h>
#include <linux/bpf.h>
#include <linux/limits.h>
#include <test_progs.h>
#include "trace_helpers.h"
#include "test_fill_link_info.skel.h"

#define TP_CAT "sched"
#define TP_NAME "sched_switch"
#define KPROBE_FUNC "tcp_rcv_established"
#define UPROBE_FILE "/proc/self/exe"

/* uprobe attach point */
static noinline void uprobe_func(void)
{
	asm volatile ("");
}

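/*
 * Read the link info back with bpf_link_get_info_by_fd() and check that the
 * perf event type, address/offset and probe name match what was attached.
 * The first query passes no name buffer (name_len is expected to stay 0);
 * the query is then retried with 'buf' supplied so the name can be compared.
 */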
static int verify_link_info(int fd, enum bpf_perf_event_type type, long addr, ssize_t offset)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	char buf[PATH_MAX];
	int err = 0;

	memset(&info, 0, sizeof(info));
	buf[0] = '\0';

again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (!ASSERT_OK(err, "get_link_info"))
		return -1;

	switch (info.type) {
	case BPF_LINK_TYPE_PERF_EVENT:
		if (!ASSERT_EQ(info.perf_event.type, type, "perf_type_match"))
			return -1;

		switch (info.perf_event.type) {
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			ASSERT_EQ(info.perf_event.kprobe.offset, offset, "kprobe_offset");

			/* In case kptr setting is not permitted or MAX_SYMS is reached */
			if (addr)
				ASSERT_EQ(info.perf_event.kprobe.addr, addr, "kprobe_addr");

			if (!info.perf_event.kprobe.func_name) {
				ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
				info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
				info.perf_event.kprobe.name_len = sizeof(buf);
				goto again;
			}

			err = strncmp(u64_to_ptr(info.perf_event.kprobe.func_name), KPROBE_FUNC,
				      strlen(KPROBE_FUNC));
			ASSERT_EQ(err, 0, "cmp_kprobe_func_name");
			break;
		case BPF_PERF_EVENT_TRACEPOINT:
			if (!info.perf_event.tracepoint.tp_name) {
				ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len");
				info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
				info.perf_event.tracepoint.name_len = sizeof(buf);
				goto again;
			}

			err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
				      strlen(TP_NAME));
			ASSERT_EQ(err, 0, "cmp_tp_name");
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");

			if (!info.perf_event.uprobe.file_name) {
				ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len");
				info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
				info.perf_event.uprobe.name_len = sizeof(buf);
				goto again;
			}

			err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
				      strlen(UPROBE_FILE));
			ASSERT_EQ(err, 0, "cmp_file_name");
			break;
		default:
			break;
		}
		break;
	default:
		switch (type) {
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
		case BPF_PERF_EVENT_TRACEPOINT:
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			err = -1;
			break;
		default:
			break;
		}
		break;
	}
	return err;
}

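/*
 * Feed bpf_link_get_info_by_fd() deliberately inconsistent or unreadable
 * name buffers and check that the kernel returns -EINVAL/-EFAULT without
 * filling in any of the kprobe fields.
 */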
static void kprobe_fill_invalid_user_buffer(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	int err = 0;

	memset(&info, 0, sizeof(info));

	info.perf_event.kprobe.func_name = 0x1; /* invalid address */
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "invalid_buff_and_len");

	info.perf_event.kprobe.name_len = 64;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "invalid_buff");

	info.perf_event.kprobe.func_name = 0;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "invalid_len");

	ASSERT_EQ(info.perf_event.kprobe.addr, 0, "func_addr");
	ASSERT_EQ(info.perf_event.kprobe.offset, 0, "func_offset");
	ASSERT_EQ(info.perf_event.type, 0, "type");
}

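/*
 * Attach a kprobe or kretprobe in link mode to KPROBE_FUNC and verify the
 * link info reported for it.  When 'invalid' is set, run the invalid user
 * buffer checks instead.
 */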
static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type,
				       bool retprobe, bool invalid)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.attach_mode = PROBE_ATTACH_MODE_LINK,
		.retprobe = retprobe,
	);
	int link_fd, err;
	long addr;

	skel->links.kprobe_run = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run,
								 KPROBE_FUNC, &opts);
	if (!ASSERT_OK_PTR(skel->links.kprobe_run, "attach_kprobe"))
		return;

	link_fd = bpf_link__fd(skel->links.kprobe_run);
	if (!ASSERT_GE(link_fd, 0, "link_fd"))
		return;

	addr = ksym_get_addr(KPROBE_FUNC);
	if (!invalid) {
		err = verify_link_info(link_fd, type, addr, 0);
		ASSERT_OK(err, "verify_link_info");
	} else {
		kprobe_fill_invalid_user_buffer(link_fd);
	}
	bpf_link__detach(skel->links.kprobe_run);
}

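/* Attach to the sched:sched_switch tracepoint and verify the reported link info. */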
static void test_tp_fill_link_info(struct test_fill_link_info *skel)
{
	int link_fd, err;

	skel->links.tp_run = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
	if (!ASSERT_OK_PTR(skel->links.tp_run, "attach_tp"))
		return;

	link_fd = bpf_link__fd(skel->links.tp_run);
	if (!ASSERT_GE(link_fd, 0, "link_fd"))
		return;

	err = verify_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0);
	ASSERT_OK(err, "verify_link_info");
	bpf_link__detach(skel->links.tp_run);
}

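/*
 * Attach a uprobe or uretprobe to uprobe_func() in this binary at the given
 * offset and verify the link info reported for it.
 */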
static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type, ssize_t offset,
				       bool retprobe)
{
	int link_fd, err;

	skel->links.uprobe_run = bpf_program__attach_uprobe(skel->progs.uprobe_run, retprobe,
							    0, /* self pid */
							    UPROBE_FILE, offset);
	if (!ASSERT_OK_PTR(skel->links.uprobe_run, "attach_uprobe"))
		return;

	link_fd = bpf_link__fd(skel->links.uprobe_run);
	if (!ASSERT_GE(link_fd, 0, "link_fd"))
		return;

	err = verify_link_info(link_fd, type, 0, offset);
	ASSERT_OK(err, "verify_link_info");
	bpf_link__detach(skel->links.uprobe_run);
}

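/* Entry point: run each fill_link_info subtest against a single loaded skeleton. */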
void serial_test_fill_link_info(void)
{
	struct test_fill_link_info *skel;
	ssize_t offset;

	skel = test_fill_link_info__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		goto cleanup;

	/* load kallsyms to compare the addr */
	if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
		goto cleanup;
	if (test__start_subtest("kprobe_link_info"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false, false);
	if (test__start_subtest("kretprobe_link_info"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KRETPROBE, true, false);
	if (test__start_subtest("fill_invalid_user_buff"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false, true);
	if (test__start_subtest("tracepoint_link_info"))
		test_tp_fill_link_info(skel);

	offset = get_uprobe_offset(&uprobe_func);
	if (test__start_subtest("uprobe_link_info"))
		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_UPROBE, offset, false);
	if (test__start_subtest("uretprobe_link_info"))
		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_URETPROBE, offset, true);

cleanup:
	test_fill_link_info__destroy(skel);
}