From 18940f14fd915ae97e5578cb6acf1e7400675e69 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Feb 2022 23:18:05 +0000 Subject: [PATCH] build(deps): bump github.com/cilium/ebpf from 0.7.0 to 0.8.1 Bumps [github.com/cilium/ebpf](https://github.com/cilium/ebpf) from 0.7.0 to 0.8.1. - [Release notes](https://github.com/cilium/ebpf/releases) - [Commits](https://github.com/cilium/ebpf/compare/v0.7.0...v0.8.1) --- updated-dependencies: - dependency-name: github.com/cilium/ebpf dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 25 +- vendor/github.com/cilium/ebpf/ARCHITECTURE.md | 6 + vendor/github.com/cilium/ebpf/Makefile | 53 +- vendor/github.com/cilium/ebpf/README.md | 10 +- vendor/github.com/cilium/ebpf/asm/func.go | 7 + .../github.com/cilium/ebpf/asm/func_string.go | 11 +- .../github.com/cilium/ebpf/asm/instruction.go | 190 +++- vendor/github.com/cilium/ebpf/asm/jump.go | 40 +- vendor/github.com/cilium/ebpf/asm/opcode.go | 101 +- .../cilium/ebpf/asm/opcode_string.go | 18 +- vendor/github.com/cilium/ebpf/asm/register.go | 1 + vendor/github.com/cilium/ebpf/collection.go | 68 +- vendor/github.com/cilium/ebpf/elf_reader.go | 489 +++++---- .../github.com/cilium/ebpf/elf_reader_fuzz.go | 22 - vendor/github.com/cilium/ebpf/go.mod | 4 +- vendor/github.com/cilium/ebpf/go.sum | 21 +- vendor/github.com/cilium/ebpf/info.go | 152 ++- .../cilium/ebpf/internal/btf/btf.go | 479 +++++---- .../cilium/ebpf/internal/btf/core.go | 41 +- .../cilium/ebpf/internal/btf/ext_info.go | 541 ++++++---- .../cilium/ebpf/internal/btf/format.go | 304 ++++++ .../cilium/ebpf/internal/btf/fuzz.go | 50 - .../cilium/ebpf/internal/btf/info.go | 23 +- .../cilium/ebpf/internal/btf/syscalls.go | 31 - .../cilium/ebpf/internal/btf/types.go | 107 +- vendor/github.com/cilium/ebpf/internal/elf.go | 11 + .../github.com/cilium/ebpf/internal/errors.go | 12 +- vendor/github.com/cilium/ebpf/internal/fd.go | 69 -- vendor/github.com/cilium/ebpf/internal/io.go | 48 +- .../github.com/cilium/ebpf/internal/output.go | 84 ++ .../cilium/ebpf/internal/pinning.go | 32 +- .../cilium/ebpf/internal/sys/doc.go | 4 + .../github.com/cilium/ebpf/internal/sys/fd.go | 96 ++ .../cilium/ebpf/internal/{ => sys}/ptr.go | 9 +- .../ebpf/internal/{ => sys}/ptr_32_be.go | 2 +- .../ebpf/internal/{ => sys}/ptr_32_le.go | 2 +- .../cilium/ebpf/internal/{ => sys}/ptr_64.go | 2 +- .../cilium/ebpf/internal/sys/syscall.go | 123 +++ .../cilium/ebpf/internal/sys/types.go | 954 ++++++++++++++++++ .../cilium/ebpf/internal/syscall.go | 304 ------ .../cilium/ebpf/internal/syscall_string.go | 56 - .../cilium/ebpf/internal/unix/types_linux.go | 25 +- .../cilium/ebpf/internal/unix/types_other.go | 16 +- .../github.com/cilium/ebpf/internal/vdso.go | 150 +++ .../cilium/ebpf/internal/version.go | 83 +- vendor/github.com/cilium/ebpf/link/cgroup.go | 6 + .../github.com/cilium/ebpf/link/freplace.go | 88 -- vendor/github.com/cilium/ebpf/link/iter.go | 31 +- vendor/github.com/cilium/ebpf/link/kprobe.go | 97 +- vendor/github.com/cilium/ebpf/link/link.go | 213 +++- vendor/github.com/cilium/ebpf/link/netns.go | 22 +- .../github.com/cilium/ebpf/link/perf_event.go | 24 +- vendor/github.com/cilium/ebpf/link/program.go | 10 +- .../cilium/ebpf/link/raw_tracepoint.go | 60 +- .../cilium/ebpf/link/socket_filter.go | 40 + .../github.com/cilium/ebpf/link/syscalls.go | 120 +-- vendor/github.com/cilium/ebpf/link/tracing.go | 153 +++ 
vendor/github.com/cilium/ebpf/link/uprobe.go | 85 +- vendor/github.com/cilium/ebpf/link/xdp.go | 54 + vendor/github.com/cilium/ebpf/linker.go | 164 +-- vendor/github.com/cilium/ebpf/map.go | 414 +++++--- vendor/github.com/cilium/ebpf/marshalers.go | 27 +- vendor/github.com/cilium/ebpf/prog.go | 325 ++++-- vendor/github.com/cilium/ebpf/run-tests.sh | 15 +- vendor/github.com/cilium/ebpf/syscalls.go | 324 +----- vendor/github.com/cilium/ebpf/types.go | 22 +- vendor/github.com/cilium/ebpf/types_string.go | 7 +- vendor/modules.txt | 3 +- 69 files changed, 4768 insertions(+), 2414 deletions(-) delete mode 100644 vendor/github.com/cilium/ebpf/elf_reader_fuzz.go create mode 100644 vendor/github.com/cilium/ebpf/internal/btf/format.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/btf/fuzz.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/btf/syscalls.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/fd.go create mode 100644 vendor/github.com/cilium/ebpf/internal/output.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/doc.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/fd.go rename vendor/github.com/cilium/ebpf/internal/{ => sys}/ptr.go (71%) rename vendor/github.com/cilium/ebpf/internal/{ => sys}/ptr_32_be.go (93%) rename vendor/github.com/cilium/ebpf/internal/{ => sys}/ptr_32_le.go (94%) rename vendor/github.com/cilium/ebpf/internal/{ => sys}/ptr_64.go (95%) create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/syscall.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/types.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/syscall.go delete mode 100644 vendor/github.com/cilium/ebpf/internal/syscall_string.go create mode 100644 vendor/github.com/cilium/ebpf/internal/vdso.go delete mode 100644 vendor/github.com/cilium/ebpf/link/freplace.go create mode 100644 vendor/github.com/cilium/ebpf/link/socket_filter.go create mode 100644 vendor/github.com/cilium/ebpf/link/tracing.go create mode 100644 vendor/github.com/cilium/ebpf/link/xdp.go diff --git a/go.mod b/go.mod index 59c6df10912..8202eedcae3 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.16 require ( github.com/checkpoint-restore/go-criu/v5 v5.2.0 - github.com/cilium/ebpf v0.7.0 + github.com/cilium/ebpf v0.8.1 github.com/containerd/console v1.0.3 github.com/coreos/go-systemd/v22 v22.3.2 github.com/cyphar/filepath-securejoin v0.2.3 diff --git a/go.sum b/go.sum index 9018c6fbd2c..9158be4cb97 100644 --- a/go.sum +++ b/go.sum @@ -1,34 +1,37 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/checkpoint-restore/go-criu/v5 v5.2.0 h1:QwsRK9EdBr2kQr44DqSdBrP4dULp2+4EkqounYQOnF8= github.com/checkpoint-restore/go-criu/v5 v5.2.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= -github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/cilium/ebpf v0.8.1 h1:bLSSEbBLqGPXxls55pGr5qWZaTqcmfDJHhou7t254ao= +github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 
v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/mrunalp/fileutils v0.5.0 h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4= @@ -39,6 +42,8 @@ github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK9 github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod 
h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y= @@ -76,4 +81,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md index 6cbb31b6481..8cd7e2486e7 100644 --- a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md +++ b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md @@ -78,3 +78,9 @@ tend to use bpf_link to do so. Older hooks unfortunately use a combination of syscalls, netlink messages, etc. Adding support for a new link type should not pull in large dependencies like netlink, so XDP programs or tracepoints are out of scope. + +Each bpf_link_type has one corresponding Go type, e.g. `link.tracing` corresponds +to BPF_LINK_TRACING. In general, these types should be unexported as long as they +don't export methods outside of the Link interface. Each Go type may have multiple +exported constructors. For example `AttachTracing` and `AttachLSM` create a +tracing link, but are distinct functions since they may require different arguments. diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile index 0bc15c0810c..76a448caa15 100644 --- a/vendor/github.com/cilium/ebpf/Makefile +++ b/vendor/github.com/cilium/ebpf/Makefile @@ -1,14 +1,20 @@ # The development version of clang is distributed as the 'clang' binary, # while stable/released versions have a version number attached. # Pin the default clang to a stable version. -CLANG ?= clang-12 -CFLAGS := -target bpf -O2 -g -Wall -Werror $(CFLAGS) +CLANG ?= clang-13 +STRIP ?= llvm-strip-13 +CFLAGS := -O2 -g -Wall -Werror $(CFLAGS) # Obtain an absolute path to the directory of the Makefile. # Assume the Makefile is in the root of the repository. REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) +# Prefer podman if installed, otherwise use docker. +# Note: Setting the var at runtime will always override. 
+CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker) +CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman),, --user "${UIDGID}") + IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) @@ -26,23 +32,27 @@ TARGETS := \ testdata/strings \ testdata/freplace \ testdata/iproute2_map_compat \ + testdata/map_spin_lock \ + testdata/subprog_reloc \ + testdata/fwd_decl \ internal/btf/testdata/relocs -.PHONY: all clean docker-all docker-shell +.PHONY: all clean container-all container-shell generate -.DEFAULT_TARGET = docker-all +.DEFAULT_TARGET = container-all -# Build all ELF binaries using a Dockerized LLVM toolchain. -docker-all: - docker run --rm --user "${UIDGID}" \ +# Build all ELF binaries using a containerized LLVM toolchain. +container-all: + ${CONTAINER_ENGINE} run --rm ${CONTAINER_RUN_ARGS} \ -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \ --env CFLAGS="-fdebug-prefix-map=/ebpf=." \ + --env HOME="/tmp" \ "${IMAGE}:${VERSION}" \ - make all + $(MAKE) all -# (debug) Drop the user into a shell inside the Docker container as root. -docker-shell: - docker run --rm -ti \ +# (debug) Drop the user into a shell inside the container as root. +container-shell: + ${CONTAINER_ENGINE} run --rm -ti \ -v "${REPODIR}":/ebpf -w /ebpf \ "${IMAGE}:${VERSION}" @@ -50,21 +60,32 @@ clean: -$(RM) testdata/*.elf -$(RM) internal/btf/testdata/*.elf -all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) +all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf +# $BPF_CLANG is used in go:generate invocations. +generate: export BPF_CLANG := $(CLANG) +generate: export BPF_CFLAGS := $(CFLAGS) +generate: + go generate ./cmd/bpf2go/test + cd examples/ && go generate ./... + testdata/loader-%-el.elf: testdata/loader.c - $* $(CFLAGS) -mlittle-endian -c $< -o $@ + $* $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ testdata/loader-%-eb.elf: testdata/loader.c - $* $(CFLAGS) -mbig-endian -c $< -o $@ + $* $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ %-el.elf: %.c - $(CLANG) $(CFLAGS) -mlittle-endian -c $< -o $@ + $(CLANG) $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ %-eb.elf : %.c - $(CLANG) $(CFLAGS) -mbig-endian -c $< -o $@ + $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ # Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf .PHONY: vmlinux-btf diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md index 01e2fff92bb..69a6bb0e968 100644 --- a/vendor/github.com/cilium/ebpf/README.md +++ b/vendor/github.com/cilium/ebpf/README.md @@ -45,13 +45,16 @@ This library includes the following packages: `PERF_EVENT_ARRAY` * [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a `BPF_MAP_TYPE_RINGBUF` map - +* [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent + of `bpftool feature probe` for discovering BPF-related kernel features using native Go. +* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift + the `RLIMIT_MEMLOCK` constraint on kernels before 5.11. ## Requirements * A version of Go that is [supported by upstream](https://golang.org/doc/devel/release.html#policy) -* Linux >= 4.9. CI is run against LTS releases. +* Linux >= 4.4. CI is run against LTS releases. 
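The README hunk above documents the new `rlimit` helper package for lifting the `RLIMIT_MEMLOCK` constraint on kernels before 5.11. Below is a minimal sketch of how a consumer of this release might use it before loading an object; it assumes `rlimit.RemoveMemlock` behaves as the README describes, and the object path `program.o` is a placeholder, not part of this patch.

```go
// Minimal sketch, assuming rlimit.RemoveMemlock exists as documented above.
package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/rlimit"
)

func main() {
	// On kernels before 5.11, BPF objects count against RLIMIT_MEMLOCK;
	// lift the limit before loading anything.
	if err := rlimit.RemoveMemlock(); err != nil {
		log.Fatalf("removing memlock limit: %v", err)
	}

	// "program.o" is a placeholder object file, not part of this patch.
	spec, err := ebpf.LoadCollectionSpec("program.o")
	if err != nil {
		log.Fatalf("loading collection spec: %v", err)
	}
	_ = spec
}
```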
## Regenerating Testdata @@ -59,6 +62,9 @@ Run `make` in the root of this repository to rebuild testdata in all subpackages. This requires Docker, as it relies on a standardized build environment to keep the build output stable. +It is possible to regenerate data using Podman by overriding the `CONTAINER_*` +variables: `CONTAINER_ENGINE=podman CONTAINER_RUN_ARGS= make`. + The toolchain image build files are kept in [testdata/docker/](testdata/docker/). ## License diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go index bfa5d59c976..b75a2934ee6 100644 --- a/vendor/github.com/cilium/ebpf/asm/func.go +++ b/vendor/github.com/cilium/ebpf/asm/func.go @@ -190,6 +190,13 @@ const ( FnSysBpf FnBtfFindByNameKind FnSysClose + FnTimerInit + FnTimerSetCallback + FnTimerStart + FnTimerCancel + FnGetFuncIp + FnGetAttachCookie + FnTaskPtRegs ) // Call emits a function call. diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go index 5a0e333639a..179bc24f1a3 100644 --- a/vendor/github.com/cilium/ebpf/asm/func_string.go +++ b/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -177,11 +177,18 @@ func _() { _ = x[FnSysBpf-166] _ = x[FnBtfFindByNameKind-167] _ = x[FnSysClose-168] + _ = x[FnTimerInit-169] + _ = x[FnTimerSetCallback-170] + _ = x[FnTimerStart-171] + _ = x[FnTimerCancel-172] + _ = x[FnGetFuncIp-173] + _ = x[FnGetAttachCookie-174] + _ = x[FnTaskPtRegs-175] } -const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSk
cToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysClose" +const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegs" -var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 
1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497} +var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591} func (i BuiltinFunc) String() string { if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go index 64d717d156d..22975e8f72b 100644 --- a/vendor/github.com/cilium/ebpf/asm/instruction.go +++ b/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -26,13 +26,17 @@ func (rio RawInstructionOffset) Bytes() uint64 { // Instruction is a single eBPF instruction. type Instruction struct { - OpCode OpCode - Dst Register - Src Register - Offset int16 - Constant int64 + OpCode OpCode + Dst Register + Src Register + Offset int16 + Constant int64 + + // Reference denotes a reference (e.g. a jump) to another symbol. Reference string - Symbol string + + // Symbol denotes an instruction at the start of a function body. + Symbol string } // Sym creates a symbol. @@ -43,33 +47,45 @@ func (ins Instruction) Sym(name string) Instruction { // Unmarshal decodes a BPF instruction. func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { - var bi bpfInstruction - err := binary.Read(r, bo, &bi) - if err != nil { + data := make([]byte, InstructionSize) + if _, err := io.ReadFull(r, data); err != nil { return 0, err } - ins.OpCode = bi.OpCode - ins.Offset = bi.Offset - ins.Constant = int64(bi.Constant) - ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo) - if err != nil { - return 0, fmt.Errorf("can't unmarshal registers: %s", err) + ins.OpCode = OpCode(data[0]) + + regs := data[1] + switch bo { + case binary.LittleEndian: + ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4) + case binary.BigEndian: + ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf) } - if !bi.OpCode.IsDWordLoad() { + ins.Offset = int16(bo.Uint16(data[2:4])) + // Convert to int32 before widening to int64 + // to ensure the signed bit is carried over. 
+ ins.Constant = int64(int32(bo.Uint32(data[4:8]))) + + if !ins.OpCode.IsDWordLoad() { return InstructionSize, nil } - var bi2 bpfInstruction - if err := binary.Read(r, bo, &bi2); err != nil { + // Pull another instruction from the stream to retrieve the second + // half of the 64-bit immediate value. + if _, err := io.ReadFull(r, data); err != nil { // No Wrap, to avoid io.EOF clash return 0, errors.New("64bit immediate is missing second half") } - if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 { + + // Require that all fields other than the value are zero. + if bo.Uint32(data[0:4]) != 0 { return 0, errors.New("64bit immediate has non-zero fields") } - ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant))) + + cons1 := uint32(ins.Constant) + cons2 := int32(bo.Uint32(data[4:8])) + ins.Constant = int64(cons2)<<32 | int64(cons1) return 2 * InstructionSize, nil } @@ -93,14 +109,12 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) return 0, fmt.Errorf("can't marshal registers: %s", err) } - bpfi := bpfInstruction{ - ins.OpCode, - regs, - ins.Offset, - cons, - } - - if err := binary.Write(w, bo, &bpfi); err != nil { + data := make([]byte, InstructionSize) + data[0] = byte(ins.OpCode) + data[1] = byte(regs) + bo.PutUint16(data[2:4], uint16(ins.Offset)) + bo.PutUint32(data[4:8], uint32(cons)) + if _, err := w.Write(data); err != nil { return 0, err } @@ -108,11 +122,11 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) return InstructionSize, nil } - bpfi = bpfInstruction{ - Constant: int32(ins.Constant >> 32), - } - - if err := binary.Write(w, bo, &bpfi); err != nil { + // The first half of the second part of a double-wide instruction + // must be zero. The second half carries the value. + bo.PutUint32(data[0:4], 0) + bo.PutUint32(data[4:8], uint32(ins.Constant>>32)) + if _, err := w.Write(data); err != nil { return 0, err } @@ -181,6 +195,18 @@ func (ins *Instruction) IsFunctionCall() bool { return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall } +// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer. +func (ins *Instruction) IsLoadOfFunctionPointer() bool { + return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc +} + +// IsFunctionReference returns true if the instruction references another BPF +// function, either by invoking a Call jump operation or by loading a function +// pointer. +func (ins *Instruction) IsFunctionReference() bool { + return ins.IsFunctionCall() || ins.IsLoadOfFunctionPointer() +} + // IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call. 
func (ins *Instruction) IsBuiltinCall() bool { return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0 @@ -226,8 +252,8 @@ func (ins Instruction) Format(f fmt.State, c rune) { } fmt.Fprintf(f, "%v ", op) - switch cls := op.Class(); cls { - case LdClass, LdXClass, StClass, StXClass: + switch cls := op.Class(); { + case cls.isLoadOrStore(): switch op.Mode() { case ImmMode: fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) @@ -241,7 +267,7 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) } - case ALU64Class, ALUClass: + case cls.IsALU(): fmt.Fprintf(f, "dst: %s ", ins.Dst) if op.ALUOp() == Swap || op.Source() == ImmSource { fmt.Fprintf(f, "imm: %d", ins.Constant) @@ -249,7 +275,7 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprintf(f, "src: %s", ins.Src) } - case JumpClass: + case cls.IsJump(): switch jop := op.JumpOp(); jop { case Call: if ins.Src == PseudoCall { @@ -275,13 +301,60 @@ ref: } } +// Size returns the amount of bytes ins would occupy in binary form. +func (ins Instruction) Size() uint64 { + return uint64(InstructionSize * ins.OpCode.rawInstructions()) +} + // Instructions is an eBPF program. type Instructions []Instruction +// Unmarshal unmarshals an Instructions from a binary instruction stream. +// All instructions in insns are replaced by instructions decoded from r. +func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error { + if len(*insns) > 0 { + *insns = nil + } + + var offset uint64 + for { + var ins Instruction + n, err := ins.Unmarshal(r, bo) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return fmt.Errorf("offset %d: %w", offset, err) + } + + *insns = append(*insns, ins) + offset += n + } + + return nil +} + +// Name returns the name of the function insns belongs to, if any. +func (insns Instructions) Name() string { + if len(insns) == 0 { + return "" + } + return insns[0].Symbol +} + func (insns Instructions) String() string { return fmt.Sprint(insns) } +// Size returns the amount of bytes insns would occupy in binary form. +func (insns Instructions) Size() uint64 { + var sum uint64 + for _, ins := range insns { + sum += ins.Size() + } + return sum +} + // RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. // // Returns an error if the symbol isn't used, see IsUnreferencedSymbol. @@ -331,6 +404,31 @@ func (insns Instructions) SymbolOffsets() (map[string]int, error) { return offsets, nil } +// FunctionReferences returns a set of symbol names these Instructions make +// bpf-to-bpf calls to. +func (insns Instructions) FunctionReferences() map[string]bool { + calls := make(map[string]bool) + + for _, ins := range insns { + if ins.Constant != -1 { + // BPF-to-BPF calls have -1 constants. + continue + } + + if ins.Reference == "" { + continue + } + + if !ins.IsFunctionReference() { + continue + } + + calls[ins.Reference] = true + } + + return calls +} + // ReferenceOffsets returns the set of references and their offset in // the instructions. 
func (insns Instructions) ReferenceOffsets() map[string][]int { @@ -464,13 +562,6 @@ func (iter *InstructionIterator) Next() bool { return true } -type bpfInstruction struct { - OpCode OpCode - Registers bpfRegisters - Offset int16 - Constant int32 -} - type bpfRegisters uint8 func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) { @@ -484,17 +575,6 @@ func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, erro } } -func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) { - switch bo { - case binary.LittleEndian: - return Register(r & 0xF), Register(r >> 4), nil - case binary.BigEndian: - return Register(r >> 4), Register(r & 0xf), nil - default: - return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo) - } -} - type unreferencedSymbolError struct { symbol string } diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go index 7757179de64..199c0694064 100644 --- a/vendor/github.com/cilium/ebpf/asm/jump.go +++ b/vendor/github.com/cilium/ebpf/asm/jump.go @@ -60,14 +60,22 @@ func (op JumpOp) Op(source Source) OpCode { return OpCode(JumpClass).SetJumpOp(op).SetSource(source) } -// Imm compares dst to value, and adjusts PC by offset if the condition is fulfilled. +// Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled. func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { - if op == Exit || op == Call || op == Ja { - return Instruction{OpCode: InvalidOpCode} + return Instruction{ + OpCode: op.opCode(JumpClass, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + Reference: label, } +} +// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. +func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction { return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(ImmSource), + OpCode: op.opCode(Jump32Class, ImmSource), Dst: dst, Offset: -1, Constant: int64(value), @@ -75,14 +83,22 @@ func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { } } -// Reg compares dst to src, and adjusts PC by offset if the condition is fulfilled. +// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled. func (op JumpOp) Reg(dst, src Register, label string) Instruction { - if op == Exit || op == Call || op == Ja { - return Instruction{OpCode: InvalidOpCode} + return Instruction{ + OpCode: op.opCode(JumpClass, RegSource), + Dst: dst, + Src: src, + Offset: -1, + Reference: label, } +} +// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. +func (op JumpOp) Reg32(dst, src Register, label string) Instruction { return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(RegSource), + OpCode: op.opCode(Jump32Class, RegSource), Dst: dst, Src: src, Offset: -1, @@ -90,6 +106,14 @@ func (op JumpOp) Reg(dst, src Register, label string) Instruction { } } +func (op JumpOp) opCode(class Class, source Source) OpCode { + if op == Exit || op == Call || op == Ja { + return InvalidOpCode + } + + return OpCode(class).SetJumpOp(op).SetSource(source) +} + // Label adjusts PC to the address of the label. 
func (op JumpOp) Label(label string) Instruction { if op == Call { diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go index 6edc3cf5917..f6d8e0668a8 100644 --- a/vendor/github.com/cilium/ebpf/asm/opcode.go +++ b/vendor/github.com/cilium/ebpf/asm/opcode.go @@ -7,14 +7,6 @@ import ( //go:generate stringer -output opcode_string.go -type=Class -type encoding int - -const ( - unknownEncoding encoding = iota - loadOrStore - jumpOrALU -) - // Class of operations // // msb lsb @@ -38,19 +30,39 @@ const ( ALUClass Class = 0x04 // JumpClass jump operators JumpClass Class = 0x05 + // Jump32Class jump operators with 32 bit comparaisons + // Requires kernel 5.1 + Jump32Class Class = 0x06 // ALU64Class arithmetic in 64 bit mode ALU64Class Class = 0x07 ) -func (cls Class) encoding() encoding { - switch cls { - case LdClass, LdXClass, StClass, StXClass: - return loadOrStore - case ALU64Class, ALUClass, JumpClass: - return jumpOrALU - default: - return unknownEncoding - } +// IsLoad checks if this is either LdClass or LdXClass. +func (cls Class) IsLoad() bool { + return cls == LdClass || cls == LdXClass +} + +// IsStore checks if this is either StClass or StXClass. +func (cls Class) IsStore() bool { + return cls == StClass || cls == StXClass +} + +func (cls Class) isLoadOrStore() bool { + return cls.IsLoad() || cls.IsStore() +} + +// IsALU checks if this is either ALUClass or ALU64Class. +func (cls Class) IsALU() bool { + return cls == ALUClass || cls == ALU64Class +} + +// IsJump checks if this is either JumpClass or Jump32Class. +func (cls Class) IsJump() bool { + return cls == JumpClass || cls == Jump32Class +} + +func (cls Class) isJumpOrALU() bool { + return cls.IsJump() || cls.IsALU() } // OpCode is a packed eBPF opcode. @@ -86,7 +98,7 @@ func (op OpCode) Class() Class { // Mode returns the mode for load and store operations. func (op OpCode) Mode() Mode { - if op.Class().encoding() != loadOrStore { + if !op.Class().isLoadOrStore() { return InvalidMode } return Mode(op & modeMask) @@ -94,7 +106,7 @@ func (op OpCode) Mode() Mode { // Size returns the size for load and store operations. func (op OpCode) Size() Size { - if op.Class().encoding() != loadOrStore { + if !op.Class().isLoadOrStore() { return InvalidSize } return Size(op & sizeMask) @@ -102,7 +114,7 @@ func (op OpCode) Size() Size { // Source returns the source for branch and ALU operations. func (op OpCode) Source() Source { - if op.Class().encoding() != jumpOrALU || op.ALUOp() == Swap { + if !op.Class().isJumpOrALU() || op.ALUOp() == Swap { return InvalidSource } return Source(op & sourceMask) @@ -110,7 +122,7 @@ func (op OpCode) Source() Source { // ALUOp returns the ALUOp. func (op OpCode) ALUOp() ALUOp { - if op.Class().encoding() != jumpOrALU { + if !op.Class().IsALU() { return InvalidALUOp } return ALUOp(op & aluMask) @@ -125,18 +137,27 @@ func (op OpCode) Endianness() Endianness { } // JumpOp returns the JumpOp. +// Returns InvalidJumpOp if it doesn't encode a jump. func (op OpCode) JumpOp() JumpOp { - if op.Class().encoding() != jumpOrALU { + if !op.Class().IsJump() { return InvalidJumpOp } - return JumpOp(op & jumpMask) + + jumpOp := JumpOp(op & jumpMask) + + // Some JumpOps are only supported by JumpClass, not Jump32Class. + if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call || jumpOp == Ja) { + return InvalidJumpOp + } + + return jumpOp } // SetMode sets the mode on load and store operations. // // Returns InvalidOpCode if op is of the wrong class. 
func (op OpCode) SetMode(mode Mode) OpCode { - if op.Class().encoding() != loadOrStore || !valid(OpCode(mode), modeMask) { + if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) { return InvalidOpCode } return (op & ^modeMask) | OpCode(mode) @@ -146,7 +167,7 @@ func (op OpCode) SetMode(mode Mode) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetSize(size Size) OpCode { - if op.Class().encoding() != loadOrStore || !valid(OpCode(size), sizeMask) { + if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) { return InvalidOpCode } return (op & ^sizeMask) | OpCode(size) @@ -156,7 +177,7 @@ func (op OpCode) SetSize(size Size) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetSource(source Source) OpCode { - if op.Class().encoding() != jumpOrALU || !valid(OpCode(source), sourceMask) { + if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) { return InvalidOpCode } return (op & ^sourceMask) | OpCode(source) @@ -166,8 +187,7 @@ func (op OpCode) SetSource(source Source) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetALUOp(alu ALUOp) OpCode { - class := op.Class() - if (class != ALUClass && class != ALU64Class) || !valid(OpCode(alu), aluMask) { + if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) { return InvalidOpCode } return (op & ^aluMask) | OpCode(alu) @@ -177,17 +197,25 @@ func (op OpCode) SetALUOp(alu ALUOp) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetJumpOp(jump JumpOp) OpCode { - if op.Class() != JumpClass || !valid(OpCode(jump), jumpMask) { + if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) { + return InvalidOpCode + } + + newOp := (op & ^jumpMask) | OpCode(jump) + + // Check newOp is legal. 
+ if newOp.JumpOp() == InvalidJumpOp { return InvalidOpCode } - return (op & ^jumpMask) | OpCode(jump) + + return newOp } func (op OpCode) String() string { var f strings.Builder - switch class := op.Class(); class { - case LdClass, LdXClass, StClass, StXClass: + switch class := op.Class(); { + case class.isLoadOrStore(): f.WriteString(strings.TrimSuffix(class.String(), "Class")) mode := op.Mode() @@ -204,7 +232,7 @@ func (op OpCode) String() string { f.WriteString("B") } - case ALU64Class, ALUClass: + case class.IsALU(): f.WriteString(op.ALUOp().String()) if op.ALUOp() == Swap { @@ -218,8 +246,13 @@ func (op OpCode) String() string { f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) } - case JumpClass: + case class.IsJump(): f.WriteString(op.JumpOp().String()) + + if class == Jump32Class { + f.WriteString("32") + } + if jop := op.JumpOp(); jop != Exit && jop != Call { f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) } diff --git a/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/vendor/github.com/cilium/ebpf/asm/opcode_string.go index 079ce1db0b8..58bc3e7e7f0 100644 --- a/vendor/github.com/cilium/ebpf/asm/opcode_string.go +++ b/vendor/github.com/cilium/ebpf/asm/opcode_string.go @@ -14,25 +14,17 @@ func _() { _ = x[StXClass-3] _ = x[ALUClass-4] _ = x[JumpClass-5] + _ = x[Jump32Class-6] _ = x[ALU64Class-7] } -const ( - _Class_name_0 = "LdClassLdXClassStClassStXClassALUClassJumpClass" - _Class_name_1 = "ALU64Class" -) +const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class" -var ( - _Class_index_0 = [...]uint8{0, 7, 15, 22, 30, 38, 47} -) +var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68} func (i Class) String() string { - switch { - case 0 <= i && i <= 5: - return _Class_name_0[_Class_index_0[i]:_Class_index_0[i+1]] - case i == 7: - return _Class_name_1 - default: + if i >= Class(len(_Class_index)-1) { return "Class(" + strconv.FormatInt(int64(i), 10) + ")" } + return _Class_name[_Class_index[i]:_Class_index[i+1]] } diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go index 76cb44bffc7..dd5d44f1c19 100644 --- a/vendor/github.com/cilium/ebpf/asm/register.go +++ b/vendor/github.com/cilium/ebpf/asm/register.go @@ -38,6 +38,7 @@ const ( PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE PseudoCall = R1 // BPF_PSEUDO_CALL + PseudoFunc = R4 // BPF_PSEUDO_FUNC ) func (r Register) String() string { diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go index 2ededc87a05..fb32ada88a3 100644 --- a/vendor/github.com/cilium/ebpf/collection.go +++ b/vendor/github.com/cilium/ebpf/collection.go @@ -10,8 +10,8 @@ import ( "strings" "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" ) // CollectionOptions control loading a collection into the kernel. @@ -244,9 +244,14 @@ func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) switch m.typ { case ProgramArray: // Require all lazy-loaded ProgramArrays to be assigned to the given object. - // Without any references, they will be closed on the first GC and all tail - // calls into them will miss. - if !assignedMaps[n] { + // The kernel empties a ProgramArray once the last user space reference + // to it closes, which leads to failed tail calls. 
Combined with the library + // closing map fds via GC finalizers this can lead to surprising behaviour. + // Only allow unassigned ProgramArrays when the library hasn't pre-populated + // any entries from static value declarations. At this point, we know the map + // is empty and there's no way for the caller to interact with the map going + // forward. + if !assignedMaps[n] && len(cs.Maps[n].Contents) > 0 { return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n) } } @@ -281,7 +286,11 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co } } - for progName := range spec.Programs { + for progName, prog := range spec.Programs { + if prog.Type == UnspecifiedProgram { + continue + } + if _, err := loader.loadProgram(progName); err != nil { return nil, err } @@ -419,9 +428,16 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { return nil, fmt.Errorf("unknown program %s", progName) } + // Bail out early if we know the kernel is going to reject the program. + // This skips loading map dependencies, saving some cleanup work later. + if progSpec.Type == UnspecifiedProgram { + return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName) + } + progSpec = progSpec.Copy() - // Rewrite any reference to a valid map. + // Rewrite any reference to a valid map in the program's instructions, + // which includes all of its dependencies. for i := range progSpec.Instructions { ins := &progSpec.Instructions[i] @@ -442,7 +458,7 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { fd := m.FD() if fd < 0 { - return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd) + return nil, fmt.Errorf("map %s: %w", ins.Reference, sys.ErrClosedFd) } if err := ins.RewriteMapPtr(m.FD()); err != nil { return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference, err) @@ -467,24 +483,30 @@ func (cl *collectionLoader) populateMaps() error { mapSpec = mapSpec.Copy() - // Replace any object stubs with loaded objects. + // MapSpecs that refer to inner maps or programs within the same + // CollectionSpec do so using strings. These strings are used as the key + // to look up the respective object in the Maps or Programs fields. + // Resolve those references to actual Map or Program resources that + // have been loaded into the kernel. for i, kv := range mapSpec.Contents { - switch v := kv.Value.(type) { - case programStub: - // loadProgram is idempotent and could return an existing Program. - prog, err := cl.loadProgram(string(v)) - if err != nil { - return fmt.Errorf("loading program %s, for map %s: %w", v, mapName, err) - } - mapSpec.Contents[i] = MapKV{kv.Key, prog} - - case mapStub: - // loadMap is idempotent and could return an existing Map. - innerMap, err := cl.loadMap(string(v)) - if err != nil { - return fmt.Errorf("loading inner map %s, for map %s: %w", v, mapName, err) + if objName, ok := kv.Value.(string); ok { + switch mapSpec.Type { + case ProgramArray: + // loadProgram is idempotent and could return an existing Program. + prog, err := cl.loadProgram(objName) + if err != nil { + return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, prog} + + case ArrayOfMaps, HashOfMaps: + // loadMap is idempotent and could return an existing Map. 
+ innerMap, err := cl.loadMap(objName) + if err != nil { + return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, innerMap} } - mapSpec.Contents[i] = MapKV{kv.Key, innerMap} } } diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go index 42010f43e58..bbc88310844 100644 --- a/vendor/github.com/cilium/ebpf/elf_reader.go +++ b/vendor/github.com/cilium/ebpf/elf_reader.go @@ -100,37 +100,6 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { return nil, fmt.Errorf("load BTF: %w", err) } - // Assign symbols to all the sections we're interested in. - symbols, err := f.Symbols() - if err != nil { - return nil, fmt.Errorf("load symbols: %v", err) - } - - for _, symbol := range symbols { - idx := symbol.Section - symType := elf.ST_TYPE(symbol.Info) - - section := sections[idx] - if section == nil { - continue - } - - // Older versions of LLVM don't tag symbols correctly, so keep - // all NOTYPE ones. - keep := symType == elf.STT_NOTYPE - switch section.kind { - case mapSection, btfMapSection, dataSection: - keep = keep || symType == elf.STT_OBJECT - case programSection: - keep = keep || symType == elf.STT_FUNC - } - if !keep || symbol.Name == "" { - continue - } - - section.symbols[symbol.Value] = symbol - } - ec := &elfCode{ SafeELFFile: f, sections: sections, @@ -139,6 +108,13 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { btf: btfSpec, } + symbols, err := f.Symbols() + if err != nil { + return nil, fmt.Errorf("load symbols: %v", err) + } + + ec.assignSymbols(symbols) + // Go through relocation sections, and parse the ones for sections we're // interested in. Make sure that relocations point at valid sections. for idx, relSection := range relSections { @@ -183,7 +159,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { } // Finally, collect programs and link them. - progs, err := ec.loadPrograms() + progs, err := ec.loadProgramSections() if err != nil { return nil, fmt.Errorf("load programs: %w", err) } @@ -247,12 +223,57 @@ func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection { } } -func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) { - var ( - progs []*ProgramSpec - libs []*ProgramSpec - ) +// assignSymbols takes a list of symbols and assigns them to their +// respective sections, indexed by name. +func (ec *elfCode) assignSymbols(symbols []elf.Symbol) { + for _, symbol := range symbols { + symType := elf.ST_TYPE(symbol.Info) + symSection := ec.sections[symbol.Section] + if symSection == nil { + continue + } + + // Anonymous symbols only occur in debug sections which we don't process + // relocations for. Anonymous symbols are not referenced from other sections. + if symbol.Name == "" { + continue + } + + // Older versions of LLVM don't tag symbols correctly, so keep + // all NOTYPE ones. + switch symSection.kind { + case mapSection, btfMapSection, dataSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_OBJECT { + continue + } + case programSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_FUNC { + continue + } + // LLVM emits LBB_ (Local Basic Block) symbols that seem to be jump + // targets within sections, but BPF has no use for them. 
+ if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL && + strings.HasPrefix(symbol.Name, "LBB") { + continue + } + // Only collect symbols that occur in program/maps/data sections. + default: + continue + } + + symSection.symbols[symbol.Value] = symbol + } +} + +// loadProgramSections iterates ec's sections and emits a ProgramSpec +// for each function it finds. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) { + + progs := make(map[string]*ProgramSpec) + // Generate a ProgramSpec for each function found in each program section. for _, sec := range ec.sections { if sec.kind != programSection { continue @@ -262,86 +283,150 @@ func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) { return nil, fmt.Errorf("section %v: missing symbols", sec.Name) } - funcSym, ok := sec.symbols[0] - if !ok { - return nil, fmt.Errorf("section %v: no label at start", sec.Name) - } - - insns, length, err := ec.loadInstructions(sec) + funcs, err := ec.loadFunctions(sec) if err != nil { - return nil, fmt.Errorf("program %s: %w", funcSym.Name, err) + return nil, fmt.Errorf("section %v: %w", sec.Name, err) } progType, attachType, progFlags, attachTo := getProgType(sec.Name) - spec := &ProgramSpec{ - Name: funcSym.Name, - Type: progType, - Flags: progFlags, - AttachType: attachType, - AttachTo: attachTo, - License: ec.license, - KernelVersion: ec.version, - Instructions: insns, - ByteOrder: ec.ByteOrder, - } + for name, insns := range funcs { + spec := &ProgramSpec{ + Name: name, + Type: progType, + Flags: progFlags, + AttachType: attachType, + AttachTo: attachTo, + SectionName: sec.Name, + License: ec.license, + KernelVersion: ec.version, + Instructions: insns, + ByteOrder: ec.ByteOrder, + } - if ec.btf != nil { - spec.BTF, err = ec.btf.Program(sec.Name, length) - if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) { - return nil, fmt.Errorf("program %s: %w", funcSym.Name, err) + if ec.btf != nil { + spec.BTF, err = ec.btf.Program(name) + if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) { + return nil, fmt.Errorf("program %s: %w", name, err) + } } - } - if spec.Type == UnspecifiedProgram { - // There is no single name we can use for "library" sections, - // since they may contain multiple functions. We'll decode the - // labels they contain later on, and then link sections that way. - libs = append(libs, spec) - } else { - progs = append(progs, spec) + // Function names must be unique within a single ELF blob. + if progs[name] != nil { + return nil, fmt.Errorf("duplicate program name %s", name) + } + progs[name] = spec } } - res := make(map[string]*ProgramSpec, len(progs)) - for _, prog := range progs { - err := link(prog, libs) - if err != nil { - return nil, fmt.Errorf("program %s: %w", prog.Name, err) + // Populate each prog's references with pointers to all of its callees. + if err := populateReferences(progs); err != nil { + return nil, fmt.Errorf("populating references: %w", err) + } + + // Hide programs (e.g. library functions) that were not explicitly emitted + // to an ELF section. These could be exposed in a separate CollectionSpec + // field later to allow them to be modified. 
+ for n, p := range progs { + if p.SectionName == ".text" { + delete(progs, n) } - res[prog.Name] = prog } - return res, nil + return progs, nil } -func (ec *elfCode) loadInstructions(section *elfSection) (asm.Instructions, uint64, error) { +// loadFunctions extracts instruction streams from the given program section +// starting at each symbol in the section. The section's symbols must already +// be narrowed down to STT_NOTYPE (emitted by clang <8) or STT_FUNC. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) { var ( r = bufio.NewReader(section.Open()) - insns asm.Instructions + funcs = make(map[string]asm.Instructions) offset uint64 + insns asm.Instructions ) for { - var ins asm.Instruction + ins := asm.Instruction{ + // Symbols denote the first instruction of a function body. + Symbol: section.symbols[offset].Name, + } + + // Pull one instruction from the instruction stream. n, err := ins.Unmarshal(r, ec.ByteOrder) - if err == io.EOF { - return insns, offset, nil + if errors.Is(err, io.EOF) { + fn := insns.Name() + if fn == "" { + return nil, errors.New("reached EOF before finding a valid symbol") + } + + // Reached the end of the section and the decoded instruction buffer + // contains at least one valid instruction belonging to a function. + // Store the result and stop processing instructions. + funcs[fn] = insns + break } if err != nil { - return nil, 0, fmt.Errorf("offset %d: %w", offset, err) + return nil, fmt.Errorf("offset %d: %w", offset, err) } - ins.Symbol = section.symbols[offset].Name + // Decoded the first instruction of a function body but insns already + // holds a valid instruction stream. Store the result and flush insns. + if ins.Symbol != "" && insns.Name() != "" { + funcs[insns.Name()] = insns + insns = nil + } if rel, ok := section.relocations[offset]; ok { + // A relocation was found for the current offset. Apply it to the insn. if err = ec.relocateInstruction(&ins, rel); err != nil { - return nil, 0, fmt.Errorf("offset %d: relocate instruction: %w", offset, err) + return nil, fmt.Errorf("offset %d: relocate instruction: %w", offset, err) + } + } else { + // Up to LLVM 9, calls to subprograms within the same ELF section are + // sometimes encoded using relative jumps without relocation entries. + // If, after all relocations entries have been processed, there are + // still relative pseudocalls left, they must point to an existing + // symbol within the section. + // When splitting sections into subprograms, the targets of these calls + // are no longer in scope, so they must be resolved here. + if ins.IsFunctionReference() && ins.Constant != -1 { + tgt := jumpTarget(offset, ins) + sym := section.symbols[tgt].Name + if sym == "" { + return nil, fmt.Errorf("offset %d: no jump target found at offset %d", offset, tgt) + } + + ins.Reference = sym + ins.Constant = -1 } } insns = append(insns, ins) offset += n } + + return funcs, nil +} + +// jumpTarget takes ins' offset within an instruction stream (in bytes) +// and returns its absolute jump destination (in bytes) within the +// instruction stream. +func jumpTarget(offset uint64, ins asm.Instruction) uint64 { + // A relative jump instruction describes the amount of raw BPF instructions + // to jump, convert the offset into bytes. + dest := ins.Constant * asm.InstructionSize + + // The starting point of the jump is the end of the current instruction. 
+ dest += int64(offset + asm.InstructionSize) + + if dest < 0 { + return 0 + } + + return uint64(dest) } func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error { @@ -413,44 +498,70 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err } case programSection: - if ins.OpCode.JumpOp() != asm.Call { - return fmt.Errorf("not a call instruction: %s", ins) - } + switch opCode := ins.OpCode; { + case opCode.JumpOp() == asm.Call: + if ins.Src != asm.PseudoCall { + return fmt.Errorf("call: %s: incorrect source register", name) + } - if ins.Src != asm.PseudoCall { - return fmt.Errorf("call: %s: incorrect source register", name) - } + switch typ { + case elf.STT_NOTYPE, elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + } - switch typ { - case elf.STT_NOTYPE, elf.STT_FUNC: - if bind != elf.STB_GLOBAL { - return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) - } + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + } - case elf.STT_SECTION: - if bind != elf.STB_LOCAL { - return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + // The function we want to call is in the indicated section, + // at the offset encoded in the instruction itself. Reverse + // the calculation to find the real function we're looking for. + // A value of -1 references the first instruction in the section. + offset := int64(int32(ins.Constant)+1) * asm.InstructionSize + sym, ok := target.symbols[uint64(offset)] + if !ok { + return fmt.Errorf("call: no symbol at offset %d", offset) + } + + name = sym.Name + ins.Constant = -1 + + default: + return fmt.Errorf("call: %s: invalid symbol type %s", name, typ) } + case opCode.IsDWordLoad(): + switch typ { + case elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("load: %s: unsupported binding: %s", name, bind) + } + + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("load: %s: unsupported binding: %s", name, bind) + } - // The function we want to call is in the indicated section, - // at the offset encoded in the instruction itself. Reverse - // the calculation to find the real function we're looking for. - // A value of -1 references the first instruction in the section. - offset := int64(int32(ins.Constant)+1) * asm.InstructionSize - if offset < 0 { - return fmt.Errorf("call: %s: invalid offset %d", name, offset) + // ins.Constant already contains the offset in bytes from the + // start of the section. This is different than a call to a + // static function. 
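Both offset calculations are easy to misread, so here is a small standalone sketch of the arithmetic. Only the 8-byte instruction size (asm.InstructionSize) is taken from the library; the helper names are illustrative.

package main

import "fmt"

const instructionSize = 8 // bytes per BPF instruction, as in asm.InstructionSize

// callTargetOffset mirrors the section-relative calculation above: a
// pseudo-call constant of -1 refers to the first instruction of the target
// section, so the byte offset is (constant+1) * 8.
func callTargetOffset(constant int32) int64 {
	return int64(constant+1) * instructionSize
}

// jumpTargetBytes mirrors jumpTarget: the destination of a relative call is
// measured in instructions from the end of the calling instruction.
func jumpTargetBytes(insnOffset uint64, constant int64) uint64 {
	dest := constant*instructionSize + int64(insnOffset+instructionSize)
	if dest < 0 {
		return 0
	}
	return uint64(dest)
}

func main() {
	fmt.Println(callTargetOffset(-1))   // 0: first instruction in the section
	fmt.Println(callTargetOffset(5))    // 48
	fmt.Println(jumpTargetBytes(16, 2)) // 40: two instructions past the call at byte offset 16
}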
+ + default: + return fmt.Errorf("load: %s: invalid symbol type %s", name, typ) } - sym, ok := target.symbols[uint64(offset)] + sym, ok := target.symbols[uint64(ins.Constant)] if !ok { - return fmt.Errorf("call: %s: no symbol at offset %d", name, offset) + return fmt.Errorf("load: no symbol at offset %d", ins.Constant) } - ins.Constant = -1 name = sym.Name + ins.Constant = -1 + ins.Src = asm.PseudoFunc default: - return fmt.Errorf("call: %s: invalid symbol type %s", name, typ) + return fmt.Errorf("neither a call nor a load instruction: %v", ins) } case undefSection: @@ -525,7 +636,7 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { return fmt.Errorf("map %s: reading map tail: %w", mapName, err) } if len(extra) > 0 { - spec.Extra = *bytes.NewReader(extra) + spec.Extra = bytes.NewReader(extra) } if err := spec.clampPerfEventArraySize(); err != nil { @@ -554,7 +665,7 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { // Each section must appear as a DataSec in the ELF's BTF blob. var ds *btf.Datasec - if err := ec.btf.FindType(sec.Name, &ds); err != nil { + if err := ec.btf.TypeByName(sec.Name, &ds); err != nil { return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err) } @@ -617,14 +728,6 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { return nil } -// A programStub is a placeholder for a Program to be inserted at a certain map key. -// It needs to be resolved into a Program later on in the loader process. -type programStub string - -// A mapStub is a placeholder for a Map to be inserted at a certain map key. -// It needs to be resolved into a Map later on in the loader process. -type mapStub string - // mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing // a BTF map definition. The name and spec arguments will be copied to the // resulting MapSpec, and inner must be true on any resursive invocations. @@ -898,9 +1001,9 @@ func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Mem // skipped here. 
switch t := elf.ST_TYPE(r.Info); t { case elf.STT_FUNC: - contents = append(contents, MapKV{uint32(k), programStub(r.Name)}) + contents = append(contents, MapKV{uint32(k), r.Name}) case elf.STT_OBJECT: - contents = append(contents, MapKV{uint32(k), mapStub(r.Name)}) + contents = append(contents, MapKV{uint32(k), r.Name}) default: return nil, fmt.Errorf("unknown relocation type %v", t) } @@ -926,7 +1029,7 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { } var datasec *btf.Datasec - if err := ec.btf.FindType(sec.Name, &datasec); err != nil { + if err := ec.btf.TypeByName(sec.Name, &datasec); err != nil { return fmt.Errorf("data section %s: can't get BTF: %w", sec.Name, err) } @@ -964,85 +1067,97 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { } func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { - types := map[string]struct { + types := []struct { + prefix string progType ProgramType attachType AttachType progFlags uint32 }{ - // From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c - "socket": {SocketFilter, AttachNone, 0}, - "sk_reuseport/migrate": {SkReuseport, AttachSkReuseportSelectOrMigrate, 0}, - "sk_reuseport": {SkReuseport, AttachSkReuseportSelect, 0}, - "seccomp": {SocketFilter, AttachNone, 0}, - "kprobe/": {Kprobe, AttachNone, 0}, - "uprobe/": {Kprobe, AttachNone, 0}, - "kretprobe/": {Kprobe, AttachNone, 0}, - "uretprobe/": {Kprobe, AttachNone, 0}, - "tracepoint/": {TracePoint, AttachNone, 0}, - "raw_tracepoint/": {RawTracepoint, AttachNone, 0}, - "raw_tp/": {RawTracepoint, AttachNone, 0}, - "tp_btf/": {Tracing, AttachTraceRawTp, 0}, - "xdp": {XDP, AttachNone, 0}, - "perf_event": {PerfEvent, AttachNone, 0}, - "lwt_in": {LWTIn, AttachNone, 0}, - "lwt_out": {LWTOut, AttachNone, 0}, - "lwt_xmit": {LWTXmit, AttachNone, 0}, - "lwt_seg6local": {LWTSeg6Local, AttachNone, 0}, - "sockops": {SockOps, AttachCGroupSockOps, 0}, - "sk_skb/stream_parser": {SkSKB, AttachSkSKBStreamParser, 0}, - "sk_skb/stream_verdict": {SkSKB, AttachSkSKBStreamParser, 0}, - "sk_msg": {SkMsg, AttachSkSKBStreamVerdict, 0}, - "lirc_mode2": {LircMode2, AttachLircMode2, 0}, - "flow_dissector": {FlowDissector, AttachFlowDissector, 0}, - "iter/": {Tracing, AttachTraceIter, 0}, - "fentry/": {Tracing, AttachTraceFEntry, 0}, - "fmod_ret/": {Tracing, AttachModifyReturn, 0}, - "fexit/": {Tracing, AttachTraceFExit, 0}, - "fentry.s/": {Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE}, - "fmod_ret.s/": {Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE}, - "fexit.s/": {Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE}, - "sk_lookup/": {SkLookup, AttachSkLookup, 0}, - "freplace/": {Extension, AttachNone, 0}, - "lsm/": {LSM, AttachLSMMac, 0}, - "lsm.s/": {LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE}, - - "cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress, 0}, - "cgroup_skb/egress": {CGroupSKB, AttachCGroupInetEgress, 0}, - "cgroup/dev": {CGroupDevice, AttachCGroupDevice, 0}, - "cgroup/skb": {CGroupSKB, AttachNone, 0}, - "cgroup/sock": {CGroupSock, AttachCGroupInetSockCreate, 0}, - "cgroup/post_bind4": {CGroupSock, AttachCGroupInet4PostBind, 0}, - "cgroup/post_bind6": {CGroupSock, AttachCGroupInet6PostBind, 0}, - "cgroup/bind4": {CGroupSockAddr, AttachCGroupInet4Bind, 0}, - "cgroup/bind6": {CGroupSockAddr, AttachCGroupInet6Bind, 0}, - "cgroup/connect4": {CGroupSockAddr, AttachCGroupInet4Connect, 0}, - "cgroup/connect6": {CGroupSockAddr, AttachCGroupInet6Connect, 0}, - "cgroup/sendmsg4": {CGroupSockAddr, 
AttachCGroupUDP4Sendmsg, 0}, - "cgroup/sendmsg6": {CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0}, - "cgroup/recvmsg4": {CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0}, - "cgroup/recvmsg6": {CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0}, - "cgroup/sysctl": {CGroupSysctl, AttachCGroupSysctl, 0}, - "cgroup/getsockopt": {CGroupSockopt, AttachCGroupGetsockopt, 0}, - "cgroup/setsockopt": {CGroupSockopt, AttachCGroupSetsockopt, 0}, - "classifier": {SchedCLS, AttachNone, 0}, - "action": {SchedACT, AttachNone, 0}, - - "cgroup/getsockname4": {CGroupSockAddr, AttachCgroupInet4GetSockname, 0}, - "cgroup/getsockname6": {CGroupSockAddr, AttachCgroupInet6GetSockname, 0}, - "cgroup/getpeername4": {CGroupSockAddr, AttachCgroupInet4GetPeername, 0}, - "cgroup/getpeername6": {CGroupSockAddr, AttachCgroupInet6GetPeername, 0}, + // Please update the types from libbpf.c and follow the order of it. + // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c + {"socket", SocketFilter, AttachNone, 0}, + {"sk_reuseport/migrate", SkReuseport, AttachSkReuseportSelectOrMigrate, 0}, + {"sk_reuseport", SkReuseport, AttachSkReuseportSelect, 0}, + {"kprobe/", Kprobe, AttachNone, 0}, + {"uprobe/", Kprobe, AttachNone, 0}, + {"kretprobe/", Kprobe, AttachNone, 0}, + {"uretprobe/", Kprobe, AttachNone, 0}, + {"tc", SchedCLS, AttachNone, 0}, + {"classifier", SchedCLS, AttachNone, 0}, + {"action", SchedACT, AttachNone, 0}, + {"tracepoint/", TracePoint, AttachNone, 0}, + {"tp/", TracePoint, AttachNone, 0}, + {"raw_tracepoint/", RawTracepoint, AttachNone, 0}, + {"raw_tp/", RawTracepoint, AttachNone, 0}, + {"raw_tracepoint.w/", RawTracepointWritable, AttachNone, 0}, + {"raw_tp.w/", RawTracepointWritable, AttachNone, 0}, + {"tp_btf/", Tracing, AttachTraceRawTp, 0}, + {"fentry/", Tracing, AttachTraceFEntry, 0}, + {"fmod_ret/", Tracing, AttachModifyReturn, 0}, + {"fexit/", Tracing, AttachTraceFExit, 0}, + {"fentry.s/", Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE}, + {"fmod_ret.s/", Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE}, + {"fexit.s/", Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE}, + {"freplace/", Extension, AttachNone, 0}, + {"lsm/", LSM, AttachLSMMac, 0}, + {"lsm.s/", LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE}, + {"iter/", Tracing, AttachTraceIter, 0}, + {"syscall", Syscall, AttachNone, 0}, + {"xdp_devmap/", XDP, AttachXDPDevMap, 0}, + {"xdp_cpumap/", XDP, AttachXDPCPUMap, 0}, + {"xdp", XDP, AttachNone, 0}, + {"perf_event", PerfEvent, AttachNone, 0}, + {"lwt_in", LWTIn, AttachNone, 0}, + {"lwt_out", LWTOut, AttachNone, 0}, + {"lwt_xmit", LWTXmit, AttachNone, 0}, + {"lwt_seg6local", LWTSeg6Local, AttachNone, 0}, + {"cgroup_skb/ingress", CGroupSKB, AttachCGroupInetIngress, 0}, + {"cgroup_skb/egress", CGroupSKB, AttachCGroupInetEgress, 0}, + {"cgroup/skb", CGroupSKB, AttachNone, 0}, + {"cgroup/sock_create", CGroupSKB, AttachCGroupInetSockCreate, 0}, + {"cgroup/sock_release", CGroupSKB, AttachCgroupInetSockRelease, 0}, + {"cgroup/sock", CGroupSock, AttachCGroupInetSockCreate, 0}, + {"cgroup/post_bind4", CGroupSock, AttachCGroupInet4PostBind, 0}, + {"cgroup/post_bind6", CGroupSock, AttachCGroupInet6PostBind, 0}, + {"cgroup/dev", CGroupDevice, AttachCGroupDevice, 0}, + {"sockops", SockOps, AttachCGroupSockOps, 0}, + {"sk_skb/stream_parser", SkSKB, AttachSkSKBStreamParser, 0}, + {"sk_skb/stream_verdict", SkSKB, AttachSkSKBStreamVerdict, 0}, + {"sk_skb", SkSKB, AttachNone, 0}, + {"sk_msg", SkMsg, AttachSkMsgVerdict, 0}, + {"lirc_mode2", LircMode2, AttachLircMode2, 0}, + {"flow_dissector", 
FlowDissector, AttachFlowDissector, 0}, + {"cgroup/bind4", CGroupSockAddr, AttachCGroupInet4Bind, 0}, + {"cgroup/bind6", CGroupSockAddr, AttachCGroupInet6Bind, 0}, + {"cgroup/connect4", CGroupSockAddr, AttachCGroupInet4Connect, 0}, + {"cgroup/connect6", CGroupSockAddr, AttachCGroupInet6Connect, 0}, + {"cgroup/sendmsg4", CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0}, + {"cgroup/sendmsg6", CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0}, + {"cgroup/recvmsg4", CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0}, + {"cgroup/recvmsg6", CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0}, + {"cgroup/getpeername4", CGroupSockAddr, AttachCgroupInet4GetPeername, 0}, + {"cgroup/getpeername6", CGroupSockAddr, AttachCgroupInet6GetPeername, 0}, + {"cgroup/getsockname4", CGroupSockAddr, AttachCgroupInet4GetSockname, 0}, + {"cgroup/getsockname6", CGroupSockAddr, AttachCgroupInet6GetSockname, 0}, + {"cgroup/sysctl", CGroupSysctl, AttachCGroupSysctl, 0}, + {"cgroup/getsockopt", CGroupSockopt, AttachCGroupGetsockopt, 0}, + {"cgroup/setsockopt", CGroupSockopt, AttachCGroupSetsockopt, 0}, + {"struct_ops+", StructOps, AttachNone, 0}, + {"sk_lookup/", SkLookup, AttachSkLookup, 0}, + + {"seccomp", SocketFilter, AttachNone, 0}, } - for prefix, t := range types { - if !strings.HasPrefix(sectionName, prefix) { + for _, t := range types { + if !strings.HasPrefix(sectionName, t.prefix) { continue } - if !strings.HasSuffix(prefix, "/") { + if !strings.HasSuffix(t.prefix, "/") { return t.progType, t.attachType, t.progFlags, "" } - return t.progType, t.attachType, t.progFlags, sectionName[len(prefix):] + return t.progType, t.attachType, t.progFlags, sectionName[len(t.prefix):] } return UnspecifiedProgram, AttachNone, 0, "" diff --git a/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go b/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go deleted file mode 100644 index 5f4e0a0ad02..00000000000 --- a/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -// Use with https://github.com/dvyukov/go-fuzz - -package ebpf - -import "bytes" - -func FuzzLoadCollectionSpec(data []byte) int { - spec, err := LoadCollectionSpecFromReader(bytes.NewReader(data)) - if err != nil { - if spec != nil { - panic("spec is not nil") - } - return 0 - } - if spec == nil { - panic("spec is nil") - } - return 1 -} diff --git a/vendor/github.com/cilium/ebpf/go.mod b/vendor/github.com/cilium/ebpf/go.mod index f5edf690ab0..67a2e5f0833 100644 --- a/vendor/github.com/cilium/ebpf/go.mod +++ b/vendor/github.com/cilium/ebpf/go.mod @@ -3,7 +3,7 @@ module github.com/cilium/ebpf go 1.16 require ( - github.com/frankban/quicktest v1.11.3 - github.com/google/go-cmp v0.5.4 + github.com/frankban/quicktest v1.14.0 + github.com/google/go-cmp v0.5.6 golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 ) diff --git a/vendor/github.com/cilium/ebpf/go.sum b/vendor/github.com/cilium/ebpf/go.sum index 1ef5a4767e8..f503d16f928 100644 --- a/vendor/github.com/cilium/ebpf/go.sum +++ b/vendor/github.com/cilium/ebpf/go.sum @@ -1,13 +1,20 @@ -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go index 65fa4d7d850..cf692c762ef 100644 --- a/vendor/github.com/cilium/ebpf/info.go +++ b/vendor/github.com/cilium/ebpf/info.go @@ -2,6 +2,7 @@ package ebpf import ( "bufio" + "bytes" "encoding/hex" "errors" "fmt" @@ -10,9 +11,13 @@ import ( "strings" "syscall" "time" + "unsafe" + "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" ) // MapInfo describes a map. @@ -23,12 +28,13 @@ type MapInfo struct { ValueSize uint32 MaxEntries uint32 Flags uint32 - // Name as supplied by user space at load time. + // Name as supplied by user space at load time. Available from 4.15. Name string } -func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) { - info, err := bpfGetMapInfoByFD(fd) +func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { + var info sys.MapInfo + err := sys.ObjInfo(fd, &info) if errors.Is(err, syscall.EINVAL) { return newMapInfoFromProc(fd) } @@ -37,18 +43,17 @@ func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) { } return &MapInfo{ - MapType(info.map_type), - MapID(info.id), - info.key_size, - info.value_size, - info.max_entries, - info.map_flags, - // name is available from 4.15. 
- internal.CString(info.name[:]), + MapType(info.Type), + MapID(info.Id), + info.KeySize, + info.ValueSize, + info.MaxEntries, + info.MapFlags, + unix.ByteSliceToString(info.Name[:]), }, nil } -func newMapInfoFromProc(fd *internal.FD) (*MapInfo, error) { +func newMapInfoFromProc(fd *sys.FD) (*MapInfo, error) { var mi MapInfo err := scanFdInfo(fd, map[string]interface{}{ "map_type": &mi.Type, @@ -84,20 +89,21 @@ type programStats struct { type ProgramInfo struct { Type ProgramType id ProgramID - // Truncated hash of the BPF bytecode. + // Truncated hash of the BPF bytecode. Available from 4.13. Tag string - // Name as supplied by user space at load time. + // Name as supplied by user space at load time. Available from 4.15. Name string - // BTF for the program. - btf btf.ID - // IDS map ids related to program. - ids []MapID + btf btf.ID stats *programStats + + maps []MapID + insns []byte } -func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { - info, err := bpfGetProgInfoByFD(fd, nil) +func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { + var info sys.ProgInfo + err := sys.ObjInfo(fd, &info) if errors.Is(err, syscall.EINVAL) { return newProgramInfoFromProc(fd) } @@ -105,32 +111,43 @@ func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { return nil, err } - var mapIDs []MapID - if info.nr_map_ids > 0 { - mapIDs = make([]MapID, info.nr_map_ids) - info, err = bpfGetProgInfoByFD(fd, mapIDs) - if err != nil { + pi := ProgramInfo{ + Type: ProgramType(info.Type), + id: ProgramID(info.Id), + Tag: hex.EncodeToString(info.Tag[:]), + Name: unix.ByteSliceToString(info.Name[:]), + btf: btf.ID(info.BtfId), + stats: &programStats{ + runtime: time.Duration(info.RunTimeNs), + runCount: info.RunCnt, + }, + } + + // Start with a clean struct for the second call, otherwise we may get EFAULT. + var info2 sys.ProgInfo + + if info.NrMapIds > 0 { + pi.maps = make([]MapID, info.NrMapIds) + info2.NrMapIds = info.NrMapIds + info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0])) + } + + if info.XlatedProgLen > 0 { + pi.insns = make([]byte, info.XlatedProgLen) + info2.XlatedProgLen = info.XlatedProgLen + info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns) + } + + if info.NrMapIds > 0 || info.XlatedProgLen > 0 { + if err := sys.ObjInfo(fd, &info2); err != nil { return nil, err } } - return &ProgramInfo{ - Type: ProgramType(info.prog_type), - id: ProgramID(info.id), - // tag is available if the kernel supports BPF_PROG_GET_INFO_BY_FD. - Tag: hex.EncodeToString(info.tag[:]), - // name is available from 4.15. - Name: internal.CString(info.name[:]), - btf: btf.ID(info.btf_id), - ids: mapIDs, - stats: &programStats{ - runtime: time.Duration(info.run_time_ns), - runCount: info.run_cnt, - }, - }, nil + return &pi, nil } -func newProgramInfoFromProc(fd *internal.FD) (*ProgramInfo, error) { +func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) { var info ProgramInfo err := scanFdInfo(fd, map[string]interface{}{ "prog_type": &info.Type, @@ -191,20 +208,47 @@ func (pi *ProgramInfo) Runtime() (time.Duration, bool) { return time.Duration(0), false } +// Instructions returns the 'xlated' instruction stream of the program +// after it has been verified and rewritten by the kernel. These instructions +// cannot be loaded back into the kernel as-is, this is mainly used for +// inspecting loaded programs for troubleshooting, dumping, etc. +// +// For example, map accesses are made to reference their kernel map IDs, +// not the FDs they had when the program was inserted. 
+// +// The first instruction is marked as a symbol using the Program's name. +// +// Available from 4.13. Requires CAP_BPF or equivalent. +func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { + // If the calling process is not BPF-capable or if the kernel doesn't + // support getting xlated instructions, the field will be zero. + if len(pi.insns) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + r := bytes.NewReader(pi.insns) + var insns asm.Instructions + if err := insns.Unmarshal(r, internal.NativeEndian); err != nil { + return nil, fmt.Errorf("unmarshaling instructions: %w", err) + } + + // Tag the first instruction with the name of the program, if available. + insns[0] = insns[0].Sym(pi.Name) + + return insns, nil +} + // MapIDs returns the maps related to the program. // +// Available from 4.15. +// // The bool return value indicates whether this optional field is available. func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { - return pi.ids, pi.ids != nil + return pi.maps, pi.maps != nil } -func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error { - raw, err := fd.Value() - if err != nil { - return err - } - - fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw)) +func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { + fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int())) if err != nil { return err } @@ -247,6 +291,10 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { return err } + if len(fields) > 0 && scanned == 0 { + return ErrNotSupported + } + if scanned != len(fields) { return errMissingFields } @@ -261,11 +309,9 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { // // Requires at least 5.8. func EnableStats(which uint32) (io.Closer, error) { - attr := internal.BPFEnableStatsAttr{ - StatsType: which, - } - - fd, err := internal.BPFEnableStats(&attr) + fd, err := sys.EnableStats(&sys.EnableStatsAttr{ + Type: which, + }) if err != nil { return nil, err } diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf.go b/vendor/github.com/cilium/ebpf/internal/btf/btf.go index 2b5f6d226a4..df4f78efd44 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/btf.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/btf.go @@ -11,9 +11,9 @@ import ( "os" "reflect" "sync" - "unsafe" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -31,14 +31,23 @@ type ID uint32 // Spec represents decoded BTF. type Spec struct { - rawTypes []rawType - strings stringTable - types []Type - namedTypes map[string][]NamedType - funcInfos map[string]extInfo - lineInfos map[string]extInfo - coreRelos map[string]coreRelos - byteOrder binary.ByteOrder + // Data from .BTF. + rawTypes []rawType + strings stringTable + + // Inflated Types. + types []Type + + // Types indexed by essential name. + // Includes all struct flavors and types with the same name. + namedTypes map[essentialName][]Type + + // Data from .BTF.ext. + funcInfos map[string]FuncInfo + lineInfos map[string]LineInfos + coreRelos map[string]CoreRelos + + byteOrder binary.ByteOrder } type btfHeader struct { @@ -53,16 +62,45 @@ type btfHeader struct { StringLen uint32 } -// LoadSpecFromReader reads BTF sections from an ELF. +// typeStart returns the offset from the beginning of the .BTF section +// to the start of its type entries. 
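A brief usage sketch of the reworked ProgramInfo through the public API; prog is assumed to be an already-loaded *ebpf.Program, and both fields degrade gracefully on kernels or callers that cannot provide them.

package example

import (
	"fmt"

	"github.com/cilium/ebpf"
)

// dumpInfo prints the translated instruction count and referenced map IDs of
// a loaded program, where kernel support and capabilities permit.
func dumpInfo(prog *ebpf.Program) error {
	info, err := prog.Info()
	if err != nil {
		return err
	}

	if insns, err := info.Instructions(); err == nil {
		fmt.Printf("%s: %d xlated instructions\n", info.Name, len(insns))
	}

	if ids, ok := info.MapIDs(); ok {
		fmt.Println("referenced maps:", ids)
	}
	return nil
}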
+func (h *btfHeader) typeStart() int64 { + return int64(h.HdrLen + h.TypeOff) +} + +// stringStart returns the offset from the beginning of the .BTF section +// to the start of its string table. +func (h *btfHeader) stringStart() int64 { + return int64(h.HdrLen + h.StringOff) +} + +// LoadSpecFromReader reads from an ELF or a raw BTF blob. // -// Returns ErrNotFound if the reader contains no BTF. +// Returns ErrNotFound if reading from an ELF which contains no BTF. func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { file, err := internal.NewSafeELFFile(rd) if err != nil { + if bo := guessRawBTFByteOrder(rd); bo != nil { + // Try to parse a naked BTF blob. This will return an error if + // we encounter a Datasec, since we can't fix it up. + return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil) + } + return nil, err } defer file.Close() + return loadSpecFromELF(file) +} + +// variableOffsets extracts all symbols offsets from an ELF and indexes them by +// section and variable name. +// +// References to variables in BTF data sections carry unsigned 32-bit offsets. +// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well +// beyond this range. Since these symbols cannot be described by BTF info, +// ignore them here. +func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) { symbols, err := file.Symbols() if err != nil { return nil, fmt.Errorf("can't read symbols: %v", err) @@ -75,22 +113,23 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { continue } + if symbol.Value > math.MaxUint32 { + // VarSecinfo offset is u32, cannot reference symbols in higher regions. + continue + } + if int(symbol.Section) >= len(file.Sections) { return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section) } secName := file.Sections[symbol.Section].Name - if symbol.Value > math.MaxUint32 { - return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name) - } - variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value) } - return loadSpecFromELF(file, variableOffsets) + return variableOffsets, nil } -func loadSpecFromELF(file *internal.SafeELFFile, variableOffsets map[variable]uint32) (*Spec, error) { +func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { var ( btfSection *elf.Section btfExtSection *elf.Section @@ -120,7 +159,12 @@ func loadSpecFromELF(file *internal.SafeELFFile, variableOffsets map[variable]ui return nil, fmt.Errorf("btf: %w", ErrNotFound) } - spec, err := loadRawSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets) + vars, err := variableOffsets(file) + if err != nil { + return nil, err + } + + spec, err := loadRawSpec(btfSection.Open(), file.ByteOrder, sectionSizes, vars) if err != nil { return nil, err } @@ -129,22 +173,96 @@ func loadSpecFromELF(file *internal.SafeELFFile, variableOffsets map[variable]ui return spec, nil } - spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings) + if btfExtSection.ReaderAt == nil { + return nil, fmt.Errorf("compressed ext_info is not supported") + } + + extInfo, err := loadExtInfos(btfExtSection, file.ByteOrder, spec.strings) if err != nil { - return nil, fmt.Errorf("can't read ext info: %w", err) + return nil, fmt.Errorf("can't parse ext info: %w", err) + } + + if err := spec.splitExtInfos(extInfo); err != nil { + return nil, fmt.Errorf("linking funcInfos and lineInfos: %w", err) } return spec, nil } -// LoadRawSpec reads a 
blob of BTF data that isn't wrapped in an ELF file. -// -// Prefer using LoadSpecFromReader, since this function only supports a subset -// of BTF. -func LoadRawSpec(btf io.Reader, bo binary.ByteOrder) (*Spec, error) { - // This will return an error if we encounter a Datasec, since we can't fix - // it up. - return loadRawSpec(btf, bo, nil, nil) +// splitExtInfos takes FuncInfos, LineInfos and CoreRelos indexed by section and +// transforms them to be indexed by function. Retrieves function names from +// the BTF spec. +func (spec *Spec) splitExtInfos(info *extInfo) error { + + ofi := make(map[string]FuncInfo) + oli := make(map[string]LineInfos) + ocr := make(map[string]CoreRelos) + + for secName, secFuncs := range info.funcInfos { + // Collect functions from each section and organize them by name. + for _, fi := range secFuncs { + name, err := fi.Name(spec) + if err != nil { + return fmt.Errorf("looking up function name: %w", err) + } + + // FuncInfo offsets are scoped to the ELF section. Zero them out + // since they are meaningless outside of that context. The linker + // will determine the offset of the function within the final + // instruction stream before handing it off to the kernel. + fi.InsnOff = 0 + + ofi[name] = fi + } + + // Attribute LineInfo records to their respective functions, if any. + if lines := info.lineInfos[secName]; lines != nil { + for _, li := range lines { + fi := secFuncs.funcForOffset(li.InsnOff) + if fi == nil { + return fmt.Errorf("section %s: error looking up FuncInfo for LineInfo %v", secName, li) + } + + // Offsets are ELF section-scoped, make them function-scoped by + // subtracting the function's start offset. + li.InsnOff -= fi.InsnOff + + name, err := fi.Name(spec) + if err != nil { + return fmt.Errorf("looking up function name: %w", err) + } + + oli[name] = append(oli[name], li) + } + } + + // Attribute CO-RE relocations to their respective functions, if any. + if relos := info.relos[secName]; relos != nil { + for _, r := range relos { + fi := secFuncs.funcForOffset(r.insnOff) + if fi == nil { + return fmt.Errorf("section %s: error looking up FuncInfo for CO-RE relocation %v", secName, r) + } + + // Offsets are ELF section-scoped, make them function-scoped by + // subtracting the function's start offset. + r.insnOff -= fi.InsnOff + + name, err := fi.Name(spec) + if err != nil { + return fmt.Errorf("looking up function name: %w", err) + } + + ocr[name] = append(ocr[name], r) + } + } + } + + spec.funcInfos = ofi + spec.lineInfos = oli + spec.coreRelos = ocr + + return nil } func loadRawSpec(btf io.Reader, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) { @@ -194,17 +312,31 @@ func LoadKernelSpec() (*Spec, error) { return kernelBTF.Spec, err } +// loadKernelSpec attempts to load the raw vmlinux BTF blob at +// /sys/kernel/btf/vmlinux and falls back to scanning the file system +// for vmlinux ELFs. func loadKernelSpec() (*Spec, error) { - release, err := unix.KernelRelease() - if err != nil { - return nil, fmt.Errorf("can't read kernel release number: %w", err) - } - fh, err := os.Open("/sys/kernel/btf/vmlinux") if err == nil { defer fh.Close() - return LoadRawSpec(fh, internal.NativeEndian) + return loadRawSpec(fh, internal.NativeEndian, nil, nil) + } + + file, err := findVMLinux() + if err != nil { + return nil, err + } + defer file.Close() + + return loadSpecFromELF(file) +} + +// findVMLinux scans multiple well-known paths for vmlinux kernel images. 
+func findVMLinux() (*internal.SafeELFFile, error) { + release, err := internal.KernelRelease() + if err != nil { + return nil, err } // use same list of locations as libbpf @@ -220,74 +352,82 @@ func loadKernelSpec() (*Spec, error) { } for _, loc := range locations { - path := fmt.Sprintf(loc, release) - - fh, err := os.Open(path) + fh, err := os.Open(fmt.Sprintf(loc, release)) if err != nil { continue } - defer fh.Close() - - file, err := internal.NewSafeELFFile(fh) - if err != nil { - return nil, err - } - defer file.Close() - - return loadSpecFromELF(file, nil) + return internal.NewSafeELFFile(fh) } - return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported) + return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported) } -func parseBTF(btf io.Reader, bo binary.ByteOrder) ([]rawType, stringTable, error) { - rawBTF, err := io.ReadAll(btf) - if err != nil { - return nil, nil, fmt.Errorf("can't read BTF: %v", err) - } - - rd := bytes.NewReader(rawBTF) - +// parseBTFHeader parses the header of the .BTF section. +func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) { var header btfHeader - if err := binary.Read(rd, bo, &header); err != nil { - return nil, nil, fmt.Errorf("can't read header: %v", err) + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) } if header.Magic != btfMagic { - return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) } if header.Version != 1 { - return nil, nil, fmt.Errorf("unexpected version %v", header.Version) + return nil, fmt.Errorf("unexpected version %v", header.Version) } if header.Flags != 0 { - return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) + return nil, fmt.Errorf("unsupported flags %v", header.Flags) } remainder := int64(header.HdrLen) - int64(binary.Size(&header)) if remainder < 0 { - return nil, nil, errors.New("header is too short") + return nil, errors.New("header length shorter than btfHeader size") } - if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil { - return nil, nil, fmt.Errorf("header padding: %v", err) + if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil { + return nil, fmt.Errorf("header padding: %v", err) } - if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil { - return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err) + return &header, nil +} + +func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder { + for _, bo := range []binary.ByteOrder{ + binary.LittleEndian, + binary.BigEndian, + } { + if _, err := parseBTFHeader(io.NewSectionReader(r, 0, math.MaxInt64), bo); err == nil { + return bo + } } - rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen))) + return nil +} + +// parseBTF reads a .BTF section into memory and parses it into a list of +// raw types and a string table. 
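A standalone sketch of the byte-order probe behind guessRawBTFByteOrder, checking only the magic for brevity. The 0xeB9F magic value is fixed by the kernel's BTF format; everything else here is illustrative.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const btfMagic = 0xeB9F // magic value at the start of every .BTF blob

// guessByteOrder tries both byte orders and keeps the one whose decoded
// magic matches, returning nil if neither does.
func guessByteOrder(raw []byte) binary.ByteOrder {
	for _, bo := range []binary.ByteOrder{binary.LittleEndian, binary.BigEndian} {
		var magic uint16
		if err := binary.Read(bytes.NewReader(raw), bo, &magic); err == nil && magic == btfMagic {
			return bo
		}
	}
	return nil
}

func main() {
	raw := []byte{0x9f, 0xeb, 0x01, 0x00} // little-endian magic followed by version 1
	fmt.Println(guessByteOrder(raw))      // LittleEndian
}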
+func parseBTF(btf io.Reader, bo binary.ByteOrder) ([]rawType, stringTable, error) { + rawBTF, err := io.ReadAll(btf) if err != nil { - return nil, nil, fmt.Errorf("can't read type names: %w", err) + return nil, nil, fmt.Errorf("can't read BTF: %v", err) + } + rd := bytes.NewReader(rawBTF) + + header, err := parseBTFHeader(rd, bo) + if err != nil { + return nil, nil, fmt.Errorf("parsing .BTF header: %v", err) } - if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil { - return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err) + buf := io.NewSectionReader(rd, header.stringStart(), int64(header.StringLen)) + rawStrings, err := readStringTable(buf) + if err != nil { + return nil, nil, fmt.Errorf("can't read type names: %w", err) } - rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo) + buf = io.NewSectionReader(rd, header.typeStart(), int64(header.TypeLen)) + rawTypes, err := readTypes(buf, bo) if err != nil { return nil, nil, fmt.Errorf("can't read types: %w", err) } @@ -353,11 +493,12 @@ func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[s // Copy creates a copy of Spec. func (s *Spec) Copy() *Spec { types, _ := copyTypes(s.types, nil) - namedTypes := make(map[string][]NamedType) + + namedTypes := make(map[essentialName][]Type) for _, typ := range types { - if named, ok := typ.(NamedType); ok { - name := essentialName(named.TypeName()) - namedTypes[name] = append(namedTypes[name], named) + if name := typ.TypeName(); name != "" { + en := newEssentialName(name) + namedTypes[en] = append(namedTypes[en], typ) } } @@ -438,19 +579,13 @@ func (sw sliceWriter) Write(p []byte) (int, error) { return copy(sw, p), nil } -// Program finds the BTF for a specific section. -// -// Length is the number of bytes in the raw BPF instruction stream. +// Program finds the BTF for a specific function. // // Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't // contain extended BTF info. -func (s *Spec) Program(name string, length uint64) (*Program, error) { - if length == 0 { - return nil, errors.New("length musn't be zero") - } - +func (s *Spec) Program(name string) (*Program, error) { if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil { - return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo) + return nil, fmt.Errorf("BTF for function %s: %w", name, ErrNoExtendedInfo) } funcInfos, funcOK := s.funcInfos[name] @@ -458,20 +593,59 @@ func (s *Spec) Program(name string, length uint64) (*Program, error) { relos, coreOK := s.coreRelos[name] if !funcOK && !lineOK && !coreOK { - return nil, fmt.Errorf("no extended BTF info for section %s", name) + return nil, fmt.Errorf("no extended BTF info for function %s", name) } - return &Program{s, length, funcInfos, lineInfos, relos}, nil + return &Program{s, funcInfos, lineInfos, relos}, nil +} + +// TypeByID returns the BTF Type with the given type ID. +// +// Returns an error wrapping ErrNotFound if a Type with the given ID +// does not exist in the Spec. +func (s *Spec) TypeByID(id TypeID) (Type, error) { + if int(id) > len(s.types) { + return nil, fmt.Errorf("type ID %d: %w", id, ErrNotFound) + } + return s.types[id], nil } -// FindType searches for a type with a specific name. +// AnyTypesByName returns a list of BTF Types with the given name. // -// Called T a type that satisfies Type, typ must be a non-nil **T. -// On success, the address of the found type will be copied in typ. 
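The parsing above replaces Seek plus io.LimitReader with bounded readers derived from header offsets. A generic sketch of that pattern with an invented two-region header:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// header is a toy stand-in for btfHeader: two regions described by
// offset/length pairs relative to the end of the header.
type header struct {
	HdrLen uint32
	AOff   uint32
	ALen   uint32
	BOff   uint32
	BLen   uint32
}

func main() {
	blob := []byte{
		20, 0, 0, 0, // HdrLen
		0, 0, 0, 0, 4, 0, 0, 0, // region A: offset 0, length 4
		4, 0, 0, 0, 3, 0, 0, 0, // region B: offset 4, length 3
		'A', 'A', 'A', 'A', 'B', 'B', 'B',
	}

	var h header
	if err := binary.Read(bytes.NewReader(blob), binary.LittleEndian, &h); err != nil {
		panic(err)
	}

	// Each region gets its own bounded reader; no Seek bookkeeping required.
	a := io.NewSectionReader(bytes.NewReader(blob), int64(h.HdrLen+h.AOff), int64(h.ALen))
	b := io.NewSectionReader(bytes.NewReader(blob), int64(h.HdrLen+h.BOff), int64(h.BLen))

	bufA, _ := io.ReadAll(a)
	bufB, _ := io.ReadAll(b)
	fmt.Printf("%s %s\n", bufA, bufB) // AAAA BBB
}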
+// If the BTF blob describes multiple compilation units like vmlinux, multiple +// Types with the same name and kind can exist, but might not describe the same +// data structure. +// +// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. +func (s *Spec) AnyTypesByName(name string) ([]Type, error) { + types := s.namedTypes[newEssentialName(name)] + if len(types) == 0 { + return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound) + } + + // Return a copy to prevent changes to namedTypes. + result := make([]Type, 0, len(types)) + for _, t := range types { + // Match against the full name, not just the essential one + // in case the type being looked up is a struct flavor. + if t.TypeName() == name { + result = append(result, t) + } + } + return result, nil +} + +// TypeByName searches for a Type with a specific name. Since multiple +// Types with the same name can exist, the parameter typ is taken to +// narrow down the search in case of a clash. +// +// typ must be a non-nil pointer to an implementation of a Type. +// On success, the address of the found Type will be copied to typ. // // Returns an error wrapping ErrNotFound if no matching -// type exists in spec. -func (s *Spec) FindType(name string, typ interface{}) error { +// Type exists in the Spec. If multiple candidates are found, +// an error is returned. +func (s *Spec) TypeByName(name string, typ interface{}) error { typValue := reflect.ValueOf(typ) if typValue.Kind() != reflect.Ptr { return fmt.Errorf("%T is not a pointer", typ) @@ -487,17 +661,17 @@ func (s *Spec) FindType(name string, typ interface{}) error { return fmt.Errorf("%T does not satisfy Type interface", typ) } + types, err := s.AnyTypesByName(name) + if err != nil { + return err + } + var candidate Type - for _, typ := range s.namedTypes[essentialName(name)] { + for _, typ := range types { if reflect.TypeOf(typ) != wanted { continue } - // Match against the full name, not just the essential one. - if typ.TypeName() != name { - continue - } - if candidate != nil { return fmt.Errorf("type %s: multiple candidates for %T", name, typ) } @@ -517,7 +691,7 @@ func (s *Spec) FindType(name string, typ interface{}) error { // Handle is a reference to BTF loaded into the kernel. type Handle struct { spec *Spec - fd *internal.FD + fd *sys.FD } // NewHandle loads BTF into the kernel. @@ -544,18 +718,18 @@ func NewHandle(spec *Spec) (*Handle, error) { return nil, errors.New("BTF exceeds the maximum size") } - attr := &bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), + attr := &sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(btf), + BtfSize: uint32(len(btf)), } - fd, err := bpfLoadBTF(attr) + fd, err := sys.BtfLoad(attr) if err != nil { logBuf := make([]byte, 64*1024) - attr.logBuf = internal.NewSlicePointer(logBuf) - attr.btfLogSize = uint32(len(logBuf)) - attr.btfLogLevel = 1 - _, logErr := bpfLoadBTF(attr) + attr.BtfLogBuf = sys.NewSlicePointer(logBuf) + attr.BtfLogSize = uint32(len(logBuf)) + attr.BtfLogLevel = 1 + _, logErr := sys.BtfLoad(attr) return nil, internal.ErrorWithLog(err, logBuf, logErr) } @@ -568,7 +742,9 @@ func NewHandle(spec *Spec) (*Handle, error) { // // Requires CAP_SYS_ADMIN. 
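TypeByName keeps FindType's calling convention: a non-nil pointer to the wanted Type implementation, into which the single match is copied. A self-contained sketch of that reflection mechanism using invented types:

package main

import (
	"fmt"
	"reflect"
)

// Shape stands in for the btf.Type interface in this sketch.
type Shape interface{ Name() string }

type Circle struct{ name string }

func (c *Circle) Name() string { return c.name }

var registry = []Shape{&Circle{name: "c1"}}

// findByType mirrors the reflection contract of Spec.TypeByName: out must be
// a non-nil pointer to a concrete implementation (here **Circle), and the
// first candidate of that concrete type is copied into it.
func findByType(out interface{}) error {
	v := reflect.ValueOf(out)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		return fmt.Errorf("%T is not a non-nil pointer", out)
	}
	wanted := v.Type().Elem() // e.g. *Circle
	for _, s := range registry {
		if reflect.TypeOf(s) != wanted {
			continue
		}
		v.Elem().Set(reflect.ValueOf(s))
		return nil
	}
	return fmt.Errorf("no candidate of type %s", wanted)
}

func main() {
	var c *Circle
	if err := findByType(&c); err != nil {
		panic(err)
	}
	fmt.Println(c.Name()) // c1
}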
func NewHandleFromID(id ID) (*Handle, error) { - fd, err := internal.BPFObjGetFDByID(internal.BPF_BTF_GET_FD_BY_ID, uint32(id)) + fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{ + Id: uint32(id), + }) if err != nil { return nil, fmt.Errorf("get BTF by id: %w", err) } @@ -596,12 +772,7 @@ func (h *Handle) Close() error { // FD returns the file descriptor for the handle. func (h *Handle) FD() int { - value, err := h.fd.Value() - if err != nil { - return -1 - } - - return int(value) + return h.fd.Int() } // Map is the BTF for a map. @@ -612,10 +783,10 @@ type Map struct { // Program is the BTF information for a stream of instructions. type Program struct { - spec *Spec - length uint64 - funcInfos, lineInfos extInfo - coreRelos coreRelos + spec *Spec + FuncInfo FuncInfo + LineInfos LineInfos + CoreRelos CoreRelos } // Spec returns the BTF spec of this program. @@ -623,54 +794,11 @@ func (p *Program) Spec() *Spec { return p.spec } -// Append the information from other to the Program. -func (p *Program) Append(other *Program) error { - if other.spec != p.spec { - return fmt.Errorf("can't append program with different BTF specs") - } - - funcInfos, err := p.funcInfos.append(other.funcInfos, p.length) - if err != nil { - return fmt.Errorf("func infos: %w", err) - } - - lineInfos, err := p.lineInfos.append(other.lineInfos, p.length) - if err != nil { - return fmt.Errorf("line infos: %w", err) - } - - p.funcInfos = funcInfos - p.lineInfos = lineInfos - p.coreRelos = p.coreRelos.append(other.coreRelos, p.length) - p.length += other.length - return nil -} - -// FuncInfos returns the binary form of BTF function infos. -func (p *Program) FuncInfos() (recordSize uint32, bytes []byte, err error) { - bytes, err = p.funcInfos.MarshalBinary() - if err != nil { - return 0, nil, fmt.Errorf("func infos: %w", err) - } - - return p.funcInfos.recordSize, bytes, nil -} - -// LineInfos returns the binary form of BTF line infos. -func (p *Program) LineInfos() (recordSize uint32, bytes []byte, err error) { - bytes, err = p.lineInfos.MarshalBinary() - if err != nil { - return 0, nil, fmt.Errorf("line infos: %w", err) - } - - return p.lineInfos.recordSize, bytes, nil -} - // Fixups returns the changes required to adjust the program to the target. // // Passing a nil target will relocate against the running kernel. 
func (p *Program) Fixups(target *Spec) (COREFixups, error) { - if len(p.coreRelos) == 0 { + if len(p.CoreRelos) == 0 { return nil, nil } @@ -682,24 +810,7 @@ func (p *Program) Fixups(target *Spec) (COREFixups, error) { } } - return coreRelocate(p.spec, target, p.coreRelos) -} - -type bpfLoadBTFAttr struct { - btf internal.Pointer - logBuf internal.Pointer - btfSize uint32 - btfLogSize uint32 - btfLogLevel uint32 -} - -func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) { - fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - - return internal.NewFD(uint32(fd)), nil + return coreRelocate(p.spec, target, p.CoreRelos) } func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte { @@ -744,9 +855,9 @@ var haveBTF = internal.FeatureTest("BTF", "5.1", func() error { btf := marshalBTF(&types, strings, internal.NativeEndian) - fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), + fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(btf), + BtfSize: uint32(len(btf)), }) if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { // Treat both EINVAL and EPERM as not supported: loading the program @@ -782,9 +893,9 @@ var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() err btf := marshalBTF(&types, strings, internal.NativeEndian) - fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), + fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(btf), + BtfSize: uint32(len(btf)), }) if errors.Is(err, unix.EINVAL) { return internal.ErrNotSupported diff --git a/vendor/github.com/cilium/ebpf/internal/btf/core.go b/vendor/github.com/cilium/ebpf/internal/btf/core.go index d02df9d50bb..95908308a15 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/core.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/core.go @@ -97,7 +97,7 @@ func (f COREFixup) isNonExistant() bool { type COREFixups map[uint64]COREFixup -// Apply a set of CO-RE relocations to a BPF program. +// Apply returns a copy of insns with CO-RE relocations applied. 
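The two probes above follow the library's feature-test pattern: attempt a minimal operation once, translate EINVAL/EPERM-style failures into a "not supported" error, and cache the verdict. A rough standalone sketch of that caching shape; the internal.FeatureTest signature itself is not reproduced here.

package example

import (
	"errors"
	"sync"
)

var errNotSupported = errors.New("not supported")

// featureTest runs probe exactly once and reports the same verdict to every
// subsequent caller, roughly the shape of internal.FeatureTest.
func featureTest(probe func() error) func() error {
	var (
		once sync.Once
		err  error
	)
	return func() error {
		once.Do(func() { err = probe() })
		return err
	}
}

// A probe would attempt the smallest possible kernel operation and return
// errNotSupported when the kernel rejects it.
var haveExampleFeature = featureTest(func() error {
	return errNotSupported
})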
func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) { if len(fs) == 0 { cpy := make(asm.Instructions, len(insns)) @@ -191,13 +191,13 @@ func (k COREKind) checksForExistence() bool { return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists } -func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) { +func coreRelocate(local, target *Spec, relos CoreRelos) (COREFixups, error) { if local.byteOrder != target.byteOrder { return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder) } var ids []TypeID - relosByID := make(map[TypeID]coreRelos) + relosByID := make(map[TypeID]CoreRelos) result := make(COREFixups, len(relos)) for _, relo := range relos { if relo.kind == reloTypeIDLocal { @@ -234,13 +234,13 @@ func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) { } localType := local.types[id] - named, ok := localType.(NamedType) - if !ok || named.TypeName() == "" { + localTypeName := localType.TypeName() + if localTypeName == "" { return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) } relos := relosByID[id] - targets := target.namedTypes[essentialName(named.TypeName())] + targets := target.namedTypes[newEssentialName(localTypeName)] fixups, err := coreCalculateFixups(localType, targets, relos) if err != nil { return nil, fmt.Errorf("relocate %s: %w", localType, err) @@ -262,9 +262,9 @@ var errImpossibleRelocation = errors.New("impossible relocation") // // The best target is determined by scoring: the less poisoning we have to do // the better the target is. -func coreCalculateFixups(local Type, targets []NamedType, relos coreRelos) ([]COREFixup, error) { +func coreCalculateFixups(local Type, targets []Type, relos CoreRelos) ([]COREFixup, error) { localID := local.ID() - local, err := copyType(local, skipQualifierAndTypedef) + local, err := copyType(local, skipQualifiersAndTypedefs) if err != nil { return nil, err } @@ -273,7 +273,7 @@ func coreCalculateFixups(local Type, targets []NamedType, relos coreRelos) ([]CO var bestFixups []COREFixup for i := range targets { targetID := targets[i].ID() - target, err := copyType(targets[i], skipQualifierAndTypedef) + target, err := copyType(targets[i], skipQualifiersAndTypedefs) if err != nil { return nil, err } @@ -326,7 +326,7 @@ func coreCalculateFixups(local Type, targets []NamedType, relos coreRelos) ([]CO // coreCalculateFixup calculates the fixup for a single local type, target type // and relocation. 
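A toy sketch of the candidate scoring mentioned above: fixups are computed against every candidate target and the set requiring the least poisoning wins. Names and types here are invented; tie handling in the real code is omitted.

package example

// fixup is a toy stand-in for COREFixup; a poisoned fixup is one that could
// not be satisfied against the candidate target type.
type fixup struct{ poison bool }

// score counts poisoned fixups: lower is better.
func score(fixups []fixup) int {
	n := 0
	for _, f := range fixups {
		if f.poison {
			n++
		}
	}
	return n
}

// bestFixups keeps the candidate fixup set that needs the least poisoning.
func bestFixups(candidates [][]fixup) []fixup {
	var best []fixup
	bestScore := -1
	for _, fixups := range candidates {
		if s := score(fixups); bestScore == -1 || s < bestScore {
			best, bestScore = fixups, s
		}
	}
	return best
}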
-func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo coreRelo) (COREFixup, error) { +func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo CoreRelo) (COREFixup, error) { fixup := func(local, target uint32) (COREFixup, error) { return COREFixup{relo.kind, local, target, false}, nil } @@ -704,9 +704,9 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal return nil, nil, errImpossibleRelocation } - localName := essentialName(localValue.Name) + localName := newEssentialName(localValue.Name) for i, targetValue := range targetEnum.Values { - if essentialName(targetValue.Name) != localName { + if newEssentialName(targetValue.Name) != localName { continue } @@ -831,7 +831,7 @@ func coreAreMembersCompatible(localType Type, targetType Type) error { return nil } - if essentialName(a) == essentialName(b) { + if newEssentialName(a) == newEssentialName(b) { return nil } @@ -872,7 +872,7 @@ func coreAreMembersCompatible(localType Type, targetType Type) error { } } -func skipQualifierAndTypedef(typ Type) (Type, error) { +func skipQualifiersAndTypedefs(typ Type) (Type, error) { result := typ for depth := 0; depth <= maxTypeDepth; depth++ { switch v := (result).(type) { @@ -886,3 +886,16 @@ func skipQualifierAndTypedef(typ Type) (Type, error) { } return nil, errors.New("exceeded type depth") } + +func skipQualifiers(typ Type) (Type, error) { + result := typ + for depth := 0; depth <= maxTypeDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + default: + return result, nil + } + } + return nil, errors.New("exceeded type depth") +} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go index cdae2ec4082..c4da1e489d9 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go @@ -1,22 +1,73 @@ package btf import ( - "bufio" - "bytes" "encoding/binary" "errors" "fmt" "io" + "math" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" ) +// extInfo contains extended program metadata. +// +// It is indexed per section. +type extInfo struct { + funcInfos map[string]FuncInfos + lineInfos map[string]LineInfos + relos map[string]CoreRelos +} + +// loadExtInfos parses the .BTF.ext section into its constituent parts. +func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, strings stringTable) (*extInfo, error) { + // Open unbuffered section reader. binary.Read() calls io.ReadFull on + // the header structs, resulting in one syscall per header. 
+ headerRd := io.NewSectionReader(r, 0, math.MaxInt64) + extHeader, err := parseBTFExtHeader(headerRd, bo) + if err != nil { + return nil, fmt.Errorf("parsing BTF extension header: %w", err) + } + + coreHeader, err := parseBTFExtCoreHeader(headerRd, bo, extHeader) + if err != nil { + return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err) + } + + buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen)) + funcInfos, err := parseFuncInfos(buf, bo, strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF function info: %w", err) + } + + buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen)) + lineInfos, err := parseLineInfos(buf, bo, strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF line info: %w", err) + } + + relos := make(map[string]CoreRelos) + if coreHeader != nil && coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 { + buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.CoreReloLen)) + relos, err = parseCoreRelos(buf, bo, strings) + if err != nil { + return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err) + } + } + + return &extInfo{funcInfos, lineInfos, relos}, nil +} + +// btfExtHeader is found at the start of the .BTF.ext section. type btfExtHeader struct { Magic uint16 Version uint8 Flags uint8 - HdrLen uint32 + + // HdrLen is larger than the size of struct btfExtHeader when it is + // immediately followed by a btfExtCoreHeader. + HdrLen uint32 FuncInfoOff uint32 FuncInfoLen uint32 @@ -24,195 +75,341 @@ type btfExtHeader struct { LineInfoLen uint32 } -type btfExtCoreHeader struct { - CoreReloOff uint32 - CoreReloLen uint32 -} - -func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, relos map[string]coreRelos, err error) { +// parseBTFExtHeader parses the header of the .BTF.ext section. +func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) { var header btfExtHeader - var coreHeader btfExtCoreHeader if err := binary.Read(r, bo, &header); err != nil { - return nil, nil, nil, fmt.Errorf("can't read header: %v", err) + return nil, fmt.Errorf("can't read header: %v", err) } if header.Magic != btfMagic { - return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) } if header.Version != 1 { - return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version) + return nil, fmt.Errorf("unexpected version %v", header.Version) } if header.Flags != 0 { - return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) + return nil, fmt.Errorf("unsupported flags %v", header.Flags) } - remainder := int64(header.HdrLen) - int64(binary.Size(&header)) - if remainder < 0 { - return nil, nil, nil, errors.New("header is too short") + if int64(header.HdrLen) < int64(binary.Size(&header)) { + return nil, fmt.Errorf("header length shorter than btfExtHeader size") } - coreHdrSize := int64(binary.Size(&coreHeader)) - if remainder >= coreHdrSize { - if err := binary.Read(r, bo, &coreHeader); err != nil { - return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err) - } - remainder -= coreHdrSize + return &header, nil +} + +// funcInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its func_info entries. 
+func (h *btfExtHeader) funcInfoStart() int64 { + return int64(h.HdrLen + h.FuncInfoOff) +} + +// lineInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its line_info entries. +func (h *btfExtHeader) lineInfoStart() int64 { + return int64(h.HdrLen + h.LineInfoOff) +} + +// coreReloStart returns the offset from the beginning of the .BTF.ext section +// to the start of its CO-RE relocation entries. +func (h *btfExtHeader) coreReloStart(ch *btfExtCoreHeader) int64 { + return int64(h.HdrLen + ch.CoreReloOff) +} + +// btfExtCoreHeader is found right after the btfExtHeader when its HdrLen +// field is larger than its size. +type btfExtCoreHeader struct { + CoreReloOff uint32 + CoreReloLen uint32 +} + +// parseBTFExtCoreHeader parses the tail of the .BTF.ext header. If additional +// header bytes are present, extHeader.HdrLen will be larger than the struct, +// indicating the presence of a CO-RE extension header. +func parseBTFExtCoreHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCoreHeader, error) { + extHdrSize := int64(binary.Size(&extHeader)) + remainder := int64(extHeader.HdrLen) - extHdrSize + + if remainder == 0 { + return nil, nil } - // Of course, the .BTF.ext header has different semantics than the - // .BTF ext header. We need to ignore non-null values. - _, err = io.CopyN(io.Discard, r, remainder) - if err != nil { - return nil, nil, nil, fmt.Errorf("header padding: %v", err) + var coreHeader btfExtCoreHeader + if err := binary.Read(r, bo, &coreHeader); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) } - if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err) + return &coreHeader, nil +} + +type btfExtInfoSec struct { + SecNameOff uint32 + NumInfo uint32 +} + +// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext, +// appearing within func_info and line_info sub-sections. +// These headers appear once for each program section in the ELF and are +// followed by one or more func/line_info records for the section. 
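A short arithmetic sketch of the header bookkeeping above: sub-section offsets are relative to HdrLen, and a HdrLen larger than the base struct signals the optional CO-RE header. The field values used here are made up.

package main

import (
	"encoding/binary"
	"fmt"
)

// extHeader mirrors the btfExtHeader layout for this sketch.
type extHeader struct {
	Magic   uint16
	Version uint8
	Flags   uint8
	HdrLen  uint32

	FuncInfoOff uint32
	FuncInfoLen uint32
	LineInfoOff uint32
	LineInfoLen uint32
}

func main() {
	h := extHeader{HdrLen: 32, FuncInfoOff: 0, FuncInfoLen: 64, LineInfoOff: 64, LineInfoLen: 32}

	// Sub-section starts are computed from HdrLen, not from the struct size.
	fmt.Println("func_info starts at", int64(h.HdrLen+h.FuncInfoOff)) // 32
	fmt.Println("line_info starts at", int64(h.HdrLen+h.LineInfoOff)) // 96

	// Extra header bytes beyond the base struct indicate a CO-RE header.
	base := int64(binary.Size(extHeader{}))                   // 24 bytes
	fmt.Println("has CO-RE header:", int64(h.HdrLen) > base) // true
}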
+func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) { + var infoHeader btfExtInfoSec + if err := binary.Read(r, bo, &infoHeader); err != nil { + return "", nil, fmt.Errorf("read ext info header: %w", err) } - buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen))) - funcInfo, err = parseExtInfo(buf, bo, strings) + secName, err := strings.Lookup(infoHeader.SecNameOff) if err != nil { - return nil, nil, nil, fmt.Errorf("function info: %w", err) + return "", nil, fmt.Errorf("get section name: %w", err) } - - if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err) + if secName == "" { + return "", nil, fmt.Errorf("extinfo header refers to empty section name") } - buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen))) - lineInfo, err = parseExtInfo(buf, bo, strings) - if err != nil { - return nil, nil, nil, fmt.Errorf("line info: %w", err) + if infoHeader.NumInfo == 0 { + return "", nil, fmt.Errorf("section %s has zero records", secName) } - if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 { - if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err) - } + return secName, &infoHeader, nil +} - relos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings) - if err != nil { - return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err) - } +// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos +// or line_infos segment that describes the length of all extInfoRecords in +// that segment. +func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) { + const maxRecordSize = 256 + + var recordSize uint32 + if err := binary.Read(r, bo, &recordSize); err != nil { + return 0, fmt.Errorf("can't read record size: %v", err) } - return funcInfo, lineInfo, relos, nil + if recordSize < 4 { + // Need at least InsnOff worth of bytes per record. + return 0, errors.New("record size too short") + } + if recordSize > maxRecordSize { + return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) + } + + return recordSize, nil } -type btfExtInfoSec struct { - SecNameOff uint32 - NumInfo uint32 +// FuncInfo represents the location and type ID of a function in a BPF ELF. +type FuncInfo struct { + // Instruction offset of the function within an ELF section. + // Always zero after parsing a funcinfo from an ELF, instruction streams + // are split on function boundaries. + InsnOff uint32 + TypeID TypeID } -type extInfoRecord struct { - InsnOff uint64 - Opaque []byte +// Name looks up the FuncInfo's corresponding function name in the given spec. +func (fi FuncInfo) Name(spec *Spec) (string, error) { + // Look up function name based on type ID. + typ, err := spec.TypeByID(fi.TypeID) + if err != nil { + return "", fmt.Errorf("looking up type by ID: %w", err) + } + if _, ok := typ.(*Func); !ok { + return "", fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ) + } + + // C doesn't have anonymous functions, but check just in case. 
+ if name := typ.TypeName(); name != "" { + return name, nil + } + + return "", fmt.Errorf("Func with type ID %d doesn't have a name", fi.TypeID) } -type extInfo struct { - byteOrder binary.ByteOrder - recordSize uint32 - records []extInfoRecord +// Marshal writes the binary representation of the FuncInfo to w. +// The function offset is converted from bytes to instructions. +func (fi FuncInfo) Marshal(w io.Writer, offset uint64) error { + fi.InsnOff += uint32(offset) + // The kernel expects offsets in number of raw bpf instructions, + // while the ELF tracks it in bytes. + fi.InsnOff /= asm.InstructionSize + return binary.Write(w, internal.NativeEndian, fi) } -func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) { - if other.byteOrder != ei.byteOrder { - return extInfo{}, fmt.Errorf("ext_info byte order mismatch, want %v (got %v)", ei.byteOrder, other.byteOrder) +type FuncInfos []FuncInfo + +// funcForOffset returns the function that the instruction at the given +// ELF section offset belongs to. +// +// For example, consider an ELF section that contains 3 functions (a, b, c) +// at offsets 0, 10 and 15 respectively. Offset 5 will return function a, +// offset 12 will return b, offset >= 15 will return c, etc. +func (infos FuncInfos) funcForOffset(offset uint32) *FuncInfo { + for n, fi := range infos { + // Iterator went past the offset the caller is looking for, + // no point in continuing the search. + if offset < fi.InsnOff { + return nil + } + + // If there is no next item in the list, or if the given offset + // is smaller than the next function, the offset belongs to + // the current function. + if n+1 >= len(infos) || offset < infos[n+1].InsnOff { + return &fi + } } - if other.recordSize != ei.recordSize { - return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize) + return nil +} + +// parseLineInfos parses a func_info sub-section within .BTF.ext ito a map of +// func infos indexed by section name. +func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]FuncInfos, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err } - records := make([]extInfoRecord, 0, len(ei.records)+len(other.records)) - records = append(records, ei.records...) - for _, info := range other.records { - records = append(records, extInfoRecord{ - InsnOff: info.InsnOff + offset, - Opaque: info.Opaque, - }) + result := make(map[string]FuncInfos) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records } - return extInfo{ei.byteOrder, ei.recordSize, records}, nil } -func (ei extInfo) MarshalBinary() ([]byte, error) { - if ei.byteOrder != internal.NativeEndian { - return nil, fmt.Errorf("%s is not the native byte order", ei.byteOrder) - } +// parseFuncInfoRecords parses a stream of func_infos into a funcInfos. +// These records appear after a btf_ext_info_sec header in the func_info +// sub-section of .BTF.ext. 
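// A minimal sketch (not part of the vendored code) of how funcForOffset,
// defined above, resolves ELF section offsets to functions. It mirrors the
// worked example in its doc comment: functions a, b, c at offsets 0, 10, 15.
func exampleFuncForOffset() {
	infos := FuncInfos{
		{InsnOff: 0, TypeID: 1},  // function a
		{InsnOff: 10, TypeID: 2}, // function b
		{InsnOff: 15, TypeID: 3}, // function c
	}

	_ = infos.funcForOffset(5)  // falls inside a, returns the first entry
	_ = infos.funcForOffset(12) // falls inside b, returns the second entry
	_ = infos.funcForOffset(20) // past the last entry, returns c
	_ = infos.funcForOffset(0)  // exactly at a's offset, returns a
}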
+func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) (FuncInfos, error) { + var out FuncInfos + var fi FuncInfo - if len(ei.records) == 0 { - return nil, nil + if exp, got := uint32(binary.Size(fi)), recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. + return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got) } - buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records))) - for _, info := range ei.records { - // The kernel expects offsets in number of raw bpf instructions, - // while the ELF tracks it in bytes. - insnOff := uint32(info.InsnOff / asm.InstructionSize) - if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil { - return nil, fmt.Errorf("can't write instruction offset: %v", err) + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &fi); err != nil { + return nil, fmt.Errorf("can't read function info: %v", err) + } + + if fi.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff) } - buf.Write(info.Opaque) + out = append(out, fi) } - return buf.Bytes(), nil + return out, nil } -func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) { - const maxRecordSize = 256 +// LineInfo represents the location and contents of a single line of source +// code a BPF ELF was compiled from. +type LineInfo struct { + // Instruction offset of the function within an ELF section. + // After parsing a LineInfo from an ELF, this offset is relative to + // the function body instead of an ELF section. + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} - var recordSize uint32 - if err := binary.Read(r, bo, &recordSize); err != nil { - return nil, fmt.Errorf("can't read record size: %v", err) +// Marshal writes the binary representation of the LineInfo to w. +// The instruction offset is converted from bytes to instructions. +func (li LineInfo) Marshal(w io.Writer, offset uint64) error { + li.InsnOff += uint32(offset) + // The kernel expects offsets in number of raw bpf instructions, + // while the ELF tracks it in bytes. + li.InsnOff /= asm.InstructionSize + return binary.Write(w, internal.NativeEndian, li) +} + +type LineInfos []LineInfo + +// Marshal writes the binary representation of the LineInfos to w. +func (li LineInfos) Marshal(w io.Writer, off uint64) error { + if len(li) == 0 { + return nil } - if recordSize < 4 { - // Need at least insnOff - return nil, errors.New("record size too short") + for _, info := range li { + if err := info.Marshal(w, off); err != nil { + return err + } } - if recordSize > maxRecordSize { - return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) + + return nil +} + +// parseLineInfos parses a line_info sub-section within .BTF.ext ito a map of +// line infos indexed by section name. 
+func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]LineInfos, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err } - result := make(map[string]extInfo) + result := make(map[string]LineInfos) for { - secName, infoHeader, err := parseExtInfoHeader(r, bo, strings) + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) if errors.Is(err, io.EOF) { return result, nil } + if err != nil { + return nil, err + } + + records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } - var records []extInfoRecord - for i := uint32(0); i < infoHeader.NumInfo; i++ { - var byteOff uint32 - if err := binary.Read(r, bo, &byteOff); err != nil { - return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err) - } + result[secName] = records + } +} - buf := make([]byte, int(recordSize-4)) - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("section %v: can't read record: %v", secName, err) - } +// parseLineInfoRecords parses a stream of line_infos into a lineInfos. +// These records appear after a btf_ext_info_sec header in the line_info +// sub-section of .BTF.ext. +func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) (LineInfos, error) { + var out LineInfos + var li LineInfo - if byteOff%asm.InstructionSize != 0 { - return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff) - } + if exp, got := uint32(binary.Size(li)), recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. + return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got) + } - records = append(records, extInfoRecord{uint64(byteOff), buf}) + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &li); err != nil { + return nil, fmt.Errorf("can't read line info: %v", err) } - result[secName] = extInfo{ - bo, - recordSize, - records, + if li.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff) } + + out = append(out, li) } + + return out, nil } -// bpfCoreRelo matches `struct bpf_core_relo` from the kernel +// bpfCoreRelo matches the kernel's struct bpf_core_relo. type bpfCoreRelo struct { InsnOff uint32 TypeID TypeID @@ -220,93 +417,81 @@ type bpfCoreRelo struct { Kind COREKind } -type coreRelo struct { +type CoreRelo struct { insnOff uint32 typeID TypeID accessor coreAccessor kind COREKind } -type coreRelos []coreRelo - -// append two slices of extInfoRelo to each other. The InsnOff of b are adjusted -// by offset. -func (r coreRelos) append(other coreRelos, offset uint64) coreRelos { - result := make([]coreRelo, 0, len(r)+len(other)) - result = append(result, r...) - for _, relo := range other { - relo.insnOff += uint32(offset) - result = append(result, relo) - } - return result -} +type CoreRelos []CoreRelo var extInfoReloSize = binary.Size(bpfCoreRelo{}) -func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]coreRelos, error) { - var recordSize uint32 - if err := binary.Read(r, bo, &recordSize); err != nil { - return nil, fmt.Errorf("read record size: %v", err) +// parseCoreRelos parses a core_relos sub-section within .BTF.ext ito a map of +// CO-RE relocations indexed by section name. 
+func parseCoreRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]CoreRelos, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err } if recordSize != uint32(extInfoReloSize) { return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) } - result := make(map[string]coreRelos) + result := make(map[string]CoreRelos) for { - secName, infoHeader, err := parseExtInfoHeader(r, bo, strings) + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) if errors.Is(err, io.EOF) { return result, nil } + if err != nil { + return nil, err + } - var relos coreRelos - for i := uint32(0); i < infoHeader.NumInfo; i++ { - var relo bpfCoreRelo - if err := binary.Read(r, bo, &relo); err != nil { - return nil, fmt.Errorf("section %v: read record: %v", secName, err) - } - - if relo.InsnOff%asm.InstructionSize != 0 { - return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff) - } - - accessorStr, err := strings.Lookup(relo.AccessStrOff) - if err != nil { - return nil, err - } - - accessor, err := parseCoreAccessor(accessorStr) - if err != nil { - return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) - } - - relos = append(relos, coreRelo{ - relo.InsnOff, - relo.TypeID, - accessor, - relo.Kind, - }) + records, err := parseCoreReloRecords(r, bo, recordSize, infoHeader.NumInfo, strings) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) } - result[secName] = relos + result[secName] = records } } -func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) { - var infoHeader btfExtInfoSec - if err := binary.Read(r, bo, &infoHeader); err != nil { - return "", nil, fmt.Errorf("read ext info header: %w", err) - } +// parseCoreReloRecords parses a stream of CO-RE relocation entries into a +// coreRelos. These records appear after a btf_ext_info_sec header in the +// core_relos sub-section of .BTF.ext. 
+func parseCoreReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, strings stringTable) (CoreRelos, error) { + var out CoreRelos - secName, err := strings.Lookup(infoHeader.SecNameOff) - if err != nil { - return "", nil, fmt.Errorf("get section name: %w", err) - } + var relo bpfCoreRelo + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &relo); err != nil { + return nil, fmt.Errorf("can't read CO-RE relocation: %v", err) + } - if infoHeader.NumInfo == 0 { - return "", nil, fmt.Errorf("section %s has zero records", secName) + if relo.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff) + } + + accessorStr, err := strings.Lookup(relo.AccessStrOff) + if err != nil { + return nil, err + } + + accessor, err := parseCoreAccessor(accessorStr) + if err != nil { + return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) + } + + out = append(out, CoreRelo{ + relo.InsnOff, + relo.TypeID, + accessor, + relo.Kind, + }) } - return secName, &infoHeader, nil + return out, nil } diff --git a/vendor/github.com/cilium/ebpf/internal/btf/format.go b/vendor/github.com/cilium/ebpf/internal/btf/format.go new file mode 100644 index 00000000000..159319c33c9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/btf/format.go @@ -0,0 +1,304 @@ +package btf + +import ( + "errors" + "fmt" + "strings" +) + +var errNestedTooDeep = errors.New("nested too deep") + +// GoFormatter converts a Type to Go syntax. +// +// A zero GoFormatter is valid to use. +type GoFormatter struct { + w strings.Builder + + // Types present in this map are referred to using the given name if they + // are encountered when outputting another type. + Names map[Type]string + + // Identifier is called for each field of struct-like types. By default the + // field name is used as is. + Identifier func(string) string + + // EnumIdentifier is called for each element of an enum. By default the + // name of the enum type is concatenated with Identifier(element). + EnumIdentifier func(name, element string) string +} + +// TypeDeclaration generates a Go type declaration for a BTF type. +func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) { + gf.w.Reset() + if err := gf.writeTypeDecl(name, typ); err != nil { + return "", err + } + return gf.w.String(), nil +} + +func (gf *GoFormatter) identifier(s string) string { + if gf.Identifier != nil { + return gf.Identifier(s) + } + + return s +} + +func (gf *GoFormatter) enumIdentifier(name, element string) string { + if gf.EnumIdentifier != nil { + return gf.EnumIdentifier(name, element) + } + + return name + gf.identifier(element) +} + +// writeTypeDecl outputs a declaration of the given type. 
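// A minimal sketch (not part of the vendored code) of what the new
// GoFormatter defined above produces. The type and field names here are made
// up; a zero GoFormatter is valid and uses field names as-is.
func exampleTypeDeclaration() (string, error) {
	event := &Struct{
		Name: "event",
		Size: 8,
		Members: []Member{
			{Name: "pid", Type: &Int{Size: 4, Encoding: Signed}, OffsetBits: 0},
			{Name: "cpu", Type: &Int{Size: 4}, OffsetBits: 32},
		},
	}

	var gf GoFormatter
	// Returns roughly: type Event struct { pid int32; cpu uint32; }
	return gf.TypeDeclaration("Event", event)
}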
+// +// It encodes https://golang.org/ref/spec#Type_declarations: +// +// type foo struct { bar uint32; } +// type bar int32 +func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error { + if name == "" { + return fmt.Errorf("need a name for type %s", typ) + } + + typ, err := skipQualifiers(typ) + if err != nil { + return err + } + + switch v := typ.(type) { + case *Enum: + fmt.Fprintf(&gf.w, "type %s int32", name) + if len(v.Values) == 0 { + return nil + } + + gf.w.WriteString("; const ( ") + for _, ev := range v.Values { + id := gf.enumIdentifier(name, ev.Name) + fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value) + } + gf.w.WriteString(")") + + return nil + } + + fmt.Fprintf(&gf.w, "type %s ", name) + return gf.writeTypeLit(typ, 0) +} + +// writeType outputs the name of a named type or a literal describing the type. +// +// It encodes https://golang.org/ref/spec#Types. +// +// foo (if foo is a named type) +// uint32 +func (gf *GoFormatter) writeType(typ Type, depth int) error { + typ, err := skipQualifiers(typ) + if err != nil { + return err + } + + name := gf.Names[typ] + if name != "" { + gf.w.WriteString(name) + return nil + } + + return gf.writeTypeLit(typ, depth) +} + +// writeTypeLit outputs a literal describing the type. +// +// The function ignores named types. +// +// It encodes https://golang.org/ref/spec#TypeLit. +// +// struct { bar uint32; } +// uint32 +func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { + depth++ + if depth > maxTypeDepth { + return errNestedTooDeep + } + + typ, err := skipQualifiers(typ) + if err != nil { + return err + } + + switch v := typ.(type) { + case *Int: + gf.writeIntLit(v) + + case *Enum: + gf.w.WriteString("int32") + + case *Typedef: + err = gf.writeType(v.Type, depth) + + case *Array: + fmt.Fprintf(&gf.w, "[%d]", v.Nelems) + err = gf.writeType(v.Type, depth) + + case *Struct: + err = gf.writeStructLit(v.Size, v.Members, depth) + + case *Union: + // Always choose the first member to represent the union in Go. + err = gf.writeStructLit(v.Size, v.Members[:1], depth) + + case *Datasec: + err = gf.writeDatasecLit(v, depth) + + default: + return fmt.Errorf("type %s: %w", typ, ErrNotSupported) + } + + if err != nil { + return fmt.Errorf("%s: %w", typ, err) + } + + return nil +} + +func (gf *GoFormatter) writeIntLit(i *Int) { + // NB: Encoding.IsChar is ignored. 
+ if i.Encoding.IsBool() && i.Size == 1 { + gf.w.WriteString("bool") + return + } + + bits := i.Size * 8 + if i.Encoding.IsSigned() { + fmt.Fprintf(&gf.w, "int%d", bits) + } else { + fmt.Fprintf(&gf.w, "uint%d", bits) + } +} + +func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + skippedBitfield := false + for i, m := range members { + if m.BitfieldSize > 0 { + skippedBitfield = true + continue + } + + offset := m.OffsetBits / 8 + if n := offset - prevOffset; skippedBitfield && n > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n) + } else { + gf.writePadding(n) + } + + size, err := Sizeof(m.Type) + if err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + prevOffset = offset + uint32(size) + + if err := gf.writeStructField(m, depth); err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + } + + gf.writePadding(size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writeStructField(m Member, depth int) error { + if m.BitfieldSize > 0 { + return fmt.Errorf("bitfields are not supported") + } + if m.OffsetBits%8 != 0 { + return fmt.Errorf("unsupported offset %d", m.OffsetBits) + } + + if m.Name == "" { + // Special case a nested anonymous union like + // struct foo { union { int bar; int baz }; } + // by replacing the whole union with its first member. + union, ok := m.Type.(*Union) + if !ok { + return fmt.Errorf("anonymous fields are not supported") + + } + + if len(union.Members) == 0 { + return errors.New("empty anonymous union") + } + + depth++ + if depth > maxTypeDepth { + return errNestedTooDeep + } + + m := union.Members[0] + size, err := Sizeof(m.Type) + if err != nil { + return err + } + + if err := gf.writeStructField(m, depth); err != nil { + return err + } + + gf.writePadding(union.Size - uint32(size)) + return nil + + } + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name)) + + if err := gf.writeType(m.Type, depth); err != nil { + return err + } + + gf.w.WriteString("; ") + return nil +} + +func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + for i, vsi := range ds.Vars { + v := vsi.Type.(*Var) + if v.Linkage != GlobalVar { + // Ignore static, extern, etc. for now. 
+ continue + } + + if v.Name == "" { + return fmt.Errorf("variable %d: empty name", i) + } + + gf.writePadding(vsi.Offset - prevOffset) + prevOffset = vsi.Offset + vsi.Size + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name)) + + if err := gf.writeType(v.Type, depth); err != nil { + return fmt.Errorf("variable %d: %w", i, err) + } + + gf.w.WriteString("; ") + } + + gf.writePadding(ds.Size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writePadding(bytes uint32) { + if bytes > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes) + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go b/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go deleted file mode 100644 index 220b285afe0..00000000000 --- a/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go +++ /dev/null @@ -1,50 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -// Use with https://github.com/dvyukov/go-fuzz - -package btf - -import ( - "bytes" - "encoding/binary" - - "github.com/cilium/ebpf/internal" -) - -func FuzzSpec(data []byte) int { - if len(data) < binary.Size(btfHeader{}) { - return -1 - } - - spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil) - if err != nil { - if spec != nil { - panic("spec is not nil") - } - return 0 - } - if spec == nil { - panic("spec is nil") - } - return 1 -} - -func FuzzExtInfo(data []byte) int { - if len(data) < binary.Size(btfExtHeader{}) { - return -1 - } - - table := stringTable("\x00foo\x00barfoo\x00") - info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table) - if err != nil { - if info != nil { - panic("info is not nil") - } - return 0 - } - if info == nil { - panic("info is nil") - } - return 1 -} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/info.go b/vendor/github.com/cilium/ebpf/internal/btf/info.go index 6a9b5d2e0ba..dd44a0be675 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/info.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/info.go @@ -4,6 +4,8 @@ import ( "bytes" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" ) // info describes a BTF object. @@ -18,19 +20,20 @@ type info struct { KernelBTF bool } -func newInfoFromFd(fd *internal.FD) (*info, error) { +func newInfoFromFd(fd *sys.FD) (*info, error) { // We invoke the syscall once with a empty BTF and name buffers to get size // information to allocate buffers. Then we invoke it a second time with // buffers to receive the data. 
- bpfInfo, err := bpfGetBTFInfoByFD(fd, nil, nil) - if err != nil { + var btfInfo sys.BtfInfo + if err := sys.ObjInfo(fd, &btfInfo); err != nil { return nil, err } - btfBuffer := make([]byte, bpfInfo.btfSize) - nameBuffer := make([]byte, bpfInfo.nameLen) - bpfInfo, err = bpfGetBTFInfoByFD(fd, btfBuffer, nameBuffer) - if err != nil { + btfBuffer := make([]byte, btfInfo.BtfSize) + nameBuffer := make([]byte, btfInfo.NameLen) + btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer) + btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer) + if err := sys.ObjInfo(fd, &btfInfo); err != nil { return nil, err } @@ -41,8 +44,8 @@ func newInfoFromFd(fd *internal.FD) (*info, error) { return &info{ BTF: spec, - ID: ID(bpfInfo.id), - Name: internal.CString(nameBuffer), - KernelBTF: bpfInfo.kernelBTF != 0, + ID: ID(btfInfo.Id), + Name: unix.ByteSliceToString(nameBuffer), + KernelBTF: btfInfo.KernelBtf != 0, }, nil } diff --git a/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go b/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go deleted file mode 100644 index a4f80abd011..00000000000 --- a/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go +++ /dev/null @@ -1,31 +0,0 @@ -package btf - -import ( - "fmt" - "unsafe" - - "github.com/cilium/ebpf/internal" -) - -type bpfBTFInfo struct { - btf internal.Pointer - btfSize uint32 - id uint32 - name internal.Pointer - nameLen uint32 - kernelBTF uint32 -} - -func bpfGetBTFInfoByFD(fd *internal.FD, btf, name []byte) (*bpfBTFInfo, error) { - info := bpfBTFInfo{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), - name: internal.NewSlicePointer(name), - nameLen: uint32(len(name)), - } - if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil { - return nil, fmt.Errorf("can't get program info: %w", err) - } - - return &info, nil -} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/types.go b/vendor/github.com/cilium/ebpf/internal/btf/types.go index 5c8e7c6e59d..a6b5a10aae5 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/types.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/types.go @@ -18,9 +18,12 @@ func (tid TypeID) ID() TypeID { // Type represents a type described by BTF. type Type interface { + // The type ID of the Type within this BTF spec. ID() TypeID - String() string + // Name of the type, empty for anonymous types and types that cannot + // carry a name, like Void and Pointer. + TypeName() string // Make a copy of the type, without copying Type members. copy() Type @@ -28,37 +31,32 @@ type Type interface { // Enumerate all nested Types. Repeated calls must visit nested // types in the same order. walk(*typeDeque) -} -// NamedType is a type with a name. -type NamedType interface { - Type - - // Name of the type, empty for anonymous types. - TypeName() string + String() string } var ( - _ NamedType = (*Int)(nil) - _ NamedType = (*Struct)(nil) - _ NamedType = (*Union)(nil) - _ NamedType = (*Enum)(nil) - _ NamedType = (*Fwd)(nil) - _ NamedType = (*Func)(nil) - _ NamedType = (*Typedef)(nil) - _ NamedType = (*Var)(nil) - _ NamedType = (*Datasec)(nil) - _ NamedType = (*Float)(nil) + _ Type = (*Int)(nil) + _ Type = (*Struct)(nil) + _ Type = (*Union)(nil) + _ Type = (*Enum)(nil) + _ Type = (*Fwd)(nil) + _ Type = (*Func)(nil) + _ Type = (*Typedef)(nil) + _ Type = (*Var)(nil) + _ Type = (*Datasec)(nil) + _ Type = (*Float)(nil) ) // Void is the unit type of BTF. 
type Void struct{} -func (v *Void) ID() TypeID { return 0 } -func (v *Void) String() string { return "void#0" } -func (v *Void) size() uint32 { return 0 } -func (v *Void) copy() Type { return (*Void)(nil) } -func (v *Void) walk(*typeDeque) {} +func (v *Void) ID() TypeID { return 0 } +func (v *Void) String() string { return "void#0" } +func (v *Void) TypeName() string { return "" } +func (v *Void) size() uint32 { return 0 } +func (v *Void) copy() Type { return (*Void)(nil) } +func (v *Void) walk(*typeDeque) {} type IntEncoding byte @@ -68,9 +66,22 @@ const ( Bool ) +func (ie IntEncoding) IsSigned() bool { + return ie&Signed != 0 +} + +func (ie IntEncoding) IsChar() bool { + return ie&Char != 0 +} + +func (ie IntEncoding) IsBool() bool { + return ie&Bool != 0 +} + // Int is an integer of a given length. type Int struct { TypeID + Name string // The size of the integer in bytes. @@ -86,12 +97,12 @@ func (i *Int) String() string { var s strings.Builder switch { - case i.Encoding&Char != 0: + case i.Encoding.IsChar(): s.WriteString("char") - case i.Encoding&Bool != 0: + case i.Encoding.IsBool(): s.WriteString("bool") default: - if i.Encoding&Signed == 0 { + if !i.Encoding.IsSigned() { s.WriteRune('u') } s.WriteString("int") @@ -129,6 +140,7 @@ func (p *Pointer) String() string { return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID()) } +func (p *Pointer) TypeName() string { return "" } func (p *Pointer) size() uint32 { return 8 } func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) } func (p *Pointer) copy() Type { @@ -147,6 +159,8 @@ func (arr *Array) String() string { return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems) } +func (arr *Array) TypeName() string { return "" } + func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) } func (arr *Array) copy() Type { cpy := *arr @@ -343,6 +357,8 @@ func (v *Volatile) String() string { return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID()) } +func (v *Volatile) TypeName() string { return "" } + func (v *Volatile) qualify() Type { return v.Type } func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) } func (v *Volatile) copy() Type { @@ -360,6 +376,8 @@ func (c *Const) String() string { return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID()) } +func (c *Const) TypeName() string { return "" } + func (c *Const) qualify() Type { return c.Type } func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) } func (c *Const) copy() Type { @@ -377,6 +395,8 @@ func (r *Restrict) String() string { return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID()) } +func (r *Restrict) TypeName() string { return "" } + func (r *Restrict) qualify() Type { return r.Type } func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) } func (r *Restrict) copy() Type { @@ -421,6 +441,8 @@ func (fp *FuncProto) String() string { return s.String() } +func (fp *FuncProto) TypeName() string { return "" } + func (fp *FuncProto) walk(tdq *typeDeque) { tdq.push(&fp.Return) for i := range fp.Params { @@ -594,6 +616,12 @@ func Sizeof(typ Type) (int, error) { return 0, fmt.Errorf("type %s: exceeded type depth", typ) } +// Copy a Type recursively. +func Copy(typ Type) Type { + typ, _ = copyType(typ, nil) + return typ +} + // copy a Type recursively. // // typ may form a cycle. @@ -735,7 +763,7 @@ func (dq *typeDeque) all() []*Type { // Returns a map of named types (so, where NameOff is non-zero) and a slice of types // indexed by TypeID. 
Since BTF ignores compilation units, multiple types may share // the same name. A Type may form a cyclic graph by pointing at itself. -func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]NamedType, err error) { +func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[essentialName][]Type, err error) { type fixupDef struct { id TypeID expectedKind btfKind @@ -774,7 +802,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, types = make([]Type, 0, len(rawTypes)) types = append(types, (*Void)(nil)) - namedTypes = make(map[string][]NamedType) + namedTypes = make(map[essentialName][]Type) for i, raw := range rawTypes { var ( @@ -918,10 +946,8 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, types = append(types, typ) - if named, ok := typ.(NamedType); ok { - if name := essentialName(named.TypeName()); name != "" { - namedTypes[name] = append(namedTypes[name], named) - } + if name := newEssentialName(typ.TypeName()); name != "" { + namedTypes[name] = append(namedTypes[name], typ) } } @@ -947,11 +973,20 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, return types, namedTypes, nil } -// essentialName returns name without a ___ suffix. -func essentialName(name string) string { +// essentialName represents the name of a BTF type stripped of any flavor +// suffixes after a ___ delimiter. +type essentialName string + +// newEssentialName returns name without a ___ suffix. +// +// CO-RE has the concept of 'struct flavors', which are used to deal with +// changes in kernel data structures. Anything after three underscores +// in a type name is ignored for the purpose of finding a candidate type +// in the kernel's BTF. +func newEssentialName(name string) essentialName { lastIdx := strings.LastIndex(name, "___") if lastIdx > 0 { - return name[:lastIdx] + return essentialName(name[:lastIdx]) } - return name + return essentialName(name) } diff --git a/vendor/github.com/cilium/ebpf/internal/elf.go b/vendor/github.com/cilium/ebpf/internal/elf.go index 54a4313130a..758d34f7075 100644 --- a/vendor/github.com/cilium/ebpf/internal/elf.go +++ b/vendor/github.com/cilium/ebpf/internal/elf.go @@ -66,3 +66,14 @@ func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) { syms, err = se.File.DynamicSymbols() return } + +// SectionsByType returns all sections in the file with the specified section type. +func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section { + sections := make([]*elf.Section, 0, 1) + for _, section := range se.Sections { + if section.Type == typ { + sections = append(sections, section) + } + } + return sections +} diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go index 877bd72ee26..b9716cd612f 100644 --- a/vendor/github.com/cilium/ebpf/internal/errors.go +++ b/vendor/github.com/cilium/ebpf/internal/errors.go @@ -1,7 +1,6 @@ package internal import ( - "bytes" "errors" "fmt" "strings" @@ -15,7 +14,7 @@ import ( // logErr should be the error returned by the syscall that generated // the log. It is used to check for truncation of the output. 
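// A minimal sketch (not part of the vendored code) of the struct flavor
// stripping done by newEssentialName in types.go above: anything after a
// ___ delimiter in a local type name is ignored when matching kernel types.
func exampleEssentialName() bool {
	flavored := newEssentialName("task_struct___2") // "task_struct"
	plain := newEssentialName("task_struct")        // "task_struct"
	return flavored == plain                        // true
}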
func ErrorWithLog(err error, log []byte, logErr error) error { - logStr := strings.Trim(CString(log), "\t\r\n ") + logStr := strings.Trim(unix.ByteSliceToString(log), "\t\r\n ") if errors.Is(logErr, unix.ENOSPC) { logStr += " (truncated...)" } @@ -40,12 +39,3 @@ func (le *VerifierError) Error() string { return fmt.Sprintf("%s: %s", le.cause, le.log) } - -// CString turns a NUL / zero terminated byte buffer into a string. -func CString(in []byte) string { - inLen := bytes.IndexByte(in, 0) - if inLen == -1 { - return "" - } - return string(in[:inLen]) -} diff --git a/vendor/github.com/cilium/ebpf/internal/fd.go b/vendor/github.com/cilium/ebpf/internal/fd.go deleted file mode 100644 index af04955bd53..00000000000 --- a/vendor/github.com/cilium/ebpf/internal/fd.go +++ /dev/null @@ -1,69 +0,0 @@ -package internal - -import ( - "errors" - "fmt" - "os" - "runtime" - "strconv" - - "github.com/cilium/ebpf/internal/unix" -) - -var ErrClosedFd = errors.New("use of closed file descriptor") - -type FD struct { - raw int64 -} - -func NewFD(value uint32) *FD { - fd := &FD{int64(value)} - runtime.SetFinalizer(fd, (*FD).Close) - return fd -} - -func (fd *FD) String() string { - return strconv.FormatInt(fd.raw, 10) -} - -func (fd *FD) Value() (uint32, error) { - if fd.raw < 0 { - return 0, ErrClosedFd - } - - return uint32(fd.raw), nil -} - -func (fd *FD) Close() error { - if fd.raw < 0 { - return nil - } - - value := int(fd.raw) - fd.raw = -1 - - fd.Forget() - return unix.Close(value) -} - -func (fd *FD) Forget() { - runtime.SetFinalizer(fd, nil) -} - -func (fd *FD) Dup() (*FD, error) { - if fd.raw < 0 { - return nil, ErrClosedFd - } - - dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0) - if err != nil { - return nil, fmt.Errorf("can't dup fd: %v", err) - } - - return NewFD(uint32(dup)), nil -} - -func (fd *FD) File(name string) *os.File { - fd.Forget() - return os.NewFile(uintptr(fd.raw), name) -} diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go index fa7402782d7..7177e596aa2 100644 --- a/vendor/github.com/cilium/ebpf/internal/io.go +++ b/vendor/github.com/cilium/ebpf/internal/io.go @@ -1,6 +1,35 @@ package internal -import "errors" +import ( + "bufio" + "compress/gzip" + "errors" + "io" + "os" +) + +// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized +// buffered reader. It is a convenience function for reading subsections of +// ELF sections while minimizing the amount of read() syscalls made. +// +// Syscall overhead is non-negligible in continuous integration context +// where ELFs might be accessed over virtual filesystems with poor random +// access performance. Buffering reads makes sense because (sub)sections +// end up being read completely anyway. +// +// Use instead of the r.Seek() + io.LimitReader() pattern. +func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) io.Reader { + // Clamp the size of the buffer to one page to avoid slurping large parts + // of a file into memory. bufio.NewReader uses a hardcoded default buffer + // of 4096. Allow arches with larger pages to allocate more, but don't + // allocate a fixed 4k buffer if we only need to read a small segment. + buf := n + if ps := int64(os.Getpagesize()); n > ps { + buf = ps + } + + return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf)) +} // DiscardZeroes makes sure that all written bytes are zero // before discarding them. 
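// A minimal sketch (not part of the vendored code): reading a subsection of
// an ELF section through the new buffered reader above instead of the old
// Seek + io.LimitReader pattern, so the data is pulled in page-sized reads.
func exampleReadSubsection(sec io.ReaderAt, off, n int64) ([]byte, error) {
	return io.ReadAll(NewBufferedSectionReader(sec, off, n))
}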
@@ -14,3 +43,20 @@ func (DiscardZeroes) Write(p []byte) (int, error) { } return len(p), nil } + +// ReadAllCompressed decompresses a gzipped file into memory. +func ReadAllCompressed(file string) ([]byte, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + gz, err := gzip.NewReader(fh) + if err != nil { + return nil, err + } + defer gz.Close() + + return io.ReadAll(gz) +} diff --git a/vendor/github.com/cilium/ebpf/internal/output.go b/vendor/github.com/cilium/ebpf/internal/output.go new file mode 100644 index 00000000000..aeab37fcfaf --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/output.go @@ -0,0 +1,84 @@ +package internal + +import ( + "bytes" + "errors" + "go/format" + "go/scanner" + "io" + "strings" + "unicode" +) + +// Identifier turns a C style type or field name into an exportable Go equivalent. +func Identifier(str string) string { + prev := rune(-1) + return strings.Map(func(r rune) rune { + // See https://golang.org/ref/spec#Identifiers + switch { + case unicode.IsLetter(r): + if prev == -1 { + r = unicode.ToUpper(r) + } + + case r == '_': + switch { + // The previous rune was deleted, or we are at the + // beginning of the string. + case prev == -1: + fallthrough + + // The previous rune is a lower case letter or a digit. + case unicode.IsDigit(prev) || (unicode.IsLetter(prev) && unicode.IsLower(prev)): + // delete the current rune, and force the + // next character to be uppercased. + r = -1 + } + + case unicode.IsDigit(r): + + default: + // Delete the current rune. prev is unchanged. + return -1 + } + + prev = r + return r + }, str) +} + +// WriteFormatted outputs a formatted src into out. +// +// If formatting fails it returns an informative error message. +func WriteFormatted(src []byte, out io.Writer) error { + formatted, err := format.Source(src) + if err == nil { + _, err = out.Write(formatted) + return err + } + + var el scanner.ErrorList + if !errors.As(err, &el) { + return err + } + + var nel scanner.ErrorList + for _, err := range el { + if !err.Pos.IsValid() { + nel = append(nel, err) + continue + } + + buf := src[err.Pos.Offset:] + nl := bytes.IndexRune(buf, '\n') + if nl == -1 { + nel = append(nel, err) + continue + } + + err.Msg += ": " + string(buf[:nl]) + nel = append(nel, err) + } + + return nel +} diff --git a/vendor/github.com/cilium/ebpf/internal/pinning.go b/vendor/github.com/cilium/ebpf/internal/pinning.go index 5329b432d72..9fa3146c70b 100644 --- a/vendor/github.com/cilium/ebpf/internal/pinning.go +++ b/vendor/github.com/cilium/ebpf/internal/pinning.go @@ -4,24 +4,43 @@ import ( "errors" "fmt" "os" + "path/filepath" + "runtime" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) -func Pin(currentPath, newPath string, fd *FD) error { +func Pin(currentPath, newPath string, fd *sys.FD) error { + const bpfFSType = 0xcafe4a11 + if newPath == "" { return errors.New("given pinning path cannot be empty") } if currentPath == newPath { return nil } + + var statfs unix.Statfs_t + if err := unix.Statfs(filepath.Dir(newPath), &statfs); err != nil { + return err + } else if uint64(statfs.Type) != bpfFSType { + return fmt.Errorf("%s is not on a bpf filesystem", newPath) + } + + defer runtime.KeepAlive(fd) + if currentPath == "" { - return BPFObjPin(newPath, fd) + return sys.ObjPin(&sys.ObjPinAttr{ + Pathname: sys.NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) } - var err error + // Renameat2 is used instead of os.Rename to disallow the new path replacing // an existing path. 
- if err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE); err == nil { + err := unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE) + if err == nil { // Object is now moved to the new pinning path. return nil } @@ -29,7 +48,10 @@ func Pin(currentPath, newPath string, fd *FD) error { return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) } // Internal state not in sync with the file system so let's fix it. - return BPFObjPin(newPath, fd) + return sys.ObjPin(&sys.ObjPinAttr{ + Pathname: sys.NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) } func Unpin(pinnedPath string) error { diff --git a/vendor/github.com/cilium/ebpf/internal/sys/doc.go b/vendor/github.com/cilium/ebpf/internal/sys/doc.go new file mode 100644 index 00000000000..4b7245e2d0a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/doc.go @@ -0,0 +1,4 @@ +// Package sys contains bindings for the BPF syscall. +package sys + +//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../btf/testdata/vmlinux-btf.gz diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/vendor/github.com/cilium/ebpf/internal/sys/fd.go new file mode 100644 index 00000000000..65517d45e26 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/fd.go @@ -0,0 +1,96 @@ +package sys + +import ( + "fmt" + "math" + "os" + "runtime" + "strconv" + + "github.com/cilium/ebpf/internal/unix" +) + +var ErrClosedFd = unix.EBADF + +type FD struct { + raw int +} + +func newFD(value int) *FD { + fd := &FD{value} + runtime.SetFinalizer(fd, (*FD).Close) + return fd +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function, since the underlying +// file descriptor number may change. This is because the BPF UAPI assumes that +// zero is not a valid fd value. +func NewFD(value int) (*FD, error) { + if value < 0 { + return nil, fmt.Errorf("invalid fd %d", value) + } + + fd := newFD(value) + if value != 0 { + return fd, nil + } + + dup, err := fd.Dup() + _ = fd.Close() + return dup, err +} + +func (fd *FD) String() string { + return strconv.FormatInt(int64(fd.raw), 10) +} + +func (fd *FD) Int() int { + return fd.raw +} + +func (fd *FD) Uint() uint32 { + if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 { + // Best effort: this is the number most likely to be an invalid file + // descriptor. It is equal to -1 (on two's complement arches). + return math.MaxUint32 + } + return uint32(fd.raw) +} + +func (fd *FD) Close() error { + if fd.raw < 0 { + return nil + } + + value := int(fd.raw) + fd.raw = -1 + + fd.Forget() + return unix.Close(value) +} + +func (fd *FD) Forget() { + runtime.SetFinalizer(fd, nil) +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw < 0 { + return nil, ErrClosedFd + } + + // Always require the fd to be larger than zero: the BPF API treats the value + // as "no argument provided". 
+ dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1) + if err != nil { + return nil, fmt.Errorf("can't dup fd: %v", err) + } + + return newFD(dup), nil +} + +func (fd *FD) File(name string) *os.File { + fd.Forget() + return os.NewFile(uintptr(fd.raw), name) +} diff --git a/vendor/github.com/cilium/ebpf/internal/ptr.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go similarity index 71% rename from vendor/github.com/cilium/ebpf/internal/ptr.go rename to vendor/github.com/cilium/ebpf/internal/sys/ptr.go index f295de72cfe..a221006888d 100644 --- a/vendor/github.com/cilium/ebpf/internal/ptr.go +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go @@ -1,4 +1,4 @@ -package internal +package sys import ( "unsafe" @@ -20,6 +20,13 @@ func NewSlicePointer(buf []byte) Pointer { return Pointer{ptr: unsafe.Pointer(&buf[0])} } +// NewSlicePointer creates a 64-bit pointer from a byte slice. +// +// Useful to assign both the pointer and the length in one go. +func NewSlicePointerLen(buf []byte) (Pointer, uint32) { + return NewSlicePointer(buf), uint32(len(buf)) +} + // NewStringPointer creates a 64-bit pointer from a string. func NewStringPointer(str string) Pointer { p, err := unix.BytePtrFromString(str) diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go similarity index 93% rename from vendor/github.com/cilium/ebpf/internal/ptr_32_be.go rename to vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go index 8c114ddf476..df903d780b1 100644 --- a/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go @@ -1,7 +1,7 @@ //go:build armbe || mips || mips64p32 // +build armbe mips mips64p32 -package internal +package sys import ( "unsafe" diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go similarity index 94% rename from vendor/github.com/cilium/ebpf/internal/ptr_32_le.go rename to vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go index e65a61e45d3..a6a51edb6e1 100644 --- a/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go @@ -1,7 +1,7 @@ //go:build 386 || amd64p32 || arm || mipsle || mips64p32le // +build 386 amd64p32 arm mipsle mips64p32le -package internal +package sys import ( "unsafe" diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go similarity index 95% rename from vendor/github.com/cilium/ebpf/internal/ptr_64.go rename to vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go index 71a3afe307b..7c0279e487c 100644 --- a/vendor/github.com/cilium/ebpf/internal/ptr_64.go +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go @@ -1,7 +1,7 @@ //go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32 // +build !386,!amd64p32,!arm,!mipsle,!mips64p32le,!armbe,!mips,!mips64p32 -package internal +package sys import ( "unsafe" diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go new file mode 100644 index 00000000000..dd515f0eba3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go @@ -0,0 +1,123 @@ +package sys + +import ( + "runtime" + "syscall" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// BPF wraps SYS_BPF. +// +// Any pointers contained in attr must use the Pointer type from this package. 
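// A minimal sketch (not part of the vendored code): wrapping a raw file
// descriptor returned by bpf(2) and querying object info through the new
// sys.FD and ObjInfo helpers defined in this package.
func exampleQueryMapInfo(raw int) (*MapInfo, error) {
	fd, err := NewFD(raw) // rejects negative values and duplicates fd 0
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	var info MapInfo
	if err := ObjInfo(fd, &info); err != nil {
		return nil, err
	}
	return &info, nil
}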
+func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + for { + r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) + runtime.KeepAlive(attr) + + // As of ~4.20 the verifier can be interrupted by a signal, + // and returns EAGAIN in that case. + if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD { + continue + } + + var err error + if errNo != 0 { + err = wrappedErrno{errNo} + } + + return r1, err + } +} + +// Info is implemented by all structs that can be passed to the ObjInfo syscall. +// +// MapInfo +// ProgInfo +// LinkInfo +// BtfInfo +type Info interface { + info() (unsafe.Pointer, uint32) +} + +var _ Info = (*MapInfo)(nil) + +func (i *MapInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*ProgInfo)(nil) + +func (i *ProgInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*LinkInfo)(nil) + +func (i *LinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*BtfInfo)(nil) + +func (i *BtfInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +// ObjInfo retrieves information about a BPF Fd. +// +// info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo. +func ObjInfo(fd *FD, info Info) error { + ptr, len := info.info() + err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{ + BpfFd: fd.Uint(), + InfoLen: len, + Info: NewPointer(ptr), + }) + runtime.KeepAlive(fd) + return err +} + +// BPFObjName is a null-terminated string made up of +// 'A-Za-z0-9_' characters. +type ObjName [unix.BPF_OBJ_NAME_LEN]byte + +// NewObjName truncates the result if it is too long. +func NewObjName(name string) ObjName { + var result ObjName + copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) + return result +} + +// LinkID uniquely identifies a bpf_link. +type LinkID uint32 + +// wrappedErrno wraps syscall.Errno to prevent direct comparisons with +// syscall.E* or unix.E* constants. +// +// You should never export an error of this type. +type wrappedErrno struct { + syscall.Errno +} + +func (we wrappedErrno) Unwrap() error { + return we.Errno +} + +type syscallError struct { + error + errno syscall.Errno +} + +func Error(err error, errno syscall.Errno) error { + return &syscallError{err, errno} +} + +func (se *syscallError) Is(target error) bool { + return target == se.error +} + +func (se *syscallError) Unwrap() error { + return se.errno +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go new file mode 100644 index 00000000000..ab40cef6def --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go @@ -0,0 +1,954 @@ +// Code generated by internal/cmd/gentypes; DO NOT EDIT. 
+ +package sys + +import ( + "unsafe" +) + +type AdjRoomMode int32 + +const ( + BPF_ADJ_ROOM_NET AdjRoomMode = 0 + BPF_ADJ_ROOM_MAC AdjRoomMode = 1 +) + +type AttachType int32 + +const ( + BPF_CGROUP_INET_INGRESS AttachType = 0 + BPF_CGROUP_INET_EGRESS AttachType = 1 + BPF_CGROUP_INET_SOCK_CREATE AttachType = 2 + BPF_CGROUP_SOCK_OPS AttachType = 3 + BPF_SK_SKB_STREAM_PARSER AttachType = 4 + BPF_SK_SKB_STREAM_VERDICT AttachType = 5 + BPF_CGROUP_DEVICE AttachType = 6 + BPF_SK_MSG_VERDICT AttachType = 7 + BPF_CGROUP_INET4_BIND AttachType = 8 + BPF_CGROUP_INET6_BIND AttachType = 9 + BPF_CGROUP_INET4_CONNECT AttachType = 10 + BPF_CGROUP_INET6_CONNECT AttachType = 11 + BPF_CGROUP_INET4_POST_BIND AttachType = 12 + BPF_CGROUP_INET6_POST_BIND AttachType = 13 + BPF_CGROUP_UDP4_SENDMSG AttachType = 14 + BPF_CGROUP_UDP6_SENDMSG AttachType = 15 + BPF_LIRC_MODE2 AttachType = 16 + BPF_FLOW_DISSECTOR AttachType = 17 + BPF_CGROUP_SYSCTL AttachType = 18 + BPF_CGROUP_UDP4_RECVMSG AttachType = 19 + BPF_CGROUP_UDP6_RECVMSG AttachType = 20 + BPF_CGROUP_GETSOCKOPT AttachType = 21 + BPF_CGROUP_SETSOCKOPT AttachType = 22 + BPF_TRACE_RAW_TP AttachType = 23 + BPF_TRACE_FENTRY AttachType = 24 + BPF_TRACE_FEXIT AttachType = 25 + BPF_MODIFY_RETURN AttachType = 26 + BPF_LSM_MAC AttachType = 27 + BPF_TRACE_ITER AttachType = 28 + BPF_CGROUP_INET4_GETPEERNAME AttachType = 29 + BPF_CGROUP_INET6_GETPEERNAME AttachType = 30 + BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31 + BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32 + BPF_XDP_DEVMAP AttachType = 33 + BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34 + BPF_XDP_CPUMAP AttachType = 35 + BPF_SK_LOOKUP AttachType = 36 + BPF_XDP AttachType = 37 + BPF_SK_SKB_VERDICT AttachType = 38 + __MAX_BPF_ATTACH_TYPE AttachType = 39 +) + +type Cmd int32 + +const ( + BPF_MAP_CREATE Cmd = 0 + BPF_MAP_LOOKUP_ELEM Cmd = 1 + BPF_MAP_UPDATE_ELEM Cmd = 2 + BPF_MAP_DELETE_ELEM Cmd = 3 + BPF_MAP_GET_NEXT_KEY Cmd = 4 + BPF_PROG_LOAD Cmd = 5 + BPF_OBJ_PIN Cmd = 6 + BPF_OBJ_GET Cmd = 7 + BPF_PROG_ATTACH Cmd = 8 + BPF_PROG_DETACH Cmd = 9 + BPF_PROG_TEST_RUN Cmd = 10 + BPF_PROG_GET_NEXT_ID Cmd = 11 + BPF_MAP_GET_NEXT_ID Cmd = 12 + BPF_PROG_GET_FD_BY_ID Cmd = 13 + BPF_MAP_GET_FD_BY_ID Cmd = 14 + BPF_OBJ_GET_INFO_BY_FD Cmd = 15 + BPF_PROG_QUERY Cmd = 16 + BPF_RAW_TRACEPOINT_OPEN Cmd = 17 + BPF_BTF_LOAD Cmd = 18 + BPF_BTF_GET_FD_BY_ID Cmd = 19 + BPF_TASK_FD_QUERY Cmd = 20 + BPF_MAP_LOOKUP_AND_DELETE_ELEM Cmd = 21 + BPF_MAP_FREEZE Cmd = 22 + BPF_BTF_GET_NEXT_ID Cmd = 23 + BPF_MAP_LOOKUP_BATCH Cmd = 24 + BPF_MAP_LOOKUP_AND_DELETE_BATCH Cmd = 25 + BPF_MAP_UPDATE_BATCH Cmd = 26 + BPF_MAP_DELETE_BATCH Cmd = 27 + BPF_LINK_CREATE Cmd = 28 + BPF_LINK_UPDATE Cmd = 29 + BPF_LINK_GET_FD_BY_ID Cmd = 30 + BPF_LINK_GET_NEXT_ID Cmd = 31 + BPF_ENABLE_STATS Cmd = 32 + BPF_ITER_CREATE Cmd = 33 + BPF_LINK_DETACH Cmd = 34 + BPF_PROG_BIND_MAP Cmd = 35 +) + +type FunctionId int32 + +const ( + BPF_FUNC_unspec FunctionId = 0 + BPF_FUNC_map_lookup_elem FunctionId = 1 + BPF_FUNC_map_update_elem FunctionId = 2 + BPF_FUNC_map_delete_elem FunctionId = 3 + BPF_FUNC_probe_read FunctionId = 4 + BPF_FUNC_ktime_get_ns FunctionId = 5 + BPF_FUNC_trace_printk FunctionId = 6 + BPF_FUNC_get_prandom_u32 FunctionId = 7 + BPF_FUNC_get_smp_processor_id FunctionId = 8 + BPF_FUNC_skb_store_bytes FunctionId = 9 + BPF_FUNC_l3_csum_replace FunctionId = 10 + BPF_FUNC_l4_csum_replace FunctionId = 11 + BPF_FUNC_tail_call FunctionId = 12 + BPF_FUNC_clone_redirect FunctionId = 13 + BPF_FUNC_get_current_pid_tgid FunctionId = 14 + BPF_FUNC_get_current_uid_gid FunctionId 
= 15 + BPF_FUNC_get_current_comm FunctionId = 16 + BPF_FUNC_get_cgroup_classid FunctionId = 17 + BPF_FUNC_skb_vlan_push FunctionId = 18 + BPF_FUNC_skb_vlan_pop FunctionId = 19 + BPF_FUNC_skb_get_tunnel_key FunctionId = 20 + BPF_FUNC_skb_set_tunnel_key FunctionId = 21 + BPF_FUNC_perf_event_read FunctionId = 22 + BPF_FUNC_redirect FunctionId = 23 + BPF_FUNC_get_route_realm FunctionId = 24 + BPF_FUNC_perf_event_output FunctionId = 25 + BPF_FUNC_skb_load_bytes FunctionId = 26 + BPF_FUNC_get_stackid FunctionId = 27 + BPF_FUNC_csum_diff FunctionId = 28 + BPF_FUNC_skb_get_tunnel_opt FunctionId = 29 + BPF_FUNC_skb_set_tunnel_opt FunctionId = 30 + BPF_FUNC_skb_change_proto FunctionId = 31 + BPF_FUNC_skb_change_type FunctionId = 32 + BPF_FUNC_skb_under_cgroup FunctionId = 33 + BPF_FUNC_get_hash_recalc FunctionId = 34 + BPF_FUNC_get_current_task FunctionId = 35 + BPF_FUNC_probe_write_user FunctionId = 36 + BPF_FUNC_current_task_under_cgroup FunctionId = 37 + BPF_FUNC_skb_change_tail FunctionId = 38 + BPF_FUNC_skb_pull_data FunctionId = 39 + BPF_FUNC_csum_update FunctionId = 40 + BPF_FUNC_set_hash_invalid FunctionId = 41 + BPF_FUNC_get_numa_node_id FunctionId = 42 + BPF_FUNC_skb_change_head FunctionId = 43 + BPF_FUNC_xdp_adjust_head FunctionId = 44 + BPF_FUNC_probe_read_str FunctionId = 45 + BPF_FUNC_get_socket_cookie FunctionId = 46 + BPF_FUNC_get_socket_uid FunctionId = 47 + BPF_FUNC_set_hash FunctionId = 48 + BPF_FUNC_setsockopt FunctionId = 49 + BPF_FUNC_skb_adjust_room FunctionId = 50 + BPF_FUNC_redirect_map FunctionId = 51 + BPF_FUNC_sk_redirect_map FunctionId = 52 + BPF_FUNC_sock_map_update FunctionId = 53 + BPF_FUNC_xdp_adjust_meta FunctionId = 54 + BPF_FUNC_perf_event_read_value FunctionId = 55 + BPF_FUNC_perf_prog_read_value FunctionId = 56 + BPF_FUNC_getsockopt FunctionId = 57 + BPF_FUNC_override_return FunctionId = 58 + BPF_FUNC_sock_ops_cb_flags_set FunctionId = 59 + BPF_FUNC_msg_redirect_map FunctionId = 60 + BPF_FUNC_msg_apply_bytes FunctionId = 61 + BPF_FUNC_msg_cork_bytes FunctionId = 62 + BPF_FUNC_msg_pull_data FunctionId = 63 + BPF_FUNC_bind FunctionId = 64 + BPF_FUNC_xdp_adjust_tail FunctionId = 65 + BPF_FUNC_skb_get_xfrm_state FunctionId = 66 + BPF_FUNC_get_stack FunctionId = 67 + BPF_FUNC_skb_load_bytes_relative FunctionId = 68 + BPF_FUNC_fib_lookup FunctionId = 69 + BPF_FUNC_sock_hash_update FunctionId = 70 + BPF_FUNC_msg_redirect_hash FunctionId = 71 + BPF_FUNC_sk_redirect_hash FunctionId = 72 + BPF_FUNC_lwt_push_encap FunctionId = 73 + BPF_FUNC_lwt_seg6_store_bytes FunctionId = 74 + BPF_FUNC_lwt_seg6_adjust_srh FunctionId = 75 + BPF_FUNC_lwt_seg6_action FunctionId = 76 + BPF_FUNC_rc_repeat FunctionId = 77 + BPF_FUNC_rc_keydown FunctionId = 78 + BPF_FUNC_skb_cgroup_id FunctionId = 79 + BPF_FUNC_get_current_cgroup_id FunctionId = 80 + BPF_FUNC_get_local_storage FunctionId = 81 + BPF_FUNC_sk_select_reuseport FunctionId = 82 + BPF_FUNC_skb_ancestor_cgroup_id FunctionId = 83 + BPF_FUNC_sk_lookup_tcp FunctionId = 84 + BPF_FUNC_sk_lookup_udp FunctionId = 85 + BPF_FUNC_sk_release FunctionId = 86 + BPF_FUNC_map_push_elem FunctionId = 87 + BPF_FUNC_map_pop_elem FunctionId = 88 + BPF_FUNC_map_peek_elem FunctionId = 89 + BPF_FUNC_msg_push_data FunctionId = 90 + BPF_FUNC_msg_pop_data FunctionId = 91 + BPF_FUNC_rc_pointer_rel FunctionId = 92 + BPF_FUNC_spin_lock FunctionId = 93 + BPF_FUNC_spin_unlock FunctionId = 94 + BPF_FUNC_sk_fullsock FunctionId = 95 + BPF_FUNC_tcp_sock FunctionId = 96 + BPF_FUNC_skb_ecn_set_ce FunctionId = 97 + BPF_FUNC_get_listener_sock FunctionId = 98 + 
BPF_FUNC_skc_lookup_tcp FunctionId = 99 + BPF_FUNC_tcp_check_syncookie FunctionId = 100 + BPF_FUNC_sysctl_get_name FunctionId = 101 + BPF_FUNC_sysctl_get_current_value FunctionId = 102 + BPF_FUNC_sysctl_get_new_value FunctionId = 103 + BPF_FUNC_sysctl_set_new_value FunctionId = 104 + BPF_FUNC_strtol FunctionId = 105 + BPF_FUNC_strtoul FunctionId = 106 + BPF_FUNC_sk_storage_get FunctionId = 107 + BPF_FUNC_sk_storage_delete FunctionId = 108 + BPF_FUNC_send_signal FunctionId = 109 + BPF_FUNC_tcp_gen_syncookie FunctionId = 110 + BPF_FUNC_skb_output FunctionId = 111 + BPF_FUNC_probe_read_user FunctionId = 112 + BPF_FUNC_probe_read_kernel FunctionId = 113 + BPF_FUNC_probe_read_user_str FunctionId = 114 + BPF_FUNC_probe_read_kernel_str FunctionId = 115 + BPF_FUNC_tcp_send_ack FunctionId = 116 + BPF_FUNC_send_signal_thread FunctionId = 117 + BPF_FUNC_jiffies64 FunctionId = 118 + BPF_FUNC_read_branch_records FunctionId = 119 + BPF_FUNC_get_ns_current_pid_tgid FunctionId = 120 + BPF_FUNC_xdp_output FunctionId = 121 + BPF_FUNC_get_netns_cookie FunctionId = 122 + BPF_FUNC_get_current_ancestor_cgroup_id FunctionId = 123 + BPF_FUNC_sk_assign FunctionId = 124 + BPF_FUNC_ktime_get_boot_ns FunctionId = 125 + BPF_FUNC_seq_printf FunctionId = 126 + BPF_FUNC_seq_write FunctionId = 127 + BPF_FUNC_sk_cgroup_id FunctionId = 128 + BPF_FUNC_sk_ancestor_cgroup_id FunctionId = 129 + BPF_FUNC_ringbuf_output FunctionId = 130 + BPF_FUNC_ringbuf_reserve FunctionId = 131 + BPF_FUNC_ringbuf_submit FunctionId = 132 + BPF_FUNC_ringbuf_discard FunctionId = 133 + BPF_FUNC_ringbuf_query FunctionId = 134 + BPF_FUNC_csum_level FunctionId = 135 + BPF_FUNC_skc_to_tcp6_sock FunctionId = 136 + BPF_FUNC_skc_to_tcp_sock FunctionId = 137 + BPF_FUNC_skc_to_tcp_timewait_sock FunctionId = 138 + BPF_FUNC_skc_to_tcp_request_sock FunctionId = 139 + BPF_FUNC_skc_to_udp6_sock FunctionId = 140 + BPF_FUNC_get_task_stack FunctionId = 141 + BPF_FUNC_load_hdr_opt FunctionId = 142 + BPF_FUNC_store_hdr_opt FunctionId = 143 + BPF_FUNC_reserve_hdr_opt FunctionId = 144 + BPF_FUNC_inode_storage_get FunctionId = 145 + BPF_FUNC_inode_storage_delete FunctionId = 146 + BPF_FUNC_d_path FunctionId = 147 + BPF_FUNC_copy_from_user FunctionId = 148 + BPF_FUNC_snprintf_btf FunctionId = 149 + BPF_FUNC_seq_printf_btf FunctionId = 150 + BPF_FUNC_skb_cgroup_classid FunctionId = 151 + BPF_FUNC_redirect_neigh FunctionId = 152 + BPF_FUNC_per_cpu_ptr FunctionId = 153 + BPF_FUNC_this_cpu_ptr FunctionId = 154 + BPF_FUNC_redirect_peer FunctionId = 155 + BPF_FUNC_task_storage_get FunctionId = 156 + BPF_FUNC_task_storage_delete FunctionId = 157 + BPF_FUNC_get_current_task_btf FunctionId = 158 + BPF_FUNC_bprm_opts_set FunctionId = 159 + BPF_FUNC_ktime_get_coarse_ns FunctionId = 160 + BPF_FUNC_ima_inode_hash FunctionId = 161 + BPF_FUNC_sock_from_file FunctionId = 162 + BPF_FUNC_check_mtu FunctionId = 163 + BPF_FUNC_for_each_map_elem FunctionId = 164 + BPF_FUNC_snprintf FunctionId = 165 + __BPF_FUNC_MAX_ID FunctionId = 166 +) + +type HdrStartOff int32 + +const ( + BPF_HDR_START_MAC HdrStartOff = 0 + BPF_HDR_START_NET HdrStartOff = 1 +) + +type LinkType int32 + +const ( + BPF_LINK_TYPE_UNSPEC LinkType = 0 + BPF_LINK_TYPE_RAW_TRACEPOINT LinkType = 1 + BPF_LINK_TYPE_TRACING LinkType = 2 + BPF_LINK_TYPE_CGROUP LinkType = 3 + BPF_LINK_TYPE_ITER LinkType = 4 + BPF_LINK_TYPE_NETNS LinkType = 5 + BPF_LINK_TYPE_XDP LinkType = 6 + MAX_BPF_LINK_TYPE LinkType = 7 +) + +type MapType int32 + +const ( + BPF_MAP_TYPE_UNSPEC MapType = 0 + BPF_MAP_TYPE_HASH MapType = 1 + BPF_MAP_TYPE_ARRAY MapType 
= 2 + BPF_MAP_TYPE_PROG_ARRAY MapType = 3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4 + BPF_MAP_TYPE_PERCPU_HASH MapType = 5 + BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6 + BPF_MAP_TYPE_STACK_TRACE MapType = 7 + BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8 + BPF_MAP_TYPE_LRU_HASH MapType = 9 + BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10 + BPF_MAP_TYPE_LPM_TRIE MapType = 11 + BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12 + BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13 + BPF_MAP_TYPE_DEVMAP MapType = 14 + BPF_MAP_TYPE_SOCKMAP MapType = 15 + BPF_MAP_TYPE_CPUMAP MapType = 16 + BPF_MAP_TYPE_XSKMAP MapType = 17 + BPF_MAP_TYPE_SOCKHASH MapType = 18 + BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21 + BPF_MAP_TYPE_QUEUE MapType = 22 + BPF_MAP_TYPE_STACK MapType = 23 + BPF_MAP_TYPE_SK_STORAGE MapType = 24 + BPF_MAP_TYPE_DEVMAP_HASH MapType = 25 + BPF_MAP_TYPE_STRUCT_OPS MapType = 26 + BPF_MAP_TYPE_RINGBUF MapType = 27 + BPF_MAP_TYPE_INODE_STORAGE MapType = 28 + BPF_MAP_TYPE_TASK_STORAGE MapType = 29 +) + +type ProgType int32 + +const ( + BPF_PROG_TYPE_UNSPEC ProgType = 0 + BPF_PROG_TYPE_SOCKET_FILTER ProgType = 1 + BPF_PROG_TYPE_KPROBE ProgType = 2 + BPF_PROG_TYPE_SCHED_CLS ProgType = 3 + BPF_PROG_TYPE_SCHED_ACT ProgType = 4 + BPF_PROG_TYPE_TRACEPOINT ProgType = 5 + BPF_PROG_TYPE_XDP ProgType = 6 + BPF_PROG_TYPE_PERF_EVENT ProgType = 7 + BPF_PROG_TYPE_CGROUP_SKB ProgType = 8 + BPF_PROG_TYPE_CGROUP_SOCK ProgType = 9 + BPF_PROG_TYPE_LWT_IN ProgType = 10 + BPF_PROG_TYPE_LWT_OUT ProgType = 11 + BPF_PROG_TYPE_LWT_XMIT ProgType = 12 + BPF_PROG_TYPE_SOCK_OPS ProgType = 13 + BPF_PROG_TYPE_SK_SKB ProgType = 14 + BPF_PROG_TYPE_CGROUP_DEVICE ProgType = 15 + BPF_PROG_TYPE_SK_MSG ProgType = 16 + BPF_PROG_TYPE_RAW_TRACEPOINT ProgType = 17 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR ProgType = 18 + BPF_PROG_TYPE_LWT_SEG6LOCAL ProgType = 19 + BPF_PROG_TYPE_LIRC_MODE2 ProgType = 20 + BPF_PROG_TYPE_SK_REUSEPORT ProgType = 21 + BPF_PROG_TYPE_FLOW_DISSECTOR ProgType = 22 + BPF_PROG_TYPE_CGROUP_SYSCTL ProgType = 23 + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE ProgType = 24 + BPF_PROG_TYPE_CGROUP_SOCKOPT ProgType = 25 + BPF_PROG_TYPE_TRACING ProgType = 26 + BPF_PROG_TYPE_STRUCT_OPS ProgType = 27 + BPF_PROG_TYPE_EXT ProgType = 28 + BPF_PROG_TYPE_LSM ProgType = 29 + BPF_PROG_TYPE_SK_LOOKUP ProgType = 30 +) + +type RetCode int32 + +const ( + BPF_OK RetCode = 0 + BPF_DROP RetCode = 2 + BPF_REDIRECT RetCode = 7 + BPF_LWT_REROUTE RetCode = 128 +) + +type SkAction int32 + +const ( + SK_DROP SkAction = 0 + SK_PASS SkAction = 1 +) + +type StackBuildIdStatus int32 + +const ( + BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0 + BPF_STACK_BUILD_ID_VALID StackBuildIdStatus = 1 + BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2 +) + +type StatsType int32 + +const ( + BPF_STATS_RUN_TIME StatsType = 0 +) + +type XdpAction int32 + +const ( + XDP_ABORTED XdpAction = 0 + XDP_DROP XdpAction = 1 + XDP_PASS XdpAction = 2 + XDP_TX XdpAction = 3 + XDP_REDIRECT XdpAction = 4 +) + +type BtfInfo struct { + Btf Pointer + BtfSize uint32 + Id uint32 + Name Pointer + NameLen uint32 + KernelBtf uint32 +} + +type FuncInfo struct { + InsnOff uint32 + TypeId uint32 +} + +type LineInfo struct { + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +type LinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Extra [16]uint8 +} + +type MapInfo struct { + Type uint32 + Id uint32 + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags uint32 + Name ObjName + 
Ifindex uint32 + BtfVmlinuxValueTypeId uint32 + NetnsDev uint64 + NetnsIno uint64 + BtfId uint32 + BtfKeyTypeId uint32 + BtfValueTypeId uint32 + _ [4]byte +} + +type ProgInfo struct { + Type uint32 + Id uint32 + Tag [8]uint8 + JitedProgLen uint32 + XlatedProgLen uint32 + JitedProgInsns uint64 + XlatedProgInsns Pointer + LoadTime uint64 + CreatedByUid uint32 + NrMapIds uint32 + MapIds Pointer + Name ObjName + Ifindex uint32 + _ [4]byte /* unsupported bitfield */ + NetnsDev uint64 + NetnsIno uint64 + NrJitedKsyms uint32 + NrJitedFuncLens uint32 + JitedKsyms uint64 + JitedFuncLens uint64 + BtfId uint32 + FuncInfoRecSize uint32 + FuncInfo uint64 + NrFuncInfo uint32 + NrLineInfo uint32 + LineInfo uint64 + JitedLineInfo uint64 + NrJitedLineInfo uint32 + LineInfoRecSize uint32 + JitedLineInfoRecSize uint32 + NrProgTags uint32 + ProgTags uint64 + RunTimeNs uint64 + RunCnt uint64 + RecursionMisses uint64 +} + +type BtfGetFdByIdAttr struct{ Id uint32 } + +func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type BtfLoadAttr struct { + Btf Pointer + BtfLogBuf Pointer + BtfSize uint32 + BtfLogSize uint32 + BtfLogLevel uint32 + _ [4]byte +} + +func BtfLoad(attr *BtfLoadAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type EnableStatsAttr struct{ Type uint32 } + +func EnableStats(attr *EnableStatsAttr) (*FD, error) { + fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type IterCreateAttr struct { + LinkFd uint32 + Flags uint32 +} + +func IterCreate(attr *IterCreateAttr) (*FD, error) { + fd, err := BPF(BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + TargetBtfId uint32 + _ [12]byte +} + +func LinkCreate(attr *LinkCreateAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateIterAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + IterInfo Pointer + IterInfoLen uint32 + _ [4]byte +} + +func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkUpdateAttr struct { + LinkFd uint32 + NewProgFd uint32 + Flags uint32 + OldProgFd uint32 +} + +func LinkUpdate(attr *LinkUpdateAttr) error { + _, err := BPF(BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapCreateAttr struct { + MapType MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags uint32 + InnerMapFd uint32 + NumaNode uint32 + MapName ObjName + MapIfindex uint32 + BtfFd uint32 + BtfKeyTypeId uint32 + BtfValueTypeId uint32 + BtfVmlinuxValueTypeId uint32 +} + +func MapCreate(attr *MapCreateAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapDeleteBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer 
+ Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapDeleteBatch(attr *MapDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapDeleteElem(attr *MapDeleteElemAttr) error { + _, err := BPF(BPF_MAP_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapFreezeAttr struct{ MapFd uint32 } + +func MapFreeze(attr *MapFreezeAttr) error { + _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetFdByIdAttr struct{ Id uint32 } + +func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func MapGetNextId(attr *MapGetNextIdAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetNextKeyAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + NextKey Pointer +} + +func MapGetNextKey(attr *MapGetNextKeyAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupBatch(attr *MapLookupBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupElem(attr *MapLookupElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapUpdateBatch(attr *MapUpdateBatchAttr) error { + _, err := BPF(BPF_MAP_UPDATE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapUpdateElem(attr *MapUpdateElemAttr) error { + _, err := BPF(BPF_MAP_UPDATE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjGetAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 +} + +func ObjGet(attr *ObjGetAttr) (*FD, error) { + fd, err := BPF(BPF_OBJ_GET, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ObjGetInfoByFdAttr struct 
{ + BpfFd uint32 + InfoLen uint32 + Info Pointer +} + +func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error { + _, err := BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjPinAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 +} + +func ObjPin(attr *ObjPinAttr) error { + _, err := BPF(BPF_OBJ_PIN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgAttachAttr struct { + TargetFd uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + ReplaceBpfFd uint32 +} + +func ProgAttach(attr *ProgAttachAttr) error { + _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgBindMapAttr struct { + ProgFd uint32 + MapFd uint32 + Flags uint32 +} + +func ProgBindMap(attr *ProgBindMapAttr) error { + _, err := BPF(BPF_PROG_BIND_MAP, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgDetachAttr struct { + TargetFd uint32 + AttachBpfFd uint32 + AttachType uint32 +} + +func ProgDetach(attr *ProgDetachAttr) error { + _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgGetFdByIdAttr struct{ Id uint32 } + +func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func ProgGetNextId(attr *ProgGetNextIdAttr) error { + _, err := BPF(BPF_PROG_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgLoadAttr struct { + ProgType ProgType + InsnCnt uint32 + Insns Pointer + License Pointer + LogLevel uint32 + LogSize uint32 + LogBuf Pointer + KernVersion uint32 + ProgFlags uint32 + ProgName ObjName + ProgIfindex uint32 + ExpectedAttachType AttachType + ProgBtfFd uint32 + FuncInfoRecSize uint32 + FuncInfo Pointer + FuncInfoCnt uint32 + LineInfoRecSize uint32 + LineInfo Pointer + LineInfoCnt uint32 + AttachBtfId uint32 + AttachProgFd uint32 + _ [4]byte +} + +func ProgLoad(attr *ProgLoadAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgRunAttr struct { + ProgFd uint32 + Retval uint32 + DataSizeIn uint32 + DataSizeOut uint32 + DataIn Pointer + DataOut Pointer + Repeat uint32 + Duration uint32 + CtxSizeIn uint32 + CtxSizeOut uint32 + CtxIn Pointer + CtxOut Pointer + Flags uint32 + Cpu uint32 +} + +func ProgRun(attr *ProgRunAttr) error { + _, err := BPF(BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type RawTracepointOpenAttr struct { + Name Pointer + ProgFd uint32 + _ [4]byte +} + +func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) { + fd, err := BPF(BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type CgroupLinkInfo struct { + CgroupId uint64 + AttachType AttachType + _ [4]byte +} + +type IterLinkInfo struct { + TargetName Pointer + TargetNameLen uint32 +} + +type NetNsLinkInfo struct { + NetnsIno uint32 + AttachType AttachType +} + +type RawTracepointLinkInfo struct { + TpName Pointer + TpNameLen uint32 + _ [4]byte +} + +type TracingLinkInfo struct { + AttachType AttachType + TargetObjId uint32 + TargetBtfId uint32 +} + +type XDPLinkInfo struct{ Ifindex uint32 } diff --git 
a/vendor/github.com/cilium/ebpf/internal/syscall.go b/vendor/github.com/cilium/ebpf/internal/syscall.go deleted file mode 100644 index b75037bb9d6..00000000000 --- a/vendor/github.com/cilium/ebpf/internal/syscall.go +++ /dev/null @@ -1,304 +0,0 @@ -package internal - -import ( - "errors" - "fmt" - "path/filepath" - "runtime" - "syscall" - "unsafe" - - "github.com/cilium/ebpf/internal/unix" -) - -//go:generate stringer -output syscall_string.go -type=BPFCmd - -// BPFCmd identifies a subcommand of the bpf syscall. -type BPFCmd int - -// Well known BPF commands. -const ( - BPF_MAP_CREATE BPFCmd = iota - BPF_MAP_LOOKUP_ELEM - BPF_MAP_UPDATE_ELEM - BPF_MAP_DELETE_ELEM - BPF_MAP_GET_NEXT_KEY - BPF_PROG_LOAD - BPF_OBJ_PIN - BPF_OBJ_GET - BPF_PROG_ATTACH - BPF_PROG_DETACH - BPF_PROG_TEST_RUN - BPF_PROG_GET_NEXT_ID - BPF_MAP_GET_NEXT_ID - BPF_PROG_GET_FD_BY_ID - BPF_MAP_GET_FD_BY_ID - BPF_OBJ_GET_INFO_BY_FD - BPF_PROG_QUERY - BPF_RAW_TRACEPOINT_OPEN - BPF_BTF_LOAD - BPF_BTF_GET_FD_BY_ID - BPF_TASK_FD_QUERY - BPF_MAP_LOOKUP_AND_DELETE_ELEM - BPF_MAP_FREEZE - BPF_BTF_GET_NEXT_ID - BPF_MAP_LOOKUP_BATCH - BPF_MAP_LOOKUP_AND_DELETE_BATCH - BPF_MAP_UPDATE_BATCH - BPF_MAP_DELETE_BATCH - BPF_LINK_CREATE - BPF_LINK_UPDATE - BPF_LINK_GET_FD_BY_ID - BPF_LINK_GET_NEXT_ID - BPF_ENABLE_STATS - BPF_ITER_CREATE -) - -// BPF wraps SYS_BPF. -// -// Any pointers contained in attr must use the Pointer type from this package. -func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { - r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) - runtime.KeepAlive(attr) - - var err error - if errNo != 0 { - err = wrappedErrno{errNo} - } - - return r1, err -} - -type BPFProgLoadAttr struct { - ProgType uint32 - InsCount uint32 - Instructions Pointer - License Pointer - LogLevel uint32 - LogSize uint32 - LogBuf Pointer - KernelVersion uint32 // since 4.1 2541517c32be - ProgFlags uint32 // since 4.11 e07b98d9bffe - ProgName BPFObjName // since 4.15 067cae47771c - ProgIfIndex uint32 // since 4.15 1f6f4cb7ba21 - ExpectedAttachType uint32 // since 4.17 5e43f899b03a - ProgBTFFd uint32 - FuncInfoRecSize uint32 - FuncInfo Pointer - FuncInfoCnt uint32 - LineInfoRecSize uint32 - LineInfo Pointer - LineInfoCnt uint32 - AttachBTFID uint32 - AttachProgFd uint32 -} - -// BPFProgLoad wraps BPF_PROG_LOAD. -func BPFProgLoad(attr *BPFProgLoadAttr) (*FD, error) { - for { - fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - // As of ~4.20 the verifier can be interrupted by a signal, - // and returns EAGAIN in that case. 
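// With this wrapper gone, any retry on EAGAIN has to happen in the caller of
// the generated sys.ProgLoad added earlier in this patch. A minimal sketch as
// seen from inside the module (internal/sys and internal/unix cannot be
// imported elsewhere); the helper name is assumed and the attr contents are
// omitted:
//
//	import (
//		"errors"
//
//		"github.com/cilium/ebpf/internal/sys"
//		"github.com/cilium/ebpf/internal/unix"
//	)
//
//	// progLoadRetry retries BPF_PROG_LOAD while the verifier reports EAGAIN,
//	// mirroring what the deleted BPFProgLoad wrapper did internally.
//	func progLoadRetry(attr *sys.ProgLoadAttr) (*sys.FD, error) {
//		for {
//			fd, err := sys.ProgLoad(attr)
//			if errors.Is(err, unix.EAGAIN) {
//				continue
//			}
//			return fd, err
//		}
//	}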
- if errors.Is(err, unix.EAGAIN) { - continue - } - - if err != nil { - return nil, err - } - - return NewFD(uint32(fd)), nil - } -} - -type BPFProgAttachAttr struct { - TargetFd uint32 - AttachBpfFd uint32 - AttachType uint32 - AttachFlags uint32 - ReplaceBpfFd uint32 -} - -func BPFProgAttach(attr *BPFProgAttachAttr) error { - _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - -type BPFProgDetachAttr struct { - TargetFd uint32 - AttachBpfFd uint32 - AttachType uint32 -} - -func BPFProgDetach(attr *BPFProgDetachAttr) error { - _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - -type BPFEnableStatsAttr struct { - StatsType uint32 -} - -func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) { - ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, fmt.Errorf("enable stats: %w", err) - } - return NewFD(uint32(ptr)), nil - -} - -type bpfObjAttr struct { - fileName Pointer - fd uint32 - fileFlags uint32 -} - -const bpfFSType = 0xcafe4a11 - -// BPFObjPin wraps BPF_OBJ_PIN. -func BPFObjPin(fileName string, fd *FD) error { - dirName := filepath.Dir(fileName) - var statfs unix.Statfs_t - if err := unix.Statfs(dirName, &statfs); err != nil { - return err - } - if uint64(statfs.Type) != bpfFSType { - return fmt.Errorf("%s is not on a bpf filesystem", fileName) - } - - value, err := fd.Value() - if err != nil { - return err - } - - attr := bpfObjAttr{ - fileName: NewStringPointer(fileName), - fd: value, - } - _, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return fmt.Errorf("pin object %s: %w", fileName, err) - } - return nil -} - -// BPFObjGet wraps BPF_OBJ_GET. -func BPFObjGet(fileName string, flags uint32) (*FD, error) { - attr := bpfObjAttr{ - fileName: NewStringPointer(fileName), - fileFlags: flags, - } - ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return nil, fmt.Errorf("get object %s: %w", fileName, err) - } - return NewFD(uint32(ptr)), nil -} - -type bpfObjGetInfoByFDAttr struct { - fd uint32 - infoLen uint32 - info Pointer -} - -// BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD. -// -// Available from 4.13. -func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error { - value, err := fd.Value() - if err != nil { - return err - } - - attr := bpfObjGetInfoByFDAttr{ - fd: value, - infoLen: uint32(size), - info: NewPointer(info), - } - _, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return fmt.Errorf("fd %v: %w", fd, err) - } - return nil -} - -type bpfGetFDByIDAttr struct { - id uint32 - next uint32 -} - -// BPFObjGetInfoByFD wraps BPF_*_GET_FD_BY_ID. -// -// Available from 4.13. -func BPFObjGetFDByID(cmd BPFCmd, id uint32) (*FD, error) { - attr := bpfGetFDByIDAttr{ - id: id, - } - ptr, err := BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return NewFD(uint32(ptr)), err -} - -// BPFObjName is a null-terminated string made up of -// 'A-Za-z0-9_' characters. -type BPFObjName [unix.BPF_OBJ_NAME_LEN]byte - -// NewBPFObjName truncates the result if it is too long. 
-func NewBPFObjName(name string) BPFObjName { - var result BPFObjName - copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) - return result -} - -type BPFMapCreateAttr struct { - MapType uint32 - KeySize uint32 - ValueSize uint32 - MaxEntries uint32 - Flags uint32 - InnerMapFd uint32 // since 4.12 56f668dfe00d - NumaNode uint32 // since 4.14 96eabe7a40aa - MapName BPFObjName // since 4.15 ad5b177bd73f - MapIfIndex uint32 - BTFFd uint32 - BTFKeyTypeID uint32 - BTFValueTypeID uint32 -} - -func BPFMapCreate(attr *BPFMapCreateAttr) (*FD, error) { - fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - - return NewFD(uint32(fd)), nil -} - -// wrappedErrno wraps syscall.Errno to prevent direct comparisons with -// syscall.E* or unix.E* constants. -// -// You should never export an error of this type. -type wrappedErrno struct { - syscall.Errno -} - -func (we wrappedErrno) Unwrap() error { - return we.Errno -} - -type syscallError struct { - error - errno syscall.Errno -} - -func SyscallError(err error, errno syscall.Errno) error { - return &syscallError{err, errno} -} - -func (se *syscallError) Is(target error) bool { - return target == se.error -} - -func (se *syscallError) Unwrap() error { - return se.errno -} diff --git a/vendor/github.com/cilium/ebpf/internal/syscall_string.go b/vendor/github.com/cilium/ebpf/internal/syscall_string.go deleted file mode 100644 index 85df0477973..00000000000 --- a/vendor/github.com/cilium/ebpf/internal/syscall_string.go +++ /dev/null @@ -1,56 +0,0 @@ -// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT. - -package internal - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[BPF_MAP_CREATE-0] - _ = x[BPF_MAP_LOOKUP_ELEM-1] - _ = x[BPF_MAP_UPDATE_ELEM-2] - _ = x[BPF_MAP_DELETE_ELEM-3] - _ = x[BPF_MAP_GET_NEXT_KEY-4] - _ = x[BPF_PROG_LOAD-5] - _ = x[BPF_OBJ_PIN-6] - _ = x[BPF_OBJ_GET-7] - _ = x[BPF_PROG_ATTACH-8] - _ = x[BPF_PROG_DETACH-9] - _ = x[BPF_PROG_TEST_RUN-10] - _ = x[BPF_PROG_GET_NEXT_ID-11] - _ = x[BPF_MAP_GET_NEXT_ID-12] - _ = x[BPF_PROG_GET_FD_BY_ID-13] - _ = x[BPF_MAP_GET_FD_BY_ID-14] - _ = x[BPF_OBJ_GET_INFO_BY_FD-15] - _ = x[BPF_PROG_QUERY-16] - _ = x[BPF_RAW_TRACEPOINT_OPEN-17] - _ = x[BPF_BTF_LOAD-18] - _ = x[BPF_BTF_GET_FD_BY_ID-19] - _ = x[BPF_TASK_FD_QUERY-20] - _ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21] - _ = x[BPF_MAP_FREEZE-22] - _ = x[BPF_BTF_GET_NEXT_ID-23] - _ = x[BPF_MAP_LOOKUP_BATCH-24] - _ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25] - _ = x[BPF_MAP_UPDATE_BATCH-26] - _ = x[BPF_MAP_DELETE_BATCH-27] - _ = x[BPF_LINK_CREATE-28] - _ = x[BPF_LINK_UPDATE-29] - _ = x[BPF_LINK_GET_FD_BY_ID-30] - _ = x[BPF_LINK_GET_NEXT_ID-31] - _ = x[BPF_ENABLE_STATS-32] - _ = x[BPF_ITER_CREATE-33] -} - -const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE" - -var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617} - -func (i BPFCmd) String() string { - if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) { - return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]] -} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go index 9aa70fa78c2..0a7c648a66d 100644 --- a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -4,7 +4,6 @@ package unix import ( - "bytes" "syscall" linux "golang.org/x/sys/unix" @@ -23,6 +22,7 @@ const ( ENODEV = linux.ENODEV EBADF = linux.EBADF E2BIG = linux.E2BIG + EFAULT = linux.EFAULT // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP ENOTSUPP = syscall.Errno(0x20c) @@ -66,11 +66,16 @@ const ( PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE AT_FDCWD = linux.AT_FDCWD RENAME_NOREPLACE = linux.RENAME_NOREPLACE + SO_ATTACH_BPF = linux.SO_ATTACH_BPF + SO_DETACH_BPF = linux.SO_DETACH_BPF + SOL_SOCKET = linux.SOL_SOCKET ) // Statfs_t is a wrapper type Statfs_t = linux.Statfs_t +type Stat_t = linux.Stat_t + // Rlimit is a wrapper type Rlimit = linux.Rlimit @@ -191,18 +196,14 @@ func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) } -func KernelRelease() (string, error) { - var uname Utsname - err := Uname(&uname) - if err != nil { - return "", err - } +func Prlimit(pid, resource int, new, old *Rlimit) error { + return linux.Prlimit(pid, resource, new, old) +} - end := bytes.IndexByte(uname.Release[:], 0) 
- release := string(uname.Release[:end]) - return release, nil +func Open(path string, mode int, perm uint32) (int, error) { + return linux.Open(path, mode, perm) } -func Prlimit(pid, resource int, new, old *Rlimit) error { - return linux.Prlimit(pid, resource, new, old) +func Fstat(fd int, stat *Stat_t) error { + return linux.Fstat(fd, stat) } diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go index 4f50d896ebb..abd8ea93dd1 100644 --- a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -23,6 +23,7 @@ const ( ENODEV = syscall.ENODEV EBADF = syscall.Errno(0) E2BIG = syscall.Errno(0) + EFAULT = syscall.EFAULT // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP ENOTSUPP = syscall.Errno(0x20c) @@ -67,6 +68,9 @@ const ( PERF_RECORD_SAMPLE = 9 AT_FDCWD = -0x2 RENAME_NOREPLACE = 0x1 + SO_ATTACH_BPF = 0x32 + SO_DETACH_BPF = 0x1b + SOL_SOCKET = 0x1 ) // Statfs_t is a wrapper @@ -85,6 +89,8 @@ type Statfs_t struct { Spare [4]int64 } +type Stat_t struct{} + // Rlimit is a wrapper type Rlimit struct { Cur uint64 @@ -258,10 +264,14 @@ func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags return errNonLinux } -func KernelRelease() (string, error) { - return "", errNonLinux +func Prlimit(pid, resource int, new, old *Rlimit) error { + return errNonLinux } -func Prlimit(pid, resource int, new, old *Rlimit) error { +func Open(path string, mode int, perm uint32) (int, error) { + return -1, errNonLinux +} + +func Fstat(fd int, stat *Stat_t) error { return errNonLinux } diff --git a/vendor/github.com/cilium/ebpf/internal/vdso.go b/vendor/github.com/cilium/ebpf/internal/vdso.go new file mode 100644 index 00000000000..ae4821de20c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/vdso.go @@ -0,0 +1,150 @@ +package internal + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + + "github.com/cilium/ebpf/internal/unix" +) + +var ( + errAuxvNoVDSO = errors.New("no vdso address found in auxv") +) + +// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library +// linked into the current process image. +func vdsoVersion() (uint32, error) { + // Read data from the auxiliary vector, which is normally passed directly + // to the process. Go does not expose that data, so we must read it from procfs. + // https://man7.org/linux/man-pages/man3/getauxval.3.html + av, err := os.Open("/proc/self/auxv") + if err != nil { + return 0, fmt.Errorf("opening auxv: %w", err) + } + defer av.Close() + + vdsoAddr, err := vdsoMemoryAddress(av) + if err != nil { + return 0, fmt.Errorf("finding vDSO memory address: %w", err) + } + + // Use /proc/self/mem rather than unsafe.Pointer tricks. + mem, err := os.Open("/proc/self/mem") + if err != nil { + return 0, fmt.Errorf("opening mem: %w", err) + } + defer mem.Close() + + // Open ELF at provided memory address, as offset into /proc/self/mem. + c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64)) + if err != nil { + return 0, fmt.Errorf("reading linux version code: %w", err) + } + + return c, nil +} + +// vdsoMemoryAddress returns the memory address of the vDSO library +// linked into the current process image. r is an io.Reader into an auxv blob. 
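// For instance, the parser can be exercised with a synthetic auxv blob made of
// native-endian (tag, value) uint64 pairs; the tags below are real auxv
// constants, the address is illustrative:
//
//	var buf bytes.Buffer
//	// AT_PAGESZ (6), then AT_SYSINFO_EHDR (33), then the terminating AT_NULL pair.
//	for _, v := range []uint64{6, 4096, 33, 0x7f1234560000, 0, 0} {
//		binary.Write(&buf, NativeEndian, v)
//	}
//	addr, err := vdsoMemoryAddress(&buf)
//	// addr == 0x7f1234560000, err == nil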
+func vdsoMemoryAddress(r io.Reader) (uint64, error) { + const ( + _AT_NULL = 0 // End of vector + _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image + ) + + // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`, + // the address of a page containing the virtual Dynamic Shared Object (vDSO). + aux := struct{ Tag, Val uint64 }{} + for { + if err := binary.Read(r, NativeEndian, &aux); err != nil { + return 0, fmt.Errorf("reading auxv entry: %w", err) + } + + switch aux.Tag { + case _AT_SYSINFO_EHDR: + if aux.Val != 0 { + return aux.Val, nil + } + return 0, fmt.Errorf("invalid vDSO address in auxv") + // _AT_NULL is always the last tag/val pair in the aux vector + // and can be treated like EOF. + case _AT_NULL: + return 0, errAuxvNoVDSO + } + } +} + +// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)' +type elfNoteHeader struct { + NameSize int32 + DescSize int32 + Type int32 +} + +// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in +// the ELF notes section of the binary provided by the reader. +func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { + hdr, err := NewSafeELFFile(r) + if err != nil { + return 0, fmt.Errorf("reading vDSO ELF: %w", err) + } + + sections := hdr.SectionsByType(elf.SHT_NOTE) + if len(sections) == 0 { + return 0, fmt.Errorf("no note section found in vDSO ELF") + } + + for _, sec := range sections { + sr := sec.Open() + var n elfNoteHeader + + // Read notes until we find one named 'Linux'. + for { + if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil { + if errors.Is(err, io.EOF) { + // We looked at all the notes in this section + break + } + return 0, fmt.Errorf("reading note header: %w", err) + } + + // If a note name is defined, it follows the note header. + var name string + if n.NameSize > 0 { + // Read the note name, aligned to 4 bytes. + buf := make([]byte, Align(int(n.NameSize), 4)) + if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil { + return 0, fmt.Errorf("reading note name: %w", err) + } + + // Read nul-terminated string. + name = unix.ByteSliceToString(buf[:n.NameSize]) + } + + // If a note descriptor is defined, it follows the name. + // It is possible for a note to have a descriptor but not a name. + if n.DescSize > 0 { + // LINUX_VERSION_CODE is a uint32 value. + if name == "Linux" && n.DescSize == 4 && n.Type == 0 { + var version uint32 + if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil { + return 0, fmt.Errorf("reading note descriptor: %w", err) + } + return version, nil + } + + // Discard the note descriptor if it exists but we're not interested in it. + if _, err := io.CopyN(io.Discard, sr, int64(Align(int(n.DescSize), 4))); err != nil { + return 0, err + } + } + } + } + + return 0, fmt.Errorf("no Linux note in ELF") +} diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go index 4915e583767..370e01e4447 100644 --- a/vendor/github.com/cilium/ebpf/internal/version.go +++ b/vendor/github.com/cilium/ebpf/internal/version.go @@ -2,8 +2,6 @@ package internal import ( "fmt" - "os" - "regexp" "sync" "github.com/cilium/ebpf/internal/unix" @@ -18,12 +16,6 @@ const ( ) var ( - // Match between one and three decimals separated by dots, with the last - // segment (patch level) being optional on some kernels. - // The x.y.z string must appear at the start of a string or right after - // whitespace to prevent sequences like 'x.y.z-a.b.c' from matching 'a.b.c'. 
- rgxKernelVersion = regexp.MustCompile(`(?:\A|\s)\d{1,3}\.\d{1,3}(?:\.\d{1,3})?`) - kernelVersion = struct { once sync.Once version Version @@ -46,6 +38,15 @@ func NewVersion(ver string) (Version, error) { return Version{major, minor, patch}, nil } +// NewVersionFromCode creates a version from a LINUX_VERSION_CODE. +func NewVersionFromCode(code uint32) Version { + return Version{ + uint16(uint8(code >> 16)), + uint16(uint8(code >> 8)), + uint16(uint8(code)), + } +} + func (v Version) String() string { if v[2] == 0 { return fmt.Sprintf("v%d.%d", v[0], v[1]) @@ -98,66 +99,24 @@ func KernelVersion() (Version, error) { return kernelVersion.version, nil } -// detectKernelVersion returns the version of the running kernel. It scans the -// following sources in order: /proc/version_signature, uname -v, uname -r. -// In each of those locations, the last-appearing x.y(.z) value is selected -// for parsing. The first location that yields a usable version number is -// returned. +// detectKernelVersion returns the version of the running kernel. func detectKernelVersion() (Version, error) { - - // Try reading /proc/version_signature for Ubuntu compatibility. - // Example format: Ubuntu 4.15.0-91.92-generic 4.15.18 - // This method exists in the kernel itself, see d18acd15c - // ("perf tools: Fix kernel version error in ubuntu"). - if pvs, err := os.ReadFile("/proc/version_signature"); err == nil { - // If /proc/version_signature exists, failing to parse it is an error. - // It only exists on Ubuntu, where the real patch level is not obtainable - // through any other method. - v, err := findKernelVersion(string(pvs)) - if err != nil { - return Version{}, err - } - return v, nil - } - - var uname unix.Utsname - if err := unix.Uname(&uname); err != nil { - return Version{}, fmt.Errorf("calling uname: %w", err) - } - - // Debian puts the version including the patch level in uname.Version. - // It is not an error if there's no version number in uname.Version, - // as most distributions don't use it. Parsing can continue on uname.Release. - // Example format: #1 SMP Debian 4.19.37-5+deb10u2 (2019-08-08) - if v, err := findKernelVersion(unix.ByteSliceToString(uname.Version[:])); err == nil { - return v, nil - } - - // Most other distributions have the full kernel version including patch - // level in uname.Release. - // Example format: 4.19.0-5-amd64, 5.5.10-arch1-1 - v, err := findKernelVersion(unix.ByteSliceToString(uname.Release[:])) + vc, err := vdsoVersion() if err != nil { return Version{}, err } - - return v, nil + return NewVersionFromCode(vc), nil } -// findKernelVersion matches s against rgxKernelVersion and parses the result -// into a Version. If s contains multiple matches, the last entry is selected. -func findKernelVersion(s string) (Version, error) { - m := rgxKernelVersion.FindAllString(s, -1) - if m == nil { - return Version{}, fmt.Errorf("no kernel version in string: %s", s) - } - // Pick the last match of the string in case there are multiple. - s = m[len(m)-1] - - v, err := NewVersion(s) - if err != nil { - return Version{}, fmt.Errorf("parsing version string %s: %w", s, err) +// KernelRelease returns the release string of the running kernel. +// Its format depends on the Linux distribution and corresponds to directory +// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and +// 4.19.0-16-amd64. 
+func KernelRelease() (string, error) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return "", fmt.Errorf("uname failed: %w", err) } - return v, nil + return unix.ByteSliceToString(uname.Release[:]), nil } diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go index 5540bb068cd..b3e65cfecd8 100644 --- a/vendor/github.com/cilium/ebpf/link/cgroup.go +++ b/vendor/github.com/cilium/ebpf/link/cgroup.go @@ -57,6 +57,8 @@ func AttachCgroup(opts CgroupOptions) (Link, error) { } // LoadPinnedCgroup loads a pinned cgroup from a bpffs. +// +// Deprecated: use LoadPinnedLink instead. func LoadPinnedCgroup(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { link, err := LoadPinnedRawLink(fileName, CgroupType, opts) if err != nil { @@ -151,6 +153,10 @@ func (cg *progAttachCgroup) Unpin() error { return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) } +func (cg *progAttachCgroup) Info() (*Info, error) { + return nil, fmt.Errorf("can't get cgroup info: %w", ErrNotSupported) +} + type linkCgroup struct { RawLink } diff --git a/vendor/github.com/cilium/ebpf/link/freplace.go b/vendor/github.com/cilium/ebpf/link/freplace.go deleted file mode 100644 index a698e1a9d30..00000000000 --- a/vendor/github.com/cilium/ebpf/link/freplace.go +++ /dev/null @@ -1,88 +0,0 @@ -package link - -import ( - "fmt" - - "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal/btf" -) - -type FreplaceLink struct { - RawLink -} - -// AttachFreplace attaches the given eBPF program to the function it replaces. -// -// The program and name can either be provided at link time, or can be provided -// at program load time. If they were provided at load time, they should be nil -// and empty respectively here, as they will be ignored by the kernel. -// Examples: -// -// AttachFreplace(dispatcher, "function", replacement) -// AttachFreplace(nil, "", replacement) -func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (*FreplaceLink, error) { - if (name == "") != (targetProg == nil) { - return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput) - } - if prog == nil { - return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) - } - if prog.Type() != ebpf.Extension { - return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput) - } - - var ( - target int - typeID btf.TypeID - ) - if targetProg != nil { - info, err := targetProg.Info() - if err != nil { - return nil, err - } - btfID, ok := info.BTFID() - if !ok { - return nil, fmt.Errorf("could not get BTF ID for program %s: %w", info.Name, errInvalidInput) - } - btfHandle, err := btf.NewHandleFromID(btfID) - if err != nil { - return nil, err - } - defer btfHandle.Close() - - var function *btf.Func - if err := btfHandle.Spec().FindType(name, &function); err != nil { - return nil, err - } - - target = targetProg.FD() - typeID = function.ID() - } - - link, err := AttachRawLink(RawLinkOptions{ - Target: target, - Program: prog, - Attach: ebpf.AttachNone, - BTF: typeID, - }) - if err != nil { - return nil, err - } - - return &FreplaceLink{*link}, nil -} - -// Update implements the Link interface. -func (f *FreplaceLink) Update(new *ebpf.Program) error { - return fmt.Errorf("freplace update: %w", ErrNotSupported) -} - -// LoadPinnedFreplace loads a pinned iterator from a bpffs. 
-func LoadPinnedFreplace(fileName string, opts *ebpf.LoadPinOptions) (*FreplaceLink, error) { - link, err := LoadPinnedRawLink(fileName, TracingType, opts) - if err != nil { - return nil, err - } - - return &FreplaceLink{*link}, err -} diff --git a/vendor/github.com/cilium/ebpf/link/iter.go b/vendor/github.com/cilium/ebpf/link/iter.go index 654d34ef848..289733e4709 100644 --- a/vendor/github.com/cilium/ebpf/link/iter.go +++ b/vendor/github.com/cilium/ebpf/link/iter.go @@ -6,7 +6,7 @@ import ( "unsafe" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) type IterOptions struct { @@ -31,26 +31,26 @@ func AttachIter(opts IterOptions) (*Iter, error) { progFd := opts.Program.FD() if progFd < 0 { - return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) } var info bpfIterLinkInfoMap if opts.Map != nil { mapFd := opts.Map.FD() if mapFd < 0 { - return nil, fmt.Errorf("invalid map: %w", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid map: %w", sys.ErrClosedFd) } info.map_fd = uint32(mapFd) } - attr := bpfLinkCreateIterAttr{ - prog_fd: uint32(progFd), - attach_type: ebpf.AttachTraceIter, - iter_info: internal.NewPointer(unsafe.Pointer(&info)), - iter_info_len: uint32(unsafe.Sizeof(info)), + attr := sys.LinkCreateIterAttr{ + ProgFd: uint32(progFd), + AttachType: sys.AttachType(ebpf.AttachTraceIter), + IterInfo: sys.NewPointer(unsafe.Pointer(&info)), + IterInfoLen: uint32(unsafe.Sizeof(info)), } - fd, err := bpfLinkCreateIter(&attr) + fd, err := sys.LinkCreateIter(&attr) if err != nil { return nil, fmt.Errorf("can't link iterator: %w", err) } @@ -59,6 +59,8 @@ func AttachIter(opts IterOptions) (*Iter, error) { } // LoadPinnedIter loads a pinned iterator from a bpffs. +// +// Deprecated: use LoadPinnedLink instead. func LoadPinnedIter(fileName string, opts *ebpf.LoadPinOptions) (*Iter, error) { link, err := LoadPinnedRawLink(fileName, IterType, opts) if err != nil { @@ -77,16 +79,11 @@ type Iter struct { // // Reading from the returned reader triggers the BPF program. func (it *Iter) Open() (io.ReadCloser, error) { - linkFd, err := it.fd.Value() - if err != nil { - return nil, err - } - - attr := &bpfIterCreateAttr{ - linkFd: linkFd, + attr := &sys.IterCreateAttr{ + LinkFd: it.fd.Uint(), } - fd, err := bpfIterCreate(attr) + fd, err := sys.IterCreate(attr) if err != nil { return nil, fmt.Errorf("can't create iterator: %w", err) } diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go index b6577b5a992..6b896360d64 100644 --- a/vendor/github.com/cilium/ebpf/link/kprobe.go +++ b/vendor/github.com/cilium/ebpf/link/kprobe.go @@ -12,7 +12,7 @@ import ( "unsafe" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -28,6 +28,13 @@ var ( type probeType uint8 +type probeArgs struct { + symbol, group, path string + offset, refCtrOffset uint64 + pid int + ret bool +} + const ( kprobeType probeType = iota uprobeType @@ -131,10 +138,17 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput) } + args := probeArgs{ + pid: perfAllThreads, + symbol: platformPrefix(symbol), + ret: ret, + } + // Use kprobe PMU if the kernel has it available. 
- tp, err := pmuKprobe(platformPrefix(symbol), ret) + tp, err := pmuKprobe(args) if errors.Is(err, os.ErrNotExist) { - tp, err = pmuKprobe(symbol, ret) + args.symbol = symbol + tp, err = pmuKprobe(args) } if err == nil { return tp, nil @@ -144,9 +158,11 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { } // Use tracefs if kprobe PMU is missing. - tp, err = tracefsKprobe(platformPrefix(symbol), ret) + args.symbol = platformPrefix(symbol) + tp, err = tracefsKprobe(args) if errors.Is(err, os.ErrNotExist) { - tp, err = tracefsKprobe(symbol, ret) + args.symbol = symbol + tp, err = tracefsKprobe(args) } if err != nil { return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err) @@ -157,8 +173,8 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { // pmuKprobe opens a perf event based on the kprobe PMU. // Returns os.ErrNotExist if the given symbol does not exist in the kernel. -func pmuKprobe(symbol string, ret bool) (*perfEvent, error) { - return pmuProbe(kprobeType, symbol, "", 0, perfAllThreads, ret) +func pmuKprobe(args probeArgs) (*perfEvent, error) { + return pmuProbe(kprobeType, args) } // pmuProbe opens a perf event based on a Performance Monitoring Unit. @@ -168,7 +184,7 @@ func pmuKprobe(symbol string, ret bool) (*perfEvent, error) { // 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU" // // Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU -func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { +func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) { // Getting the PMU type will fail if the kernel doesn't support // the perf_[k,u]probe PMU. et, err := getPMUEventType(typ) @@ -177,7 +193,7 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bo } var config uint64 - if ret { + if args.ret { bit, err := typ.RetprobeBit() if err != nil { return nil, err @@ -192,7 +208,7 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bo switch typ { case kprobeType: // Create a pointer to a NUL-terminated string for the kernel. - sp, err = unsafeStringPtr(symbol) + sp, err = unsafeStringPtr(args.symbol) if err != nil { return nil, err } @@ -203,11 +219,15 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bo Config: config, // Retprobe flag } case uprobeType: - sp, err = unsafeStringPtr(path) + sp, err = unsafeStringPtr(args.path) if err != nil { return nil, err } + if args.refCtrOffset != 0 { + config |= args.refCtrOffset << uprobeRefCtrOffsetShift + } + attr = unix.PerfEventAttr{ // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1, // since it added the config2 (Ext2) field. The Size field controls the @@ -216,23 +236,23 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bo Size: unix.PERF_ATTR_SIZE_VER1, Type: uint32(et), // PMU event type read from sysfs Ext1: uint64(uintptr(sp)), // Uprobe path - Ext2: offset, // Uprobe offset - Config: config, // Retprobe flag + Ext2: args.offset, // Uprobe offset + Config: config, // RefCtrOffset, Retprobe flag } } - fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + rawFd, err := unix.PerfEventOpen(&attr, args.pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL // when trying to create a kretprobe for a missing symbol. Make sure ENOENT // is returned to the caller. 
if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, os.ErrNotExist) + return nil, fmt.Errorf("symbol '%s' not found: %w", args.symbol, os.ErrNotExist) } // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned // when attempting to set a uprobe on a trap instruction. if errors.Is(err, unix.ENOTSUPP) { - return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", offset, err) + return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", args.offset, err) } if err != nil { return nil, fmt.Errorf("opening perf event: %w", err) @@ -241,18 +261,23 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bo // Ensure the string pointer is not collected before PerfEventOpen returns. runtime.KeepAlive(sp) + fd, err := sys.NewFD(rawFd) + if err != nil { + return nil, err + } + // Kernel has perf_[k,u]probe PMU available, initialize perf event. return &perfEvent{ - fd: internal.NewFD(uint32(fd)), + fd: fd, pmuID: et, - name: symbol, - typ: typ.PerfEventType(ret), + name: args.symbol, + typ: typ.PerfEventType(args.ret), }, nil } // tracefsKprobe creates a Kprobe tracefs entry. -func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) { - return tracefsProbe(kprobeType, symbol, "", 0, perfAllThreads, ret) +func tracefsKprobe(args probeArgs) (*perfEvent, error) { + return tracefsProbe(kprobeType, args) } // tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events. @@ -261,7 +286,7 @@ func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) { // Path and offset are only set in the case of uprobe(s) and are used to set // the executable/library path on the filesystem and the offset where the probe is inserted. // A perf event is then opened on the newly-created trace event and returned to the caller. -func tracefsProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { +func tracefsProbe(typ probeType, args probeArgs) (*perfEvent, error) { // Generate a random string for each trace event we attempt to create. // This value is used as the 'group' token in tracefs to allow creating // multiple kprobe trace events with the same name. @@ -269,32 +294,33 @@ func tracefsProbe(typ probeType, symbol, path string, offset uint64, pid int, re if err != nil { return nil, fmt.Errorf("randomizing group name: %w", err) } + args.group = group // Before attempting to create a trace event through tracefs, // check if an event with the same group and name already exists. // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate // entry, so we need to rely on reads for detecting uniqueness. - _, err = getTraceEventID(group, symbol) + _, err = getTraceEventID(group, args.symbol) if err == nil { - return nil, fmt.Errorf("trace event already exists: %s/%s", group, symbol) + return nil, fmt.Errorf("trace event already exists: %s/%s", group, args.symbol) } if err != nil && !errors.Is(err, os.ErrNotExist) { - return nil, fmt.Errorf("checking trace event %s/%s: %w", group, symbol, err) + return nil, fmt.Errorf("checking trace event %s/%s: %w", group, args.symbol, err) } // Create the [k,u]probe trace event using tracefs. - if err := createTraceFSProbeEvent(typ, group, symbol, path, offset, ret); err != nil { + if err := createTraceFSProbeEvent(typ, args); err != nil { return nil, fmt.Errorf("creating probe entry on tracefs: %w", err) } // Get the newly-created trace event's id. 
- tid, err := getTraceEventID(group, symbol) + tid, err := getTraceEventID(group, args.symbol) if err != nil { return nil, fmt.Errorf("getting trace event id: %w", err) } // Kprobes are ephemeral tracepoints and share the same perf event type. - fd, err := openTracepointPerfEvent(tid, pid) + fd, err := openTracepointPerfEvent(tid, args.pid) if err != nil { return nil, err } @@ -302,9 +328,9 @@ func tracefsProbe(typ probeType, symbol, path string, offset uint64, pid int, re return &perfEvent{ fd: fd, group: group, - name: symbol, + name: args.symbol, tracefsID: tid, - typ: typ.PerfEventType(ret), + typ: typ.PerfEventType(args.ret), }, nil } @@ -312,7 +338,7 @@ func tracefsProbe(typ probeType, symbol, path string, offset uint64, pid int, re // /[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid // kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist // if a probe with the same group and symbol already exists. -func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset uint64, ret bool) error { +func createTraceFSProbeEvent(typ probeType, args probeArgs) error { // Open the kprobe_events file in tracefs. f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666) if err != nil { @@ -337,7 +363,7 @@ func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset u // subsampling or rate limiting logic can be more accurately implemented in // the eBPF program itself. // See Documentation/kprobes.txt for more details. - pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, symbol) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, args.symbol) case uprobeType: // The uprobe_events syntax is as follows: // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe @@ -346,18 +372,17 @@ func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset u // // Some examples: // r:ebpf_1234/readline /bin/bash:0x12345 - // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345 + // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123) // // See Documentation/trace/uprobetracer.txt for more details. - pathOffset := uprobePathOffset(path, offset) - pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, pathOffset) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, uprobeToken(args)) } _, err = f.WriteString(pe) // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL // when trying to create a kretprobe for a missing symbol. Make sure ENOENT // is returned to the caller. if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - return fmt.Errorf("symbol %s not found: %w", symbol, os.ErrNotExist) + return fmt.Errorf("symbol %s not found: %w", args.symbol, os.ErrNotExist) } if err != nil { return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go index 4926584696b..3aa49a68e35 100644 --- a/vendor/github.com/cilium/ebpf/link/link.go +++ b/vendor/github.com/cilium/ebpf/link/link.go @@ -1,12 +1,14 @@ package link import ( + "bytes" + "encoding/binary" "fmt" - "unsafe" "github.com/cilium/ebpf" "github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" ) var ErrNotSupported = internal.ErrNotSupported @@ -35,12 +37,53 @@ type Link interface { // not called. Close() error + // Info returns metadata on a link. 
+ // + // May return an error wrapping ErrNotSupported. + Info() (*Info, error) + // Prevent external users from implementing this interface. isLink() } +// LoadPinnedLink loads a link that was persisted into a bpffs. +func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { + raw, err := loadPinnedRawLink(fileName, opts) + if err != nil { + return nil, err + } + + return wrapRawLink(raw) +} + +// wrap a RawLink in a more specific type if possible. +// +// The function takes ownership of raw and closes it on error. +func wrapRawLink(raw *RawLink) (Link, error) { + info, err := raw.Info() + if err != nil { + raw.Close() + return nil, err + } + + switch info.Type { + case RawTracepointType: + return &rawTracepoint{*raw}, nil + case TracingType: + return &tracing{*raw}, nil + case CgroupType: + return &linkCgroup{*raw}, nil + case IterType: + return &Iter{*raw}, nil + case NetNsType: + return &NetNsLink{*raw}, nil + default: + return raw, nil + } +} + // ID uniquely identifies a BPF link. -type ID uint32 +type ID = sys.LinkID // RawLinkOptions control the creation of a raw link. type RawLinkOptions struct { @@ -52,13 +95,58 @@ type RawLinkOptions struct { Attach ebpf.AttachType // BTF is the BTF of the attachment target. BTF btf.TypeID + // Flags control the attach behaviour. + Flags uint32 } -// RawLinkInfo contains metadata on a link. -type RawLinkInfo struct { +// Info contains metadata on a link. +type Info struct { Type Type ID ID Program ebpf.ProgramID + extra interface{} +} + +// RawLinkInfo contains information on a raw link. +// +// Deprecated: use Info instead. +type RawLinkInfo = Info + +type TracingInfo sys.TracingLinkInfo +type CgroupInfo sys.CgroupLinkInfo +type NetNsInfo sys.NetNsLinkInfo +type XDPInfo sys.XDPLinkInfo + +// Tracing returns tracing type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Tracing() *TracingInfo { + e, _ := r.extra.(*TracingInfo) + return e +} + +// Cgroup returns cgroup type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Cgroup() *CgroupInfo { + e, _ := r.extra.(*CgroupInfo) + return e +} + +// NetNs returns netns type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) NetNs() *NetNsInfo { + e, _ := r.extra.(*NetNsInfo) + return e +} + +// ExtraNetNs returns XDP type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) XDP() *XDPInfo { + e, _ := r.extra.(*XDPInfo) + return e } // RawLink is the low-level API to bpf_link. @@ -66,7 +154,7 @@ type RawLinkInfo struct { // You should consider using the higher level interfaces in this // package instead. 
type RawLink struct { - fd *internal.FD + fd *sys.FD pinnedPath string } @@ -77,21 +165,22 @@ func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { } if opts.Target < 0 { - return nil, fmt.Errorf("invalid target: %s", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd) } progFd := opts.Program.FD() if progFd < 0 { - return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) } - attr := bpfLinkCreateAttr{ - targetFd: uint32(opts.Target), - progFd: uint32(progFd), - attachType: opts.Attach, - targetBTFID: uint32(opts.BTF), + attr := sys.LinkCreateAttr{ + TargetFd: uint32(opts.Target), + ProgFd: uint32(progFd), + AttachType: sys.AttachType(opts.Attach), + TargetBtfId: uint32(opts.BTF), + Flags: opts.Flags, } - fd, err := bpfLinkCreate(&attr) + fd, err := sys.LinkCreate(&attr) if err != nil { return nil, fmt.Errorf("can't create link: %s", err) } @@ -103,13 +192,14 @@ func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { // // Returns an error if the pinned link type doesn't match linkType. Pass // UnspecifiedType to disable this behaviour. +// +// Deprecated: use LoadPinnedLink instead. func LoadPinnedRawLink(fileName string, linkType Type, opts *ebpf.LoadPinOptions) (*RawLink, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) + link, err := loadPinnedRawLink(fileName, opts) if err != nil { - return nil, fmt.Errorf("load pinned link: %w", err) + return nil, err } - link := &RawLink{fd, fileName} if linkType == UnspecifiedType { return link, nil } @@ -117,7 +207,7 @@ func LoadPinnedRawLink(fileName string, linkType Type, opts *ebpf.LoadPinOptions info, err := link.Info() if err != nil { link.Close() - return nil, fmt.Errorf("get pinned link info: %s", err) + return nil, fmt.Errorf("get pinned link info: %w", err) } if info.Type != linkType { @@ -128,15 +218,23 @@ func LoadPinnedRawLink(fileName string, linkType Type, opts *ebpf.LoadPinOptions return link, nil } +func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) { + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) + if err != nil { + return nil, fmt.Errorf("load pinned link: %w", err) + } + + return &RawLink{fd, fileName}, nil +} + func (l *RawLink) isLink() {} // FD returns the raw file descriptor. func (l *RawLink) FD() int { - fd, err := l.fd.Value() - if err != nil { - return -1 - } - return int(fd) + return l.fd.Int() } // Close breaks the link. 
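
The hunks above introduce Link.Info, LoadPinnedLink and the type-specific info accessors. A minimal sketch of how a consumer of the bumped library might inspect a pinned link; the bpffs path is illustrative, and the dump of type-specific info is kept generic so no field names of the generated sys types are assumed:

    package main

    import (
        "fmt"
        "log"

        "github.com/cilium/ebpf/link"
    )

    func main() {
        // Load a link that was pinned to bpffs earlier (path is an example).
        l, err := link.LoadPinnedLink("/sys/fs/bpf/my-link", nil)
        if err != nil {
            log.Fatal(err)
        }
        defer l.Close()

        // Info is new on the Link interface; some implementations return an
        // error wrapping ErrNotSupported.
        info, err := l.Info()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("link type:", info.Type, "program id:", info.Program)

        // Type-specific details come from accessors that return nil when the
        // kernel reported no extra info for this link type.
        if cg := info.Cgroup(); cg != nil {
            fmt.Printf("cgroup link info: %+v\n", cg)
        }
    }
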
@@ -185,49 +283,64 @@ type RawLinkUpdateOptions struct { func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error { newFd := opts.New.FD() if newFd < 0 { - return fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + return fmt.Errorf("invalid program: %s", sys.ErrClosedFd) } var oldFd int if opts.Old != nil { oldFd = opts.Old.FD() if oldFd < 0 { - return fmt.Errorf("invalid replacement program: %s", internal.ErrClosedFd) + return fmt.Errorf("invalid replacement program: %s", sys.ErrClosedFd) } } - linkFd, err := l.fd.Value() - if err != nil { - return fmt.Errorf("can't update link: %s", err) + attr := sys.LinkUpdateAttr{ + LinkFd: l.fd.Uint(), + NewProgFd: uint32(newFd), + OldProgFd: uint32(oldFd), + Flags: opts.Flags, } - - attr := bpfLinkUpdateAttr{ - linkFd: linkFd, - newProgFd: uint32(newFd), - oldProgFd: uint32(oldFd), - flags: opts.Flags, - } - return bpfLinkUpdate(&attr) -} - -// struct bpf_link_info -type bpfLinkInfo struct { - typ uint32 - id uint32 - prog_id uint32 + return sys.LinkUpdate(&attr) } // Info returns metadata about the link. -func (l *RawLink) Info() (*RawLinkInfo, error) { - var info bpfLinkInfo - err := internal.BPFObjGetInfoByFD(l.fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) - if err != nil { +func (l *RawLink) Info() (*Info, error) { + var info sys.LinkInfo + + if err := sys.ObjInfo(l.fd, &info); err != nil { return nil, fmt.Errorf("link info: %s", err) } - return &RawLinkInfo{ - Type(info.typ), - ID(info.id), - ebpf.ProgramID(info.prog_id), + var extra interface{} + switch info.Type { + case CgroupType: + extra = &CgroupInfo{} + case IterType: + // not supported + case NetNsType: + extra = &NetNsInfo{} + case RawTracepointType: + // not supported + case TracingType: + extra = &TracingInfo{} + case XDPType: + extra = &XDPInfo{} + default: + return nil, fmt.Errorf("unknown link info type: %d", info.Type) + } + + if info.Type != RawTracepointType && info.Type != IterType { + buf := bytes.NewReader(info.Extra[:]) + err := binary.Read(buf, internal.NativeEndian, extra) + if err != nil { + return nil, fmt.Errorf("can not read extra link info: %w", err) + } + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, }, nil } diff --git a/vendor/github.com/cilium/ebpf/link/netns.go b/vendor/github.com/cilium/ebpf/link/netns.go index 37e5b84c4dd..f49cbe4d73b 100644 --- a/vendor/github.com/cilium/ebpf/link/netns.go +++ b/vendor/github.com/cilium/ebpf/link/netns.go @@ -6,14 +6,9 @@ import ( "github.com/cilium/ebpf" ) -// NetNsInfo contains metadata about a network namespace link. -type NetNsInfo struct { - RawLinkInfo -} - // NetNsLink is a program attached to a network namespace. type NetNsLink struct { - *RawLink + RawLink } // AttachNetNs attaches a program to a network namespace. @@ -37,24 +32,17 @@ func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) { return nil, err } - return &NetNsLink{link}, nil + return &NetNsLink{*link}, nil } // LoadPinnedNetNs loads a network namespace link from bpffs. +// +// Deprecated: use LoadPinnedLink instead. func LoadPinnedNetNs(fileName string, opts *ebpf.LoadPinOptions) (*NetNsLink, error) { link, err := LoadPinnedRawLink(fileName, NetNsType, opts) if err != nil { return nil, err } - return &NetNsLink{link}, nil -} - -// Info returns information about the link. 
-func (nns *NetNsLink) Info() (*NetNsInfo, error) { - info, err := nns.RawLink.Info() - if err != nil { - return nil, err - } - return &NetNsInfo{*info}, nil + return &NetNsLink{*link}, nil } diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go index 7e0443a75cb..ef24660f44a 100644 --- a/vendor/github.com/cilium/ebpf/link/perf_event.go +++ b/vendor/github.com/cilium/ebpf/link/perf_event.go @@ -13,7 +13,7 @@ import ( "unsafe" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -82,7 +82,7 @@ type perfEvent struct { // The event type determines the types of programs that can be attached. typ perfEventType - fd *internal.FD + fd *sys.FD } func (pe *perfEvent) isLink() {} @@ -109,17 +109,16 @@ func (pe *perfEvent) Update(prog *ebpf.Program) error { return fmt.Errorf("can't replace eBPF program in perf event: %w", ErrNotSupported) } +func (pe *perfEvent) Info() (*Info, error) { + return nil, fmt.Errorf("can't get perf event info: %w", ErrNotSupported) +} + func (pe *perfEvent) Close() error { if pe.fd == nil { return nil } - pfd, err := pe.fd.Value() - if err != nil { - return fmt.Errorf("getting perf event fd: %w", err) - } - - err = unix.IoctlSetInt(int(pfd), unix.PERF_EVENT_IOC_DISABLE, 0) + err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_DISABLE, 0) if err != nil { return fmt.Errorf("disabling perf event: %w", err) } @@ -159,7 +158,7 @@ func (pe *perfEvent) attach(prog *ebpf.Program) error { return errors.New("cannot attach to nil perf event") } if prog.FD() < 0 { - return fmt.Errorf("invalid program: %w", internal.ErrClosedFd) + return fmt.Errorf("invalid program: %w", sys.ErrClosedFd) } switch pe.typ { case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent: @@ -174,8 +173,7 @@ func (pe *perfEvent) attach(prog *ebpf.Program) error { return fmt.Errorf("unknown perf event type: %d", pe.typ) } - // The ioctl below will fail when the fd is invalid. - kfd, _ := pe.fd.Value() + kfd := pe.fd.Int() // Assign the eBPF program to the perf event. err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) @@ -235,7 +233,7 @@ func getPMUEventType(typ probeType) (uint64, error) { // openTracepointPerfEvent opens a tracepoint-type perf event. System-wide // [k,u]probes created by writing to /[k,u]probe_events are tracepoints // behind the scenes, and can be attached to using these perf events. -func openTracepointPerfEvent(tid uint64, pid int) (*internal.FD, error) { +func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) { attr := unix.PerfEventAttr{ Type: unix.PERF_TYPE_TRACEPOINT, Config: tid, @@ -249,7 +247,7 @@ func openTracepointPerfEvent(tid uint64, pid int) (*internal.FD, error) { return nil, fmt.Errorf("opening tracepoint perf event: %w", err) } - return internal.NewFD(uint32(fd)), nil + return sys.NewFD(fd) } // uint64FromFile reads a uint64 from a file. 
All elements of path are sanitized diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go index b90c4574676..ea31817377f 100644 --- a/vendor/github.com/cilium/ebpf/link/program.go +++ b/vendor/github.com/cilium/ebpf/link/program.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) type RawAttachProgramOptions struct { @@ -34,7 +34,7 @@ func RawAttachProgram(opts RawAttachProgramOptions) error { replaceFd = uint32(opts.Replace.FD()) } - attr := internal.BPFProgAttachAttr{ + attr := sys.ProgAttachAttr{ TargetFd: uint32(opts.Target), AttachBpfFd: uint32(opts.Program.FD()), ReplaceBpfFd: replaceFd, @@ -42,7 +42,7 @@ func RawAttachProgram(opts RawAttachProgramOptions) error { AttachFlags: uint32(opts.Flags), } - if err := internal.BPFProgAttach(&attr); err != nil { + if err := sys.ProgAttach(&attr); err != nil { return fmt.Errorf("can't attach program: %w", err) } return nil @@ -63,12 +63,12 @@ func RawDetachProgram(opts RawDetachProgramOptions) error { return err } - attr := internal.BPFProgDetachAttr{ + attr := sys.ProgDetachAttr{ TargetFd: uint32(opts.Target), AttachBpfFd: uint32(opts.Program.FD()), AttachType: uint32(opts.Attach), } - if err := internal.BPFProgDetach(&attr); err != nil { + if err := sys.ProgDetach(&attr); err != nil { return fmt.Errorf("can't detach program: %w", err) } diff --git a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go index f4beb1e0786..925e621cbbc 100644 --- a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go +++ b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go @@ -1,10 +1,11 @@ package link import ( + "errors" "fmt" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) type RawTracepointOptions struct { @@ -22,40 +23,65 @@ func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) { return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t) } if opts.Program.FD() < 0 { - return nil, fmt.Errorf("invalid program: %w", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) } - fd, err := bpfRawTracepointOpen(&bpfRawTracepointOpenAttr{ - name: internal.NewStringPointer(opts.Name), - fd: uint32(opts.Program.FD()), + fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + Name: sys.NewStringPointer(opts.Name), + ProgFd: uint32(opts.Program.FD()), }) if err != nil { return nil, err } - return &progAttachRawTracepoint{fd: fd}, nil + err = haveBPFLink() + if errors.Is(err, ErrNotSupported) { + // Prior to commit 70ed506c3bbc ("bpf: Introduce pinnable bpf_link abstraction") + // raw_tracepoints are just a plain fd. 
+ return &simpleRawTracepoint{fd}, nil + } + + if err != nil { + return nil, err + } + + return &rawTracepoint{RawLink{fd: fd}}, nil } -type progAttachRawTracepoint struct { - fd *internal.FD +type simpleRawTracepoint struct { + fd *sys.FD } -var _ Link = (*progAttachRawTracepoint)(nil) +var _ Link = (*simpleRawTracepoint)(nil) -func (rt *progAttachRawTracepoint) isLink() {} +func (frt *simpleRawTracepoint) isLink() {} -func (rt *progAttachRawTracepoint) Close() error { - return rt.fd.Close() +func (frt *simpleRawTracepoint) Close() error { + return frt.fd.Close() } -func (rt *progAttachRawTracepoint) Update(_ *ebpf.Program) error { - return fmt.Errorf("can't update raw_tracepoint: %w", ErrNotSupported) +func (frt *simpleRawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) } -func (rt *progAttachRawTracepoint) Pin(_ string) error { - return fmt.Errorf("can't pin raw_tracepoint: %w", ErrNotSupported) +func (frt *simpleRawTracepoint) Pin(string) error { + return fmt.Errorf("pin raw_tracepoint: %w", ErrNotSupported) } -func (rt *progAttachRawTracepoint) Unpin() error { +func (frt *simpleRawTracepoint) Unpin() error { return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported) } + +func (frt *simpleRawTracepoint) Info() (*Info, error) { + return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported) +} + +type rawTracepoint struct { + RawLink +} + +var _ Link = (*rawTracepoint)(nil) + +func (rt *rawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) +} diff --git a/vendor/github.com/cilium/ebpf/link/socket_filter.go b/vendor/github.com/cilium/ebpf/link/socket_filter.go new file mode 100644 index 00000000000..94f3958cc4d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/socket_filter.go @@ -0,0 +1,40 @@ +package link + +import ( + "syscall" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/unix" +) + +// AttachSocketFilter attaches a SocketFilter BPF program to a socket. +func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD()) + }) + if ssoErr != nil { + return ssoErr + } + return err +} + +// DetachSocketFilter detaches a SocketFilter BPF program from a socket. +func DetachSocketFilter(conn syscall.Conn) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0) + }) + if ssoErr != nil { + return ssoErr + } + return err +} diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go index a61499438b2..072dfade273 100644 --- a/vendor/github.com/cilium/ebpf/link/syscalls.go +++ b/vendor/github.com/cilium/ebpf/link/syscalls.go @@ -2,28 +2,26 @@ package link import ( "errors" - "unsafe" "github.com/cilium/ebpf" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) // Type is the kind of link. -type Type uint32 +type Type = sys.LinkType // Valid link types. -// -// Equivalent to enum bpf_link_type. 
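
The new socket_filter.go above adds plain setsockopt-based helpers. A hedged sketch of attaching a SocketFilter program to an established connection; the type assertion and error message are illustrative, and prog is assumed to be an already-loaded ebpf.SocketFilter program:

    package example

    import (
        "errors"
        "net"
        "syscall"

        "github.com/cilium/ebpf"
        "github.com/cilium/ebpf/link"
    )

    // attachFilter wires a SocketFilter program to an established connection.
    // *net.TCPConn, *net.UDPConn and *net.UnixConn all implement syscall.Conn.
    func attachFilter(conn net.Conn, prog *ebpf.Program) error {
        sc, ok := conn.(syscall.Conn)
        if !ok {
            return errors.New("connection does not expose a raw file descriptor")
        }
        return link.AttachSocketFilter(sc, prog)
    }
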
const ( - UnspecifiedType Type = iota - RawTracepointType - TracingType - CgroupType - IterType - NetNsType - XDPType + UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC + RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT + TracingType = sys.BPF_LINK_TYPE_TRACING + CgroupType = sys.BPF_LINK_TYPE_CGROUP + IterType = sys.BPF_LINK_TYPE_ITER + NetNsType = sys.BPF_LINK_TYPE_NETNS + XDPType = sys.BPF_LINK_TYPE_XDP ) var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error { @@ -69,7 +67,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace // We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs. // If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't // present. - attr := internal.BPFProgAttachAttr{ + attr := sys.ProgAttachAttr{ // We rely on this being checked after attachFlags. TargetFd: ^uint32(0), AttachBpfFd: uint32(prog.FD()), @@ -77,7 +75,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace AttachFlags: uint32(flagReplace), } - err = internal.BPFProgAttach(&attr) + err = sys.ProgAttach(&attr) if errors.Is(err, unix.EINVAL) { return internal.ErrNotSupported } @@ -87,73 +85,14 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace return err }) -type bpfLinkCreateAttr struct { - progFd uint32 - targetFd uint32 - attachType ebpf.AttachType - flags uint32 - targetBTFID uint32 -} - -func bpfLinkCreate(attr *bpfLinkCreateAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - return internal.NewFD(uint32(ptr)), nil -} - -type bpfLinkCreateIterAttr struct { - prog_fd uint32 - target_fd uint32 - attach_type ebpf.AttachType - flags uint32 - iter_info internal.Pointer - iter_info_len uint32 -} - -func bpfLinkCreateIter(attr *bpfLinkCreateIterAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - return internal.NewFD(uint32(ptr)), nil -} - -type bpfLinkUpdateAttr struct { - linkFd uint32 - newProgFd uint32 - flags uint32 - oldProgFd uint32 -} - -func bpfLinkUpdate(attr *bpfLinkUpdateAttr) error { - _, err := internal.BPF(internal.BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { - prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Type: ebpf.CGroupSKB, - AttachType: ebpf.AttachCGroupInetIngress, - License: "MIT", - Instructions: asm.Instructions{ - asm.Mov.Imm(asm.R0, 0), - asm.Return(), - }, - }) - if err != nil { - return internal.ErrNotSupported - } - defer prog.Close() - - attr := bpfLinkCreateAttr{ + attr := sys.LinkCreateAttr{ // This is a hopefully invalid file descriptor, which triggers EBADF. 
- targetFd: ^uint32(0), - progFd: uint32(prog.FD()), - attachType: ebpf.AttachCGroupInetIngress, + TargetFd: ^uint32(0), + ProgFd: ^uint32(0), + AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), } - _, err = bpfLinkCreate(&attr) + _, err := sys.LinkCreate(&attr) if errors.Is(err, unix.EINVAL) { return internal.ErrNotSupported } @@ -162,30 +101,3 @@ var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { } return err }) - -type bpfIterCreateAttr struct { - linkFd uint32 - flags uint32 -} - -func bpfIterCreate(attr *bpfIterCreateAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err == nil { - return internal.NewFD(uint32(ptr)), nil - } - return nil, err -} - -type bpfRawTracepointOpenAttr struct { - name internal.Pointer - fd uint32 - _ uint32 -} - -func bpfRawTracepointOpen(attr *bpfRawTracepointOpenAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err == nil { - return internal.NewFD(uint32(ptr)), nil - } - return nil, err -} diff --git a/vendor/github.com/cilium/ebpf/link/tracing.go b/vendor/github.com/cilium/ebpf/link/tracing.go new file mode 100644 index 00000000000..5913592c67c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/tracing.go @@ -0,0 +1,153 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" +) + +type tracing struct { + RawLink +} + +func (f *tracing) Update(new *ebpf.Program) error { + return fmt.Errorf("tracing update: %w", ErrNotSupported) +} + +// AttachFreplace attaches the given eBPF program to the function it replaces. +// +// The program and name can either be provided at link time, or can be provided +// at program load time. If they were provided at load time, they should be nil +// and empty respectively here, as they will be ignored by the kernel. +// Examples: +// +// AttachFreplace(dispatcher, "function", replacement) +// AttachFreplace(nil, "", replacement) +func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (Link, error) { + if (name == "") != (targetProg == nil) { + return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.Extension { + return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput) + } + + var ( + target int + typeID btf.TypeID + ) + if targetProg != nil { + info, err := targetProg.Info() + if err != nil { + return nil, err + } + btfID, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("could not get BTF ID for program %s: %w", info.Name, errInvalidInput) + } + btfHandle, err := btf.NewHandleFromID(btfID) + if err != nil { + return nil, err + } + defer btfHandle.Close() + + var function *btf.Func + if err := btfHandle.Spec().TypeByName(name, &function); err != nil { + return nil, err + } + + target = targetProg.FD() + typeID = function.ID() + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: target, + Program: prog, + Attach: ebpf.AttachNone, + BTF: typeID, + }) + if err != nil { + return nil, err + } + + return &tracing{*link}, nil +} + +// LoadPinnedFreplace loads a pinned iterator from a bpffs. +// +// Deprecated: use LoadPinnedLink instead. 
+func LoadPinnedFreplace(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { + link, err := LoadPinnedRawLink(fileName, TracingType, opts) + if err != nil { + return nil, err + } + + return &tracing{*link}, err +} + +type TracingOptions struct { + // Program must be of type Tracing with attach type + // AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or + // AttachTraceRawTp. + Program *ebpf.Program +} + +type LSMOptions struct { + // Program must be of type LSM with attach type + // AttachLSMMac. + Program *ebpf.Program +} + +// attachBTFID links all BPF program types (Tracing/LSM) that they attach to a btf_id. +func attachBTFID(program *ebpf.Program) (Link, error) { + if program.FD() < 0 { + return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd) + } + + fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + ProgFd: uint32(program.FD()), + }) + if err != nil { + return nil, err + } + + raw := RawLink{fd: fd} + info, err := raw.Info() + if err != nil { + raw.Close() + return nil, err + } + + if info.Type == RawTracepointType { + // Sadness upon sadness: a Tracing program with AttachRawTp returns + // a raw_tracepoint link. Other types return a tracing link. + return &rawTracepoint{raw}, nil + } + + return &tracing{RawLink: RawLink{fd: fd}}, nil +} + +// AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or +// a BTF-powered raw tracepoint (tp_btf) BPF Program to a BPF hook defined +// in kernel modules. +func AttachTracing(opts TracingOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.Tracing { + return nil, fmt.Errorf("invalid program type %s, expected Tracing", t) + } + + return attachBTFID(opts.Program) +} + +// AttachLSM links a Linux security module (LSM) BPF Program to a BPF +// hook defined in kernel modules. +func AttachLSM(opts LSMOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.LSM { + return nil, fmt.Errorf("invalid program type %s, expected LSM", t) + } + + return attachBTFID(opts.Program) +} diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go index 59170ce0468..d603575ca55 100644 --- a/vendor/github.com/cilium/ebpf/link/uprobe.go +++ b/vendor/github.com/cilium/ebpf/link/uprobe.go @@ -26,6 +26,17 @@ var ( err error }{} + uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset" + // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799 + uprobeRefCtrOffsetShift = 32 + haveRefCtrOffsetPMU = internal.FeatureTest("RefCtrOffsetPMU", "4.20", func() error { + _, err := os.Stat(uprobeRefCtrOffsetPMUPath) + if err != nil { + return internal.ErrNotSupported + } + return nil + }) + // ErrNoSymbol indicates that the given symbol was not found // in the ELF symbols table. ErrNoSymbol = errors.New("not found") @@ -48,11 +59,22 @@ type UprobeOptions struct { // Only set the uprobe on the given process ID. Useful when tracing // shared library calls or programs that have many running instances. PID int + // Automatically manage SDT reference counts (semaphores). + // + // If this field is set, the Kernel will increment/decrement the + // semaphore located in the process memory at the provided address on + // probe attach/detach. 
+ // + // See also: + // sourceware.org/systemtap/wiki/UserSpaceProbeImplementation (Semaphore Handling) + // github.com/torvalds/linux/commit/1cc33161a83d + // github.com/torvalds/linux/commit/a6ca88b241d5 + RefCtrOffset uint64 } // To open a new Executable, use: // -// OpenExecutable("/bin/bash") +// OpenExecutable("/bin/bash") // // The returned value can then be used to open Uprobe(s). func OpenExecutable(path string) (*Executable, error) { @@ -161,7 +183,7 @@ func (ex *Executable) offset(symbol string) (uint64, error) { // When using symbols which belongs to shared libraries, // an offset must be provided via options: // -// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) // // Losing the reference to the resulting Link (up) will close the Uprobe // and prevent further execution of prog. The Link must be Closed during @@ -193,7 +215,7 @@ func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti // When using symbols which belongs to shared libraries, // an offset must be provided via options: // -// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) // // Losing the reference to the resulting Link (up) will close the Uprobe // and prevent further execution of prog. The Link must be Closed during @@ -225,11 +247,12 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti if prog.Type() != ebpf.Kprobe { return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput) } + if opts == nil { + opts = &UprobeOptions{} + } - var offset uint64 - if opts != nil && opts.Offset != 0 { - offset = opts.Offset - } else { + offset := opts.Offset + if offset == 0 { off, err := ex.offset(symbol) if err != nil { return nil, err @@ -237,13 +260,28 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti offset = off } - pid := perfAllThreads - if opts != nil && opts.PID != 0 { - pid = opts.PID + pid := opts.PID + if pid == 0 { + pid = perfAllThreads + } + + if opts.RefCtrOffset != 0 { + if err := haveRefCtrOffsetPMU(); err != nil { + return nil, fmt.Errorf("uprobe ref_ctr_offset: %w", err) + } + } + + args := probeArgs{ + symbol: symbol, + path: ex.path, + offset: offset, + pid: pid, + refCtrOffset: opts.RefCtrOffset, + ret: ret, } // Use uprobe PMU if the kernel has it available. - tp, err := pmuUprobe(symbol, ex.path, offset, pid, ret) + tp, err := pmuUprobe(args) if err == nil { return tp, nil } @@ -252,7 +290,8 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti } // Use tracefs if uprobe PMU is missing. - tp, err = tracefsUprobe(uprobeSanitizedSymbol(symbol), ex.path, offset, pid, ret) + args.symbol = uprobeSanitizedSymbol(symbol) + tp, err = tracefsUprobe(args) if err != nil { return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err) } @@ -261,13 +300,13 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti } // pmuUprobe opens a perf event based on the uprobe PMU. -func pmuUprobe(symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { - return pmuProbe(uprobeType, symbol, path, offset, pid, ret) +func pmuUprobe(args probeArgs) (*perfEvent, error) { + return pmuProbe(uprobeType, args) } // tracefsUprobe creates a Uprobe tracefs entry. 
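
Since the uprobe options grew new knobs in this version, a hedged sketch of a caller passing them follows; OpenExecutable, Uprobe and UprobeOptions are taken from the diff above, while the path, symbol, PID and ref_ctr_offset values are purely illustrative:

    package example

    import (
        "github.com/cilium/ebpf"
        "github.com/cilium/ebpf/link"
    )

    // attachUprobe attaches prog (a Kprobe-type ebpf.Program) to a user-space
    // symbol using the new options.
    func attachUprobe(prog *ebpf.Program) (link.Link, error) {
        ex, err := link.OpenExecutable("/usr/lib/libc.so.6")
        if err != nil {
            return nil, err
        }

        return ex.Uprobe("malloc", prog, &link.UprobeOptions{
            PID:          1234,   // trace a single process; 0 means all threads
            RefCtrOffset: 0x1234, // SDT semaphore offset, only meaningful for
                                  // USDT-style probes; requires kernel 4.20+
        })
    }
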
-func tracefsUprobe(symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { - return tracefsProbe(uprobeType, symbol, path, offset, pid, ret) +func tracefsUprobe(args probeArgs) (*perfEvent, error) { + return tracefsProbe(uprobeType, args) } // uprobeSanitizedSymbol replaces every invalid characted for the tracefs api with an underscore. @@ -275,9 +314,17 @@ func uprobeSanitizedSymbol(symbol string) string { return rgxUprobeSymbol.ReplaceAllString(symbol, "_") } -// uprobePathOffset creates the PATH:OFFSET token for the tracefs api. -func uprobePathOffset(path string, offset uint64) string { - return fmt.Sprintf("%s:%#x", path, offset) +// uprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api. +func uprobeToken(args probeArgs) string { + po := fmt.Sprintf("%s:%#x", args.path, args.offset) + + if args.refCtrOffset != 0 { + // This is not documented in Documentation/trace/uprobetracer.txt. + // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564 + po += fmt.Sprintf("(%#x)", args.refCtrOffset) + } + + return po } func uretprobeBit() (uint64, error) { diff --git a/vendor/github.com/cilium/ebpf/link/xdp.go b/vendor/github.com/cilium/ebpf/link/xdp.go new file mode 100644 index 00000000000..aa8dd3a4cb3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/xdp.go @@ -0,0 +1,54 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" +) + +// XDPAttachFlags represents how XDP program will be attached to interface. +type XDPAttachFlags uint32 + +const ( + // XDPGenericMode (SKB) links XDP BPF program for drivers which do + // not yet support native XDP. + XDPGenericMode XDPAttachFlags = 1 << (iota + 1) + // XDPDriverMode links XDP BPF program into the driver’s receive path. + XDPDriverMode + // XDPOffloadMode offloads the entire XDP BPF program into hardware. + XDPOffloadMode +) + +type XDPOptions struct { + // Program must be an XDP BPF program. + Program *ebpf.Program + + // Interface is the interface index to attach program to. + Interface int + + // Flags is one of XDPAttachFlags (optional). + // + // Only one XDP mode should be set, without flag defaults + // to driver/generic mode (best effort). + Flags XDPAttachFlags +} + +// AttachXDP links an XDP BPF program to an XDP hook. +func AttachXDP(opts XDPOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.XDP { + return nil, fmt.Errorf("invalid program type %s, expected XDP", t) + } + + if opts.Interface < 1 { + return nil, fmt.Errorf("invalid interface index: %d", opts.Interface) + } + + rawLink, err := AttachRawLink(RawLinkOptions{ + Program: opts.Program, + Attach: ebpf.AttachXDP, + Target: opts.Interface, + Flags: uint32(opts.Flags), + }) + + return rawLink, err +} diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go index f3b1629e70a..b056f99aecd 100644 --- a/vendor/github.com/cilium/ebpf/linker.go +++ b/vendor/github.com/cilium/ebpf/linker.go @@ -1,47 +1,80 @@ package ebpf import ( + "bytes" + "encoding/binary" "fmt" "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal/btf" ) -// link resolves bpf-to-bpf calls. +// The linker is responsible for resolving bpf-to-bpf calls between programs +// within an ELF. Each BPF program must be a self-contained binary blob, +// so when an instruction in one ELF program section wants to jump to +// a function in another, the linker needs to pull in the bytecode +// (and BTF info) of the target function and concatenate the instruction +// streams. 
// -// Each library may contain multiple functions / labels, and is only linked -// if prog references one of these functions. +// Later on in the pipeline, all call sites are fixed up with relative jumps +// within this newly-created instruction stream to then finally hand off to +// the kernel with BPF_PROG_LOAD. // -// Libraries also linked. -func link(prog *ProgramSpec, libs []*ProgramSpec) error { - var ( - linked = make(map[*ProgramSpec]bool) - pending = []asm.Instructions{prog.Instructions} - insns asm.Instructions - ) - for len(pending) > 0 { - insns, pending = pending[0], pending[1:] - for _, lib := range libs { - if linked[lib] { - continue - } +// Each function is denoted by an ELF symbol and the compiler takes care of +// register setup before each jump instruction. + +// populateReferences populates all of progs' Instructions and references +// with their full dependency chains including transient dependencies. +func populateReferences(progs map[string]*ProgramSpec) error { + type props struct { + insns asm.Instructions + refs map[string]*ProgramSpec + } - needed, err := needSection(insns, lib.Instructions) - if err != nil { - return fmt.Errorf("linking %s: %w", lib.Name, err) - } + out := make(map[string]props) - if !needed { - continue - } + // Resolve and store direct references between all progs. + if err := findReferences(progs); err != nil { + return fmt.Errorf("finding references: %w", err) + } + + // Flatten all progs' instruction streams. + for name, prog := range progs { + insns, refs := prog.flatten(nil) - linked[lib] = true - prog.Instructions = append(prog.Instructions, lib.Instructions...) - pending = append(pending, lib.Instructions) + prop := props{ + insns: insns, + refs: refs, + } - if prog.BTF != nil && lib.BTF != nil { - if err := prog.BTF.Append(lib.BTF); err != nil { - return fmt.Errorf("linking BTF of %s: %w", lib.Name, err) - } + out[name] = prop + } + + // Replace all progs' instructions and references + for name, props := range out { + progs[name].Instructions = props.insns + progs[name].references = props.refs + } + + return nil +} + +// findReferences finds bpf-to-bpf calls between progs and populates each +// prog's references field with its direct neighbours. +func findReferences(progs map[string]*ProgramSpec) error { + // Check all ProgramSpecs in the collection against each other. + for _, prog := range progs { + prog.references = make(map[string]*ProgramSpec) + + // Look up call targets in progs and store pointers to their corresponding + // ProgramSpecs as direct references. + for refname := range prog.Instructions.FunctionReferences() { + ref := progs[refname] + // Call targets are allowed to be missing from an ELF. This occurs when + // a program calls into a forward function declaration that is left + // unimplemented. This is caught at load time during fixups. + if ref != nil { + prog.references[refname] = ref } } } @@ -49,39 +82,36 @@ func link(prog *ProgramSpec, libs []*ProgramSpec) error { return nil } -func needSection(insns, section asm.Instructions) (bool, error) { - // A map of symbols to the libraries which contain them. - symbols, err := section.SymbolOffsets() - if err != nil { - return false, err +// marshalFuncInfos returns the BTF func infos of all progs in order. 
+func marshalFuncInfos(layout []reference) ([]byte, error) { + if len(layout) == 0 { + return nil, nil } - for _, ins := range insns { - if ins.Reference == "" { - continue + buf := bytes.NewBuffer(make([]byte, 0, binary.Size(&btf.FuncInfo{})*len(layout))) + for _, sym := range layout { + if err := sym.spec.BTF.FuncInfo.Marshal(buf, sym.offset); err != nil { + return nil, fmt.Errorf("marshaling prog %s func info: %w", sym.spec.Name, err) } + } - if ins.OpCode.JumpOp() != asm.Call || ins.Src != asm.PseudoCall { - continue - } + return buf.Bytes(), nil +} - if ins.Constant != -1 { - // This is already a valid call, no need to link again. - continue - } +// marshalLineInfos returns the BTF line infos of all progs in order. +func marshalLineInfos(layout []reference) ([]byte, error) { + if len(layout) == 0 { + return nil, nil + } - if _, ok := symbols[ins.Reference]; !ok { - // Symbol isn't available in this section - continue + buf := bytes.NewBuffer(make([]byte, 0, binary.Size(&btf.LineInfo{})*len(layout))) + for _, sym := range layout { + if err := sym.spec.BTF.LineInfos.Marshal(buf, sym.offset); err != nil { + return nil, fmt.Errorf("marshaling prog %s line infos: %w", sym.spec.Name, err) } - - // At this point we know that at least one function in the - // library is called from insns, so we have to link it. - return true, nil } - // None of the functions in the section are called. - return false, nil + return buf.Bytes(), nil } func fixupJumpsAndCalls(insns asm.Instructions) error { @@ -111,28 +141,32 @@ func fixupJumpsAndCalls(insns asm.Instructions) error { continue } + symOffset, ok := symbolOffsets[ins.Reference] switch { - case ins.IsFunctionCall() && ins.Constant == -1: - // Rewrite bpf to bpf call - callOffset, ok := symbolOffsets[ins.Reference] + case ins.IsFunctionReference() && ins.Constant == -1: if !ok { - return fmt.Errorf("call at %d: reference to missing symbol %q", i, ins.Reference) + break } - ins.Constant = int64(callOffset - offset - 1) + ins.Constant = int64(symOffset - offset - 1) + continue - case ins.OpCode.Class() == asm.JumpClass && ins.Offset == -1: - // Rewrite jump to label - jumpOffset, ok := symbolOffsets[ins.Reference] + case ins.OpCode.Class().IsJump() && ins.Offset == -1: if !ok { - return fmt.Errorf("jump at %d: reference to missing symbol %q", i, ins.Reference) + break } - ins.Offset = int16(jumpOffset - offset - 1) + ins.Offset = int16(symOffset - offset - 1) + continue case ins.IsLoadFromMap() && ins.MapPtr() == -1: - return fmt.Errorf("map %s: %w", ins.Reference, errUnsatisfiedReference) + return fmt.Errorf("map %s: %w", ins.Reference, errUnsatisfiedMap) + default: + // no fixup needed + continue } + + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference, errUnsatisfiedProgram) } // fixupBPFCalls replaces bpf_probe_read_{kernel,user}[_str] with bpf_probe_read[_str] on older kernels diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go index cca387ead01..b49b4018798 100644 --- a/vendor/github.com/cilium/ebpf/map.go +++ b/vendor/github.com/cilium/ebpf/map.go @@ -5,17 +5,22 @@ import ( "errors" "fmt" "io" + "math/rand" "path/filepath" "reflect" "strings" + "time" + "unsafe" "github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) // Errors returned by Map and MapIterator methods. 
var ( + errFirstKeyNotFound = errors.New("first key not found") ErrKeyNotExist = errors.New("key does not exist") ErrKeyExist = errors.New("key already exists") ErrIterationAborted = errors.New("iteration aborted") @@ -67,9 +72,9 @@ type MapSpec struct { InnerMap *MapSpec // Extra trailing bytes found in the ELF map definition when using structs - // larger than libbpf's bpf_map_def. Must be empty before instantiating - // the MapSpec into a Map. - Extra bytes.Reader + // larger than libbpf's bpf_map_def. nil if no trailing bytes were present. + // Must be nil or empty before instantiating the MapSpec into a Map. + Extra *bytes.Reader // The BTF associated with this map. BTF *btf.Map @@ -97,6 +102,12 @@ func (ms *MapSpec) Copy() *MapSpec { return &cpy } +// hasBTF returns true if the MapSpec has a valid BTF spec and if its +// map type supports associated BTF metadata in the kernel. +func (ms *MapSpec) hasBTF() bool { + return ms.BTF != nil && ms.Type.hasBTF() +} + func (ms *MapSpec) clampPerfEventArraySize() error { if ms.Type != PerfEventArray { return nil @@ -151,7 +162,7 @@ func (ms *MapSpec) checkCompatibility(m *Map) error { // if you require custom encoding. type Map struct { name string - fd *internal.FD + fd *sys.FD typ MapType keySize uint32 valueSize uint32 @@ -166,18 +177,19 @@ type Map struct { // // You should not use fd after calling this function. func NewMapFromFD(fd int) (*Map, error) { - if fd < 0 { - return nil, errors.New("invalid fd") + f, err := sys.NewFD(fd) + if err != nil { + return nil, err } - return newMapFromFD(internal.NewFD(uint32(fd))) + return newMapFromFD(f) } -func newMapFromFD(fd *internal.FD) (*Map, error) { +func newMapFromFD(fd *sys.FD) (*Map, error) { info, err := newMapInfoFromFd(fd) if err != nil { fd.Close() - return nil, fmt.Errorf("get map info: %s", err) + return nil, fmt.Errorf("get map info: %w", err) } return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) @@ -257,7 +269,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported) } - var innerFd *internal.FD + var innerFd *sys.FD if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps { if spec.InnerMap == nil { return nil, fmt.Errorf("%s requires InnerMap", spec.Type) @@ -288,7 +300,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ if spec.Pinning == PinByName { path := filepath.Join(opts.PinPath, spec.Name) if err := m.Pin(path); err != nil { - return nil, fmt.Errorf("pin map: %s", err) + return nil, fmt.Errorf("pin map: %w", err) } } @@ -297,7 +309,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ // createMap validates the spec's properties and creates the map in the kernel // using the given opts. It does not populate or freeze the map. -func (spec *MapSpec) createMap(inner *internal.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) { +func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) { closeOnError := func(closer io.Closer) { if err != nil { closer.Close() @@ -310,8 +322,10 @@ func (spec *MapSpec) createMap(inner *internal.FD, opts MapOptions, handles *han // additional 'inner_map_idx' and later 'numa_node' fields. // In order to support loading these definitions, tolerate the presence of // extra bytes, but require them to be zeroes. 
- if _, err := io.Copy(internal.DiscardZeroes{}, &spec.Extra); err != nil { - return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map") + if spec.Extra != nil { + if _, err := io.Copy(internal.DiscardZeroes{}, spec.Extra); err != nil { + return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map") + } } switch spec.Type { @@ -360,49 +374,48 @@ func (spec *MapSpec) createMap(inner *internal.FD, opts MapOptions, handles *han return nil, fmt.Errorf("map create: %w", err) } } + if spec.Flags&unix.BPF_F_NO_PREALLOC > 0 { + if err := haveNoPreallocMaps(); err != nil { + return nil, fmt.Errorf("map create: %w", err) + } + } - attr := internal.BPFMapCreateAttr{ - MapType: uint32(spec.Type), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(spec.Type), KeySize: spec.KeySize, ValueSize: spec.ValueSize, MaxEntries: spec.MaxEntries, - Flags: spec.Flags, + MapFlags: spec.Flags, NumaNode: spec.NumaNode, } if inner != nil { - var err error - attr.InnerMapFd, err = inner.Value() - if err != nil { - return nil, fmt.Errorf("map create: %w", err) - } + attr.InnerMapFd = inner.Uint() } if haveObjName() == nil { - attr.MapName = internal.NewBPFObjName(spec.Name) + attr.MapName = sys.NewObjName(spec.Name) } - var btfDisabled bool - if spec.BTF != nil { + if spec.hasBTF() { handle, err := handles.btfHandle(spec.BTF.Spec) - btfDisabled = errors.Is(err, btf.ErrNotSupported) - if err != nil && !btfDisabled { + if err != nil && !errors.Is(err, btf.ErrNotSupported) { return nil, fmt.Errorf("load BTF: %w", err) } if handle != nil { - attr.BTFFd = uint32(handle.FD()) - attr.BTFKeyTypeID = uint32(spec.BTF.Key.ID()) - attr.BTFValueTypeID = uint32(spec.BTF.Value.ID()) + attr.BtfFd = uint32(handle.FD()) + attr.BtfKeyTypeId = uint32(spec.BTF.Key.ID()) + attr.BtfValueTypeId = uint32(spec.BTF.Value.ID()) } } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { if errors.Is(err, unix.EPERM) { - return nil, fmt.Errorf("map create: %w (MEMLOCK bay be too low, consider rlimit.RemoveMemlock)", err) + return nil, fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) } - if btfDisabled { + if !spec.hasBTF() { return nil, fmt.Errorf("map create without BTF: %w", err) } return nil, fmt.Errorf("map create: %w", err) @@ -419,7 +432,7 @@ func (spec *MapSpec) createMap(inner *internal.FD, opts MapOptions, handles *han // newMap allocates and returns a new Map structure. // Sets the fullValueSize on per-CPU maps. -func newMap(fd *internal.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { +func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { m := &Map{ name, fd, @@ -482,6 +495,12 @@ func (m *Map) Info() (*MapInfo, error) { return newMapInfoFromFd(m.fd) } +// MapLookupFlags controls the behaviour of the map lookup calls. +type MapLookupFlags uint64 + +// LookupLock look up the value of a spin-locked map. +const LookupLock MapLookupFlags = 4 + // Lookup retrieves a value from a Map. // // Calls Close() on valueOut if it is of type **Map or **Program, @@ -490,39 +509,58 @@ func (m *Map) Info() (*MapInfo, error) { // Returns an error if the key doesn't exist, see ErrKeyNotExist. 
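
The reworded EPERM hint above points callers at the library's rlimit helper. A minimal sketch of the suggested pattern, assuming the github.com/cilium/ebpf/rlimit package that ships with this release:

    package main

    import (
        "log"

        "github.com/cilium/ebpf/rlimit"
    )

    func main() {
        // On kernels before 5.11, BPF memory is charged against
        // RLIMIT_MEMLOCK; lift the limit before creating maps or programs.
        if err := rlimit.RemoveMemlock(); err != nil {
            log.Fatal(err)
        }

        // ... ebpf.NewMap / ebpf.NewProgram calls go here ...
    }
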
func (m *Map) Lookup(key, valueOut interface{}) error { valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) - if err := m.lookup(key, valuePtr); err != nil { + if err := m.lookup(key, valuePtr, 0); err != nil { return err } return m.unmarshalValue(valueOut, valueBytes) } -// LookupAndDelete retrieves and deletes a value from a Map. +// LookupWithFlags retrieves a value from a Map with flags. // -// Returns ErrKeyNotExist if the key doesn't exist. -func (m *Map) LookupAndDelete(key, valueOut interface{}) error { +// Passing LookupLock flag will look up the value of a spin-locked +// map without returning the lock. This must be specified if the +// elements contain a spinlock. +// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see ErrKeyNotExist. +func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) - - keyPtr, err := m.marshalKey(key) - if err != nil { - return fmt.Errorf("can't marshal key: %w", err) - } - - if err := bpfMapLookupAndDelete(m.fd, keyPtr, valuePtr); err != nil { - return fmt.Errorf("lookup and delete failed: %w", err) + if err := m.lookup(key, valuePtr, flags); err != nil { + return err } return m.unmarshalValue(valueOut, valueBytes) } +// LookupAndDelete retrieves and deletes a value from a Map. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDelete(key, valueOut interface{}) error { + return m.lookupAndDelete(key, valueOut, 0) +} + +// LookupAndDeleteWithFlags retrieves and deletes a value from a Map. +// +// Passing LookupLock flag will look up and delete the value of a spin-locked +// map without returning the lock. This must be specified if the elements +// contain a spinlock. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { + return m.lookupAndDelete(key, valueOut, flags) +} + // LookupBytes gets a value from Map. // // Returns a nil value if a key doesn't exist. 
func (m *Map) LookupBytes(key interface{}) ([]byte, error) { valueBytes := make([]byte, m.fullValueSize) - valuePtr := internal.NewSlicePointer(valueBytes) + valuePtr := sys.NewSlicePointer(valueBytes) - err := m.lookup(key, valuePtr) + err := m.lookup(key, valuePtr, 0) if errors.Is(err, ErrKeyNotExist) { return nil, nil } @@ -530,18 +568,47 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) { return valueBytes, err } -func (m *Map) lookup(key interface{}, valueOut internal.Pointer) error { +func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error { keyPtr, err := m.marshalKey(key) if err != nil { return fmt.Errorf("can't marshal key: %w", err) } - if err = bpfMapLookupElem(m.fd, keyPtr, valueOut); err != nil { - return fmt.Errorf("lookup failed: %w", err) + attr := sys.MapLookupElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valueOut, + Flags: uint64(flags), + } + + if err = sys.MapLookupElem(&attr); err != nil { + return fmt.Errorf("lookup: %w", wrapMapError(err)) } return nil } +func (m *Map) lookupAndDelete(key, valueOut interface{}, flags MapLookupFlags) error { + valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) + + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapLookupAndDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err := sys.MapLookupAndDeleteElem(&attr); err != nil { + return fmt.Errorf("lookup and delete: %w", wrapMapError(err)) + } + + return m.unmarshalValue(valueOut, valueBytes) +} + // MapUpdateFlags controls the behaviour of the Map.Update call. // // The exact semantics depend on the specific MapType. @@ -554,6 +621,8 @@ const ( UpdateNoExist MapUpdateFlags = 1 << (iota - 1) // UpdateExist updates an existing element. UpdateExist + // UpdateLock updates elements under bpf_spin_lock. + UpdateLock ) // Put replaces or creates a value in map. @@ -575,8 +644,15 @@ func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error { return fmt.Errorf("can't marshal value: %w", err) } - if err = bpfMapUpdateElem(m.fd, keyPtr, valuePtr, uint64(flags)); err != nil { - return fmt.Errorf("update failed: %w", err) + attr := sys.MapUpdateElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err = sys.MapUpdateElem(&attr); err != nil { + return fmt.Errorf("update: %w", wrapMapError(err)) } return nil @@ -591,8 +667,13 @@ func (m *Map) Delete(key interface{}) error { return fmt.Errorf("can't marshal key: %w", err) } - if err = bpfMapDeleteElem(m.fd, keyPtr); err != nil { - return fmt.Errorf("delete failed: %w", err) + attr := sys.MapDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + } + + if err = sys.MapDeleteElem(&attr); err != nil { + return fmt.Errorf("delete: %w", wrapMapError(err)) } return nil } @@ -624,7 +705,7 @@ func (m *Map) NextKey(key, nextKeyOut interface{}) error { // Returns nil if there are no more keys. 
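
The new LookupWithFlags/LookupAndDeleteWithFlags calls and the UpdateLock flag exist for values that embed a struct bpf_spin_lock. A hedged sketch of a caller using them; the Go-side value layout and field names are assumptions and must mirror the BPF-side struct:

    package example

    import "github.com/cilium/ebpf"

    // counterValue mirrors a BPF value that starts with a struct bpf_spin_lock.
    type counterValue struct {
        Lock    uint32 // occupies the bpf_spin_lock slot
        _       uint32 // explicit padding to match the C layout
        Packets uint64
    }

    // bumpCounter reads and rewrites one element without copying the lock out.
    func bumpCounter(m *ebpf.Map, key uint32) error {
        var val counterValue
        if err := m.LookupWithFlags(&key, &val, ebpf.LookupLock); err != nil {
            return err
        }
        val.Packets++
        return m.Update(&key, &val, ebpf.UpdateLock)
    }
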
func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { nextKey := make([]byte, m.keySize) - nextKeyPtr := internal.NewSlicePointer(nextKey) + nextKeyPtr := sys.NewSlicePointer(nextKey) err := m.nextKey(key, nextKeyPtr) if errors.Is(err, ErrKeyNotExist) { @@ -634,9 +715,9 @@ func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { return nextKey, err } -func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error { +func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { var ( - keyPtr internal.Pointer + keyPtr sys.Pointer err error ) @@ -647,12 +728,77 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error { } } - if err = bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut); err != nil { - return fmt.Errorf("next key failed: %w", err) + attr := sys.MapGetNextKeyAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + NextKey: nextKeyOut, } + + if err = sys.MapGetNextKey(&attr); err != nil { + // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the + // first map element when a nil key pointer is specified. + if key == nil && errors.Is(err, unix.EFAULT) { + var guessKey sys.Pointer + guessKey, err = m.guessNonExistentKey() + if err != nil { + return fmt.Errorf("can't guess starting key: %w", err) + } + + // Retry the syscall with a valid non-existing key. + attr.Key = guessKey + if err = sys.MapGetNextKey(&attr); err == nil { + return nil + } + } + + return fmt.Errorf("next key: %w", wrapMapError(err)) + } + return nil } +// guessNonExistentKey attempts to perform a map lookup that returns ENOENT. +// This is necessary on kernels before 4.4.132, since those don't support +// iterating maps from the start by providing an invalid key pointer. +func (m *Map) guessNonExistentKey() (startKey sys.Pointer, err error) { + // Provide an invalid value pointer to prevent a copy on the kernel side. + valuePtr := sys.NewPointer(unsafe.Pointer(^uintptr(0))) + randKey := make([]byte, int(m.keySize)) + + for i := 0; i < 4; i++ { + switch i { + // For hash maps, the 0 key is less likely to be occupied. They're often + // used for storing data related to pointers, and their access pattern is + // generally scattered across the keyspace. + case 0: + // An all-0xff key is guaranteed to be out of bounds of any array, since + // those have a fixed key size of 4 bytes. The only corner case being + // arrays with 2^32 max entries, but those are prohibitively expensive + // in many environments. + case 1: + for r := range randKey { + randKey[r] = 0xff + } + // Inspired by BCC, 0x55 is an alternating binary pattern (0101), so + // is unlikely to be taken. + case 2: + for r := range randKey { + randKey[r] = 0x55 + } + // Last ditch effort, generate a random key. + case 3: + rand.New(rand.NewSource(time.Now().UnixNano())).Read(randKey) + } + + err := m.lookup(randKey, valuePtr, 0) + if errors.Is(err, ErrKeyNotExist) { + return sys.NewSlicePointer(randKey), nil + } + } + + return sys.Pointer{}, errFirstKeyNotFound +} + // BatchLookup looks up many elements in a map at once. // // "keysOut" and "valuesOut" must be of type slice, a pointer @@ -664,7 +810,7 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error { // the end of all possible results, even when partial results // are returned. It should be used to evaluate when lookup is "done". 
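
Because the batch cursor handling described above is easy to get wrong, a hedged sketch of a full-map dump via BatchLookup follows, assuming a HASH map with uint32 keys and uint64 values on a kernel with the batch syscalls (5.6+); sizing the cursor like a key matches this implementation's buffer handling but is an assumption about usage:

    package example

    import (
        "errors"

        "github.com/cilium/ebpf"
    )

    // dumpMap drains m in batches of 64 entries.
    func dumpMap(m *ebpf.Map) (map[uint32]uint64, error) {
        out := make(map[uint32]uint64)

        keys := make([]uint32, 64)
        vals := make([]uint64, 64)

        var cursor uint32    // opaque batch token, sized like a key here
        var prev interface{} // nil on the first call starts from the beginning

        for {
            n, err := m.BatchLookup(prev, &cursor, keys, vals, nil)
            for i := 0; i < n; i++ {
                out[keys[i]] = vals[i]
            }
            if errors.Is(err, ebpf.ErrKeyNotExist) {
                return out, nil // reached the end, possibly with partial results
            }
            if err != nil {
                return nil, err
            }
            prev = &cursor
        }
    }
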
func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(internal.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) + return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) } // BatchLookupAndDelete looks up many elements in a map at once, @@ -679,10 +825,10 @@ func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, o // the end of all possible results, even when partial results // are returned. It should be used to evaluate when lookup is "done". func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(internal.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) + return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) } -func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { +func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { if err := haveBatchAPI(); err != nil { return 0, err } @@ -702,29 +848,36 @@ func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, va return 0, fmt.Errorf("keysOut and valuesOut must be the same length") } keyBuf := make([]byte, count*int(m.keySize)) - keyPtr := internal.NewSlicePointer(keyBuf) + keyPtr := sys.NewSlicePointer(keyBuf) valueBuf := make([]byte, count*int(m.fullValueSize)) - valuePtr := internal.NewSlicePointer(valueBuf) + valuePtr := sys.NewSlicePointer(valueBuf) + nextPtr, nextBuf := makeBuffer(nextKeyOut, int(m.keySize)) - var ( - startPtr internal.Pointer - err error - retErr error - ) + attr := sys.MapLookupBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Values: valuePtr, + Count: uint32(count), + OutBatch: nextPtr, + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + var err error if startKey != nil { - startPtr, err = marshalPtr(startKey, int(m.keySize)) + attr.InBatch, err = marshalPtr(startKey, int(m.keySize)) if err != nil { return 0, err } } - nextPtr, nextBuf := makeBuffer(nextKeyOut, int(m.keySize)) - ct, err := bpfMapBatch(cmd, m.fd, startPtr, nextPtr, keyPtr, valuePtr, uint32(count), opts) - if err != nil { - if !errors.Is(err, ErrKeyNotExist) { - return 0, err - } - retErr = ErrKeyNotExist + _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + sysErr = wrapMapError(sysErr) + if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr } err = m.unmarshalKey(nextKeyOut, nextBuf) @@ -737,9 +890,10 @@ func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, va } err = unmarshalBytes(valuesOut, valueBuf) if err != nil { - retErr = err + return 0, err } - return int(ct), retErr + + return int(attr.Count), sysErr } // BatchUpdate updates the map with multiple keys and values @@ -763,7 +917,7 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er } var ( count = keysValue.Len() - valuePtr internal.Pointer + valuePtr sys.Pointer err error ) if count != valuesValue.Len() { @@ -777,9 +931,24 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er if err != nil { return 0, err } - var nilPtr internal.Pointer - ct, err := bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, 
valuePtr, uint32(count), opts) - return int(ct), err + + attr := sys.MapUpdateBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Values: valuePtr, + Count: uint32(count), + } + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + err = sys.MapUpdateBatch(&attr) + if err != nil { + return int(attr.Count), fmt.Errorf("batch update: %w", wrapMapError(err)) + } + + return int(attr.Count), nil } // BatchDelete batch deletes entries in the map by keys. @@ -800,9 +969,23 @@ func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { if err != nil { return 0, fmt.Errorf("cannot marshal keys: %v", err) } - var nilPtr internal.Pointer - ct, err := bpfMapBatch(internal.BPF_MAP_DELETE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, nilPtr, uint32(count), opts) - return int(ct), err + + attr := sys.MapDeleteBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Count: uint32(count), + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + if err = sys.MapDeleteBatch(&attr); err != nil { + return int(attr.Count), fmt.Errorf("batch delete: %w", wrapMapError(err)) + } + + return int(attr.Count), nil } // Iterate traverses a map. @@ -830,14 +1013,7 @@ func (m *Map) Close() error { // // Calling this function is invalid after Close has been called. func (m *Map) FD() int { - fd, err := m.fd.Value() - if err != nil { - // Best effort: -1 is the number most likely to be an - // invalid file descriptor. - return -1 - } - - return int(fd) + return m.fd.Int() } // Clone creates a duplicate of the Map. @@ -912,7 +1088,11 @@ func (m *Map) Freeze() error { return fmt.Errorf("can't freeze map: %w", err) } - if err := bpfMapFreeze(m.fd); err != nil { + attr := sys.MapFreezeAttr{ + MapFd: m.fd.Uint(), + } + + if err := sys.MapFreeze(&attr); err != nil { return fmt.Errorf("can't freeze map: %w", err) } return nil @@ -936,13 +1116,13 @@ func (m *Map) finalize(spec *MapSpec) error { return nil } -func (m *Map) marshalKey(data interface{}) (internal.Pointer, error) { +func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) { if data == nil { if m.keySize == 0 { // Queues have a key length of zero, so passing nil here is valid. 
- return internal.NewPointer(nil), nil + return sys.NewPointer(nil), nil } - return internal.Pointer{}, errors.New("can't use nil as key of map") + return sys.Pointer{}, errors.New("can't use nil as key of map") } return marshalPtr(data, int(m.keySize)) @@ -957,7 +1137,7 @@ func (m *Map) unmarshalKey(data interface{}, buf []byte) error { return unmarshalBytes(data, buf) } -func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) { +func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) { if m.typ.hasPerCPUValue() { return marshalPerCPUValue(data, int(m.valueSize)) } @@ -970,13 +1150,13 @@ func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) { switch value := data.(type) { case *Map: if !m.typ.canStoreMap() { - return internal.Pointer{}, fmt.Errorf("can't store map in %s", m.typ) + return sys.Pointer{}, fmt.Errorf("can't store map in %s", m.typ) } buf, err = marshalMap(value, int(m.valueSize)) case *Program: if !m.typ.canStoreProgram() { - return internal.Pointer{}, fmt.Errorf("can't store program in %s", m.typ) + return sys.Pointer{}, fmt.Errorf("can't store program in %s", m.typ) } buf, err = marshalProgram(value, int(m.valueSize)) @@ -985,10 +1165,10 @@ func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) { } if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } - return internal.NewSlicePointer(buf), nil + return sys.NewSlicePointer(buf), nil } func (m *Map) unmarshalValue(value interface{}, buf []byte) error { @@ -1052,7 +1232,10 @@ func (m *Map) unmarshalValue(value interface{}, buf []byte) error { // LoadPinnedMap loads a Map from a BPF file. func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) if err != nil { return nil, err } @@ -1081,13 +1264,8 @@ func marshalMap(m *Map, length int) ([]byte, error) { return nil, fmt.Errorf("can't marshal map to %d bytes", length) } - fd, err := m.fd.Value() - if err != nil { - return nil, err - } - buf := make([]byte, 4) - internal.NativeEndian.PutUint32(buf, fd) + internal.NativeEndian.PutUint32(buf, m.fd.Uint()) return buf, nil } @@ -1239,15 +1417,17 @@ func (mi *MapIterator) Err() error { // // Returns ErrNotExist, if there is no next eBPF map. func MapGetNextID(startID MapID) (MapID, error) { - id, err := objGetNextID(internal.BPF_MAP_GET_NEXT_ID, uint32(startID)) - return MapID(id), err + attr := &sys.MapGetNextIdAttr{Id: uint32(startID)} + return MapID(attr.NextId), sys.MapGetNextId(attr) } // NewMapFromID returns the map for a given id. // // Returns ErrNotExist, if there is no eBPF map with the given id. func NewMapFromID(id MapID) (*Map, error) { - fd, err := internal.BPFObjGetFDByID(internal.BPF_MAP_GET_FD_BY_ID, uint32(id)) + fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{ + Id: uint32(id), + }) if err != nil { return nil, err } @@ -1259,9 +1439,9 @@ func NewMapFromID(id MapID) (*Map, error) { // // Deprecated: use MapInfo.ID() instead. 
func (m *Map) ID() (MapID, error) { - info, err := bpfGetMapInfoByFD(m.fd) - if err != nil { + var info sys.MapInfo + if err := sys.ObjInfo(m.fd, &info); err != nil { return MapID(0), err } - return MapID(info.id), nil + return MapID(info.Id), nil } diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go index e461d673d70..4351cc57f40 100644 --- a/vendor/github.com/cilium/ebpf/marshalers.go +++ b/vendor/github.com/cilium/ebpf/marshalers.go @@ -12,6 +12,7 @@ import ( "unsafe" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) // marshalPtr converts an arbitrary value into a pointer suitable @@ -19,17 +20,17 @@ import ( // // As an optimization, it returns the original value if it is an // unsafe.Pointer. -func marshalPtr(data interface{}, length int) (internal.Pointer, error) { +func marshalPtr(data interface{}, length int) (sys.Pointer, error) { if ptr, ok := data.(unsafe.Pointer); ok { - return internal.NewPointer(ptr), nil + return sys.NewPointer(ptr), nil } buf, err := marshalBytes(data, length) if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } - return internal.NewSlicePointer(buf), nil + return sys.NewSlicePointer(buf), nil } // marshalBytes converts an arbitrary value into a byte buffer. @@ -73,13 +74,13 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) { return buf, nil } -func makeBuffer(dst interface{}, length int) (internal.Pointer, []byte) { +func makeBuffer(dst interface{}, length int) (sys.Pointer, []byte) { if ptr, ok := dst.(unsafe.Pointer); ok { - return internal.NewPointer(ptr), nil + return sys.NewPointer(ptr), nil } buf := make([]byte, length) - return internal.NewSlicePointer(buf), buf + return sys.NewSlicePointer(buf), buf } var bytesReaderPool = sync.Pool{ @@ -164,21 +165,21 @@ func unmarshalBytes(data interface{}, buf []byte) error { // Values are initialized to zero if the slice has less elements than CPUs. // // slice must have a type like []elementType. 
-func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, error) { +func marshalPerCPUValue(slice interface{}, elemLength int) (sys.Pointer, error) { sliceType := reflect.TypeOf(slice) if sliceType.Kind() != reflect.Slice { - return internal.Pointer{}, errors.New("per-CPU value requires slice") + return sys.Pointer{}, errors.New("per-CPU value requires slice") } possibleCPUs, err := internal.PossibleCPUs() if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } sliceValue := reflect.ValueOf(slice) sliceLen := sliceValue.Len() if sliceLen > possibleCPUs { - return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") + return sys.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") } alignedElemLength := internal.Align(elemLength, 8) @@ -188,14 +189,14 @@ func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, er elem := sliceValue.Index(i).Interface() elemBytes, err := marshalBytes(elem, elemLength) if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } offset := i * alignedElemLength copy(buf[offset:offset+elemLength], elemBytes) } - return internal.NewSlicePointer(buf), nil + return sys.NewSlicePointer(buf), nil } // unmarshalPerCPUValue decodes a buffer into a slice containing one value per diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go index 3549a3fe3f0..523e6a54e7c 100644 --- a/vendor/github.com/cilium/ebpf/prog.go +++ b/vendor/github.com/cilium/ebpf/prog.go @@ -14,13 +14,15 @@ import ( "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) // ErrNotSupported is returned whenever the kernel doesn't support a feature. var ErrNotSupported = internal.ErrNotSupported -var errUnsatisfiedReference = errors.New("unsatisfied reference") +var errUnsatisfiedMap = errors.New("unsatisfied map reference") +var errUnsatisfiedProgram = errors.New("unsatisfied program reference") // ProgramID represents the unique ID of an eBPF program. type ProgramID uint32 @@ -61,11 +63,17 @@ type ProgramSpec struct { // Type determines at which hook in the kernel a program will run. Type ProgramType AttachType AttachType + // Name of a kernel data structure or function to attach to. Its // interpretation depends on Type and AttachType. AttachTo string + // The program to attach to. Must be provided manually. AttachTarget *Program + + // The name of the ELF section this program orininated from. + SectionName string + Instructions asm.Instructions // Flags is passed to the kernel and specifies additional program @@ -91,6 +99,9 @@ type ProgramSpec struct { // The byte order this program was compiled for, may be nil. ByteOrder binary.ByteOrder + + // Programs called by this ProgramSpec. Includes all dependencies. + references map[string]*ProgramSpec } // Copy returns a copy of the spec. @@ -112,6 +123,83 @@ func (ps *ProgramSpec) Tag() (string, error) { return ps.Instructions.Tag(internal.NativeEndian) } +// flatten returns spec's full instruction stream including all of its +// dependencies and an expanded map of references that includes all symbols +// appearing in the instruction stream. +// +// Returns nil, nil if spec was already visited. 
+func (spec *ProgramSpec) flatten(visited map[*ProgramSpec]bool) (asm.Instructions, map[string]*ProgramSpec) { + if visited == nil { + visited = make(map[*ProgramSpec]bool) + } + + // This program and its dependencies were already collected. + if visited[spec] { + return nil, nil + } + + visited[spec] = true + + // Start off with spec's direct references and instructions. + progs := spec.references + insns := spec.Instructions + + // Recurse into each reference and append/merge its references into + // a temporary buffer as to not interfere with the resolution process. + for _, ref := range spec.references { + if ri, rp := ref.flatten(visited); ri != nil || rp != nil { + insns = append(insns, ri...) + + // Merge nested references into the top-level scope. + for n, p := range rp { + progs[n] = p + } + } + } + + return insns, progs +} + +// A reference describes a byte offset an Symbol Instruction pointing +// to another ProgramSpec. +type reference struct { + offset uint64 + spec *ProgramSpec +} + +// layout returns a unique list of programs that must be included +// in spec's instruction stream when inserting it into the kernel. +// Always returns spec itself as the first entry in the chain. +func (spec *ProgramSpec) layout() ([]reference, error) { + out := []reference{{0, spec}} + + name := spec.Instructions.Name() + + var ins *asm.Instruction + iter := spec.Instructions.Iterate() + for iter.Next() { + ins = iter.Ins + + // Skip non-symbols and symbols that describe the ProgramSpec itself, + // which is usually the first instruction in Instructions. + // ProgramSpec itself is already included and not present in references. + if ins.Symbol == "" || ins.Symbol == name { + continue + } + + // Failure to look up a reference is not an error. There are existing tests + // with valid progs that contain multiple symbols and don't have references + // populated. Assume ProgramSpec is used similarly in the wild, so don't + // alter this behaviour. + ref := spec.references[ins.Symbol] + if ref != nil { + out = append(out, reference{iter.Offset.Bytes(), ref}) + } + } + + return out, nil +} + // Program represents BPF program loaded into the kernel. // // It is not safe to close a Program which is used by other goroutines. @@ -120,7 +208,7 @@ type Program struct { // otherwise it is empty. VerifierLog string - fd *internal.FD + fd *sys.FD name string pinnedPath string typ ProgramType @@ -139,11 +227,15 @@ func NewProgram(spec *ProgramSpec) (*Program, error) { // Loading a program for the first time will perform // feature detection by loading small, temporary programs. 
func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { + if spec == nil { + return nil, errors.New("can't load a program from a nil spec") + } + handles := newHandleCache() defer handles.close() prog, err := newProgramWithOptions(spec, opts, handles) - if errors.Is(err, errUnsatisfiedReference) { + if errors.Is(err, errUnsatisfiedMap) { return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err) } return prog, err @@ -154,6 +246,10 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand return nil, errors.New("instructions cannot be empty") } + if spec.Type == UnspecifiedProgram { + return nil, errors.New("can't load program of unspecified type") + } + if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian { return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian) } @@ -171,16 +267,16 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand kv = v.Kernel() } - attr := &internal.BPFProgLoadAttr{ - ProgType: uint32(spec.Type), + attr := &sys.ProgLoadAttr{ + ProgType: sys.ProgType(spec.Type), ProgFlags: spec.Flags, - ExpectedAttachType: uint32(spec.AttachType), - License: internal.NewStringPointer(spec.License), - KernelVersion: kv, + ExpectedAttachType: sys.AttachType(spec.AttachType), + License: sys.NewStringPointer(spec.License), + KernVersion: kv, } if haveObjName() == nil { - attr.ProgName = internal.NewBPFObjName(spec.Name) + attr.ProgName = sys.NewObjName(spec.Name) } var err error @@ -192,6 +288,11 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand } } + layout, err := spec.layout() + if err != nil { + return nil, fmt.Errorf("get program layout: %w", err) + } + var btfDisabled bool var core btf.COREFixups if spec.BTF != nil { @@ -207,23 +308,23 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand } if handle != nil { - attr.ProgBTFFd = uint32(handle.FD()) + attr.ProgBtfFd = uint32(handle.FD()) - recSize, bytes, err := spec.BTF.LineInfos() + fib, err := marshalFuncInfos(layout) if err != nil { - return nil, fmt.Errorf("get BTF line infos: %w", err) + return nil, err } - attr.LineInfoRecSize = recSize - attr.LineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) - attr.LineInfo = internal.NewSlicePointer(bytes) + attr.FuncInfoRecSize = uint32(binary.Size(btf.FuncInfo{})) + attr.FuncInfoCnt = uint32(len(fib)) / attr.FuncInfoRecSize + attr.FuncInfo = sys.NewSlicePointer(fib) - recSize, bytes, err = spec.BTF.FuncInfos() + lib, err := marshalLineInfos(layout) if err != nil { - return nil, fmt.Errorf("get BTF function infos: %w", err) + return nil, err } - attr.FuncInfoRecSize = recSize - attr.FuncInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) - attr.FuncInfo = internal.NewSlicePointer(bytes) + attr.LineInfoRecSize = uint32(binary.Size(btf.LineInfo{})) + attr.LineInfoCnt = uint32(len(lib)) / attr.LineInfoRecSize + attr.LineInfo = sys.NewSlicePointer(lib) } } @@ -236,15 +337,15 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand return nil, err } - buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize)) + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) err = insns.Marshal(buf, internal.NativeEndian) if err != nil { return nil, err } bytecode := buf.Bytes() - attr.Instructions = internal.NewSlicePointer(bytecode) - attr.InsCount = uint32(len(bytecode) / asm.InstructionSize) + attr.Insns = 
sys.NewSlicePointer(bytecode) + attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize) if spec.AttachTo != "" { if spec.AttachTarget != nil { @@ -274,7 +375,7 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand return nil, err } if target != nil { - attr.AttachBTFID = uint32(target.ID()) + attr.AttachBtfId = uint32(target.ID()) } if spec.AttachTarget != nil { attr.AttachProgFd = uint32(spec.AttachTarget.FD()) @@ -291,12 +392,12 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand logBuf = make([]byte, logSize) attr.LogLevel = opts.LogLevel attr.LogSize = uint32(len(logBuf)) - attr.LogBuf = internal.NewSlicePointer(logBuf) + attr.LogBuf = sys.NewSlicePointer(logBuf) } - fd, err := internal.BPFProgLoad(attr) + fd, err := sys.ProgLoad(attr) if err == nil { - return &Program{internal.CString(logBuf), fd, spec.Name, "", spec.Type}, nil + return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil } logErr := err @@ -305,18 +406,18 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand logBuf = make([]byte, logSize) attr.LogLevel = 1 attr.LogSize = uint32(len(logBuf)) - attr.LogBuf = internal.NewSlicePointer(logBuf) + attr.LogBuf = sys.NewSlicePointer(logBuf) - fd, logErr = internal.BPFProgLoad(attr) + fd, logErr = sys.ProgLoad(attr) if logErr == nil { fd.Close() } } - if errors.Is(logErr, unix.EPERM) && logBuf[0] == 0 { + if errors.Is(logErr, unix.EPERM) && len(logBuf) > 0 && logBuf[0] == 0 { // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can // check that the log is empty to reduce false positives. - return nil, fmt.Errorf("load program: %w (MEMLOCK bay be too low, consider rlimit.RemoveMemlock)", logErr) + return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", logErr) } err = internal.ErrorWithLog(err, logBuf, logErr) @@ -332,18 +433,21 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand // // Requires at least Linux 4.10. func NewProgramFromFD(fd int) (*Program, error) { - if fd < 0 { - return nil, errors.New("invalid fd") + f, err := sys.NewFD(fd) + if err != nil { + return nil, err } - return newProgramFromFD(internal.NewFD(uint32(fd))) + return newProgramFromFD(f) } // NewProgramFromID returns the program for a given id. // // Returns ErrNotExist, if there is no eBPF program with the given id. func NewProgramFromID(id ProgramID) (*Program, error) { - fd, err := internal.BPFObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id)) + fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{ + Id: uint32(id), + }) if err != nil { return nil, fmt.Errorf("get program by id: %w", err) } @@ -351,7 +455,7 @@ func NewProgramFromID(id ProgramID) (*Program, error) { return newProgramFromFD(fd) } -func newProgramFromFD(fd *internal.FD) (*Program, error) { +func newProgramFromFD(fd *sys.FD) (*Program, error) { info, err := newProgramInfoFromFd(fd) if err != nil { fd.Close() @@ -384,14 +488,7 @@ func (p *Program) Info() (*ProgramInfo, error) { // // It is invalid to call this function after Close has been called. func (p *Program) FD() int { - fd, err := p.fd.Value() - if err != nil { - // Best effort: -1 is the number most likely to be an - // invalid file descriptor. - return -1 - } - - return int(fd) + return p.fd.Int() } // Clone creates a duplicate of the Program. 
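For context on the public API behind this prog.go rework: the sketch below is not part of the patch, just a minimal, hypothetical caller-side example of loading a program through ebpf.NewProgramWithOptions and reading the verifier log that the retry logic above populates. It assumes a kernel with eBPF support and load permissions; the program name, stub instructions, and license string are illustrative only.

    package main

    import (
        "log"

        "github.com/cilium/ebpf"
        "github.com/cilium/ebpf/asm"
    )

    func main() {
        spec := &ebpf.ProgramSpec{
            Name: "example_prog", // illustrative name, not from this patch
            Type: ebpf.SocketFilter,
            Instructions: asm.Instructions{
                asm.Mov.Imm(asm.R0, 0), // set the program's return value to 0
                asm.Return(),
            },
            License: "MIT",
        }

        // A non-zero LogLevel asks the verifier for a log even when loading succeeds.
        prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{LogLevel: 1})
        if err != nil {
            log.Fatalf("load program: %v", err)
        }
        defer prog.Close()

        log.Printf("verifier log:\n%s", prog.VerifierLog)
    }

If the load fails with EPERM and an empty log, the rewritten error path above points at RLIMIT_MEMLOCK and suggests rlimit.RemoveMemlock as the remedy.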
@@ -505,13 +602,13 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() e // Programs require at least 14 bytes input in := make([]byte, 14) - attr := bpfProgTestRunAttr{ - fd: uint32(prog.FD()), - dataSizeIn: uint32(len(in)), - dataIn: internal.NewSlicePointer(in), + attr := sys.ProgRunAttr{ + ProgFd: uint32(prog.FD()), + DataSizeIn: uint32(len(in)), + DataIn: sys.NewSlicePointer(in), } - err = bpfProgTestRun(&attr) + err = sys.ProgRun(&attr) if errors.Is(err, unix.EINVAL) { // Check for EINVAL specifically, rather than err != nil since we // otherwise misdetect due to insufficient permissions. @@ -548,22 +645,17 @@ func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, // See https://patchwork.ozlabs.org/cover/1006822/ out := make([]byte, len(in)+outputPad) - fd, err := p.fd.Value() - if err != nil { - return 0, nil, 0, err - } - - attr := bpfProgTestRunAttr{ - fd: fd, - dataSizeIn: uint32(len(in)), - dataSizeOut: uint32(len(out)), - dataIn: internal.NewSlicePointer(in), - dataOut: internal.NewSlicePointer(out), - repeat: uint32(repeat), + attr := sys.ProgRunAttr{ + ProgFd: p.fd.Uint(), + DataSizeIn: uint32(len(in)), + DataSizeOut: uint32(len(out)), + DataIn: sys.NewSlicePointer(in), + DataOut: sys.NewSlicePointer(out), + Repeat: uint32(repeat), } for { - err = bpfProgTestRun(&attr) + err := sys.ProgRun(&attr) if err == nil { break } @@ -578,15 +670,15 @@ func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, return 0, nil, 0, fmt.Errorf("can't run test: %w", err) } - if int(attr.dataSizeOut) > cap(out) { + if int(attr.DataSizeOut) > cap(out) { // Houston, we have a problem. The program created more data than we allocated, // and the kernel wrote past the end of our buffer. panic("kernel wrote past end of output buffer") } - out = out[:int(attr.dataSizeOut)] + out = out[:int(attr.DataSizeOut)] - total := time.Duration(attr.duration) * time.Nanosecond - return attr.retval, out, total, nil + total := time.Duration(attr.Duration) * time.Nanosecond + return attr.Retval, out, total, nil } func unmarshalProgram(buf []byte) (*Program, error) { @@ -605,13 +697,8 @@ func marshalProgram(p *Program, length int) ([]byte, error) { return nil, fmt.Errorf("can't marshal program to %d bytes", length) } - value, err := p.fd.Value() - if err != nil { - return nil, err - } - buf := make([]byte, 4) - internal.NativeEndian.PutUint32(buf, value) + internal.NativeEndian.PutUint32(buf, p.fd.Uint()) return buf, nil } @@ -623,19 +710,14 @@ func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error { return errors.New("invalid fd") } - pfd, err := p.fd.Value() - if err != nil { - return err - } - - attr := internal.BPFProgAttachAttr{ + attr := sys.ProgAttachAttr{ TargetFd: uint32(fd), - AttachBpfFd: pfd, + AttachBpfFd: p.fd.Uint(), AttachType: uint32(typ), AttachFlags: uint32(flags), } - return internal.BPFProgAttach(&attr) + return sys.ProgAttach(&attr) } // Detach a Program. @@ -650,25 +732,23 @@ func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error { return errors.New("flags must be zero") } - pfd, err := p.fd.Value() - if err != nil { - return err - } - - attr := internal.BPFProgDetachAttr{ + attr := sys.ProgAttachAttr{ TargetFd: uint32(fd), - AttachBpfFd: pfd, + AttachBpfFd: p.fd.Uint(), AttachType: uint32(typ), } - return internal.BPFProgDetach(&attr) + return sys.ProgAttach(&attr) } // LoadPinnedProgram loads a Program from a BPF file. // // Requires at least Linux 4.11. 
func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) if err != nil { return nil, err } @@ -702,19 +782,32 @@ func SanitizeName(name string, replacement rune) string { // // Returns ErrNotExist, if there is no next eBPF program. func ProgramGetNextID(startID ProgramID) (ProgramID, error) { - id, err := objGetNextID(internal.BPF_PROG_GET_NEXT_ID, uint32(startID)) - return ProgramID(id), err + attr := &sys.ProgGetNextIdAttr{Id: uint32(startID)} + return ProgramID(attr.NextId), sys.ProgGetNextId(attr) } // ID returns the systemwide unique ID of the program. // // Deprecated: use ProgramInfo.ID() instead. func (p *Program) ID() (ProgramID, error) { - info, err := bpfGetProgInfoByFD(p.fd, nil) - if err != nil { + var info sys.ProgInfo + if err := sys.ObjInfo(p.fd, &info); err != nil { return ProgramID(0), err } - return ProgramID(info.id), nil + return ProgramID(info.Id), nil +} + +// BindMap binds map to the program and is only released once program is released. +// +// This may be used in cases where metadata should be associated with the program +// which otherwise does not contain any references to the map. +func (p *Program) BindMap(m *Map) error { + attr := &sys.ProgBindMapAttr{ + ProgFd: uint32(p.FD()), + MapFd: uint32(m.FD()), + } + + return sys.ProgBindMap(attr) } func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.Type, error) { @@ -723,7 +816,11 @@ func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachTyp a AttachType } - var typeName, featureName string + var ( + typeName, featureName string + isBTFTypeFunc = true + ) + switch (match{progType, attachType}) { case match{LSM, AttachLSMMac}: typeName = "bpf_lsm_" + name @@ -734,26 +831,50 @@ func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachTyp case match{Extension, AttachNone}: typeName = name featureName = fmt.Sprintf("freplace %s", name) + case match{Tracing, AttachTraceFEntry}: + typeName = name + featureName = fmt.Sprintf("fentry %s", name) + case match{Tracing, AttachTraceFExit}: + typeName = name + featureName = fmt.Sprintf("fexit %s", name) + case match{Tracing, AttachModifyReturn}: + typeName = name + featureName = fmt.Sprintf("fmod_ret %s", name) + case match{Tracing, AttachTraceRawTp}: + typeName = fmt.Sprintf("btf_trace_%s", name) + featureName = fmt.Sprintf("raw_tp %s", name) + isBTFTypeFunc = false default: return nil, nil } + var ( + target btf.Type + err error + ) if spec == nil { - var err error spec, err = btf.LoadKernelSpec() if err != nil { return nil, fmt.Errorf("load kernel spec: %w", err) } } - var target *btf.Func - err := spec.FindType(typeName, &target) - if errors.Is(err, btf.ErrNotFound) { - return nil, &internal.UnsupportedFeatureError{ - Name: featureName, - } + if isBTFTypeFunc { + var targetFunc *btf.Func + err = spec.TypeByName(typeName, &targetFunc) + target = targetFunc + } else { + var targetTypedef *btf.Typedef + err = spec.TypeByName(typeName, &targetTypedef) + target = targetTypedef } + if err != nil { + if errors.Is(err, btf.ErrNotFound) { + return nil, &internal.UnsupportedFeatureError{ + Name: featureName, + } + } return nil, fmt.Errorf("resolve BTF for %s: %w", featureName, err) } diff --git a/vendor/github.com/cilium/ebpf/run-tests.sh b/vendor/github.com/cilium/ebpf/run-tests.sh index 
a079edc7e18..472bc4f1a75 100644 --- a/vendor/github.com/cilium/ebpf/run-tests.sh +++ b/vendor/github.com/cilium/ebpf/run-tests.sh @@ -52,7 +52,7 @@ if [[ "${1:-}" = "--exec-vm" ]]; then --rwdir="${testdir}=${testdir}" \ --rodir=/run/input="${input}" \ --rwdir=/run/output="${output}" \ - --script-sh "PATH=\"$PATH\" \"$script\" --exec-test $cmd" \ + --script-sh "PATH=\"$PATH\" CI_MAX_KERNEL_VERSION="${CI_MAX_KERNEL_VERSION:-}" \"$script\" --exec-test $cmd" \ --kopt possible_cpus=2; then # need at least two CPUs for some tests exit 23 fi @@ -90,22 +90,27 @@ fi shift readonly kernel="linux-${kernel_version}.bz" -readonly selftests="linux-${kernel_version}-selftests-bpf.bz" +readonly selftests="linux-${kernel_version}-selftests-bpf.tgz" readonly input="$(mktemp -d)" readonly tmp_dir="${TMPDIR:-/tmp}" readonly branch="${BRANCH:-master}" fetch() { echo Fetching "${1}" - wget -nv -N -P "${tmp_dir}" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}" + pushd "${tmp_dir}" > /dev/null + curl -s -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}" + local ret=$? + popd > /dev/null + return $ret } fetch "${kernel}" cp "${tmp_dir}/${kernel}" "${input}/bzImage" if fetch "${selftests}"; then + echo "Decompressing selftests" mkdir "${input}/bpf" - tar --strip-components=4 -xjf "${tmp_dir}/${selftests}" -C "${input}/bpf" + tar --strip-components=4 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf" else echo "No selftests found, disabling" fi @@ -117,6 +122,8 @@ fi export GOFLAGS=-mod=readonly export CGO_ENABLED=0 +# LINUX_VERSION_CODE test compares this to discovered value. +export KERNEL_VERSION="${kernel_version}" echo Testing on "${kernel_version}" go test -exec "$script --exec-vm $input" "${args[@]}" diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go index f8cb5f0e0cd..ccbbe096e8c 100644 --- a/vendor/github.com/cilium/ebpf/syscalls.go +++ b/vendor/github.com/cilium/ebpf/syscalls.go @@ -5,10 +5,10 @@ import ( "errors" "fmt" "os" - "unsafe" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -38,108 +38,9 @@ func invalidBPFObjNameChar(char rune) bool { } } -type bpfMapOpAttr struct { - mapFd uint32 - padding uint32 - key internal.Pointer - value internal.Pointer - flags uint64 -} - -type bpfBatchMapOpAttr struct { - inBatch internal.Pointer - outBatch internal.Pointer - keys internal.Pointer - values internal.Pointer - count uint32 - mapFd uint32 - elemFlags uint64 - flags uint64 -} - -type bpfMapInfo struct { - map_type uint32 // since 4.12 1e2709769086 - id uint32 - key_size uint32 - value_size uint32 - max_entries uint32 - map_flags uint32 - name internal.BPFObjName // since 4.15 ad5b177bd73f - ifindex uint32 // since 4.16 52775b33bb50 - btf_vmlinux_value_type_id uint32 // since 5.6 85d33df357b6 - netns_dev uint64 // since 4.16 52775b33bb50 - netns_ino uint64 - btf_id uint32 // since 4.18 78958fca7ead - btf_key_type_id uint32 // since 4.18 9b2cf328b2ec - btf_value_type_id uint32 -} - -type bpfProgInfo struct { - prog_type uint32 - id uint32 - tag [unix.BPF_TAG_SIZE]byte - jited_prog_len uint32 - xlated_prog_len uint32 - jited_prog_insns internal.Pointer - xlated_prog_insns internal.Pointer - load_time uint64 // since 4.15 cb4d2b3f03d8 - created_by_uid uint32 - nr_map_ids uint32 // since 4.15 cb4d2b3f03d8 - map_ids internal.Pointer - name internal.BPFObjName // since 4.15 067cae47771c - 
ifindex uint32 - gpl_compatible uint32 - netns_dev uint64 - netns_ino uint64 - nr_jited_ksyms uint32 - nr_jited_func_lens uint32 - jited_ksyms internal.Pointer - jited_func_lens internal.Pointer - btf_id uint32 - func_info_rec_size uint32 - func_info internal.Pointer - nr_func_info uint32 - nr_line_info uint32 - line_info internal.Pointer - jited_line_info internal.Pointer - nr_jited_line_info uint32 - line_info_rec_size uint32 - jited_line_info_rec_size uint32 - nr_prog_tags uint32 - prog_tags internal.Pointer - run_time_ns uint64 - run_cnt uint64 -} - -type bpfProgTestRunAttr struct { - fd uint32 - retval uint32 - dataSizeIn uint32 - dataSizeOut uint32 - dataIn internal.Pointer - dataOut internal.Pointer - repeat uint32 - duration uint32 -} - -type bpfMapFreezeAttr struct { - mapFd uint32 -} - -type bpfObjGetNextIDAttr struct { - startID uint32 - nextID uint32 - openFlags uint32 -} - -func bpfProgTestRun(attr *bpfProgTestRunAttr) error { - _, err := internal.BPF(internal.BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error { - _, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(ArrayOfMaps), + _, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(ArrayOfMaps), KeySize: 4, ValueSize: 4, MaxEntries: 1, @@ -158,12 +59,12 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error { var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error { // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check. - m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - Flags: unix.BPF_F_RDONLY_PROG, + MapFlags: unix.BPF_F_RDONLY_PROG, }) if err != nil { return internal.ErrNotSupported @@ -174,12 +75,12 @@ var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error { // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps. - m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - Flags: unix.BPF_F_MMAPABLE, + MapFlags: unix.BPF_F_MMAPABLE, }) if err != nil { return internal.ErrNotSupported @@ -190,12 +91,12 @@ var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error { // This checks BPF_F_INNER_MAP, which appeared in 5.10. 
- m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - Flags: unix.BPF_F_INNER_MAP, + MapFlags: unix.BPF_F_INNER_MAP, }) if err != nil { return internal.ErrNotSupported @@ -204,111 +105,21 @@ var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error { return nil }) -func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - } - _, err = internal.BPF(internal.BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapLookupAndDelete(m *internal.FD, key, valueOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - } - _, err = internal.BPF(internal.BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapUpdateElem(m *internal.FD, key, valueOut internal.Pointer, flags uint64) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - flags: flags, - } - _, err = internal.BPF(internal.BPF_MAP_UPDATE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapDeleteElem(m *internal.FD, key internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - } - _, err = internal.BPF(internal.BPF_MAP_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapGetNextKey(m *internal.FD, key, nextKeyOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: nextKeyOut, - } - _, err = internal.BPF(internal.BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func objGetNextID(cmd internal.BPFCmd, start uint32) (uint32, error) { - attr := bpfObjGetNextIDAttr{ - startID: start, - } - _, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return attr.nextID, err -} - -func bpfMapBatch(cmd internal.BPFCmd, m *internal.FD, inBatch, outBatch, keys, values internal.Pointer, count uint32, opts *BatchOptions) (uint32, error) { - fd, err := m.Value() +var haveNoPreallocMaps = internal.FeatureTest("prealloc maps", "4.6", func() error { + // This checks BPF_F_NO_PREALLOC, which appeared in 4.6. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Hash), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: unix.BPF_F_NO_PREALLOC, + }) if err != nil { - return 0, err - } - - attr := bpfBatchMapOpAttr{ - inBatch: inBatch, - outBatch: outBatch, - keys: keys, - values: values, - count: count, - mapFd: fd, - } - if opts != nil { - attr.elemFlags = opts.ElemFlags - attr.flags = opts.Flags + return internal.ErrNotSupported } - _, err = internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - // always return count even on an error, as things like update might partially be fulfilled. 
- return attr.count, wrapMapError(err) -} + _ = m.Close() + return nil +}) func wrapMapError(err error) error { if err == nil { @@ -316,15 +127,15 @@ func wrapMapError(err error) error { } if errors.Is(err, unix.ENOENT) { - return internal.SyscallError(ErrKeyNotExist, unix.ENOENT) + return sys.Error(ErrKeyNotExist, unix.ENOENT) } if errors.Is(err, unix.EEXIST) { - return internal.SyscallError(ErrKeyExist, unix.EEXIST) + return sys.Error(ErrKeyExist, unix.EEXIST) } if errors.Is(err, unix.ENOTSUPP) { - return internal.SyscallError(ErrNotSupported, unix.ENOTSUPP) + return sys.Error(ErrNotSupported, unix.ENOTSUPP) } if errors.Is(err, unix.E2BIG) { @@ -334,51 +145,16 @@ func wrapMapError(err error) error { return err } -func bpfMapFreeze(m *internal.FD) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapFreezeAttr{ - mapFd: fd, - } - _, err = internal.BPF(internal.BPF_MAP_FREEZE, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return err -} - -func bpfGetProgInfoByFD(fd *internal.FD, ids []MapID) (*bpfProgInfo, error) { - var info bpfProgInfo - if len(ids) > 0 { - info.nr_map_ids = uint32(len(ids)) - info.map_ids = internal.NewPointer(unsafe.Pointer(&ids[0])) - } - - if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil { - return nil, fmt.Errorf("can't get program info: %w", err) - } - return &info, nil -} - -func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) { - var info bpfMapInfo - err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) - if err != nil { - return nil, fmt.Errorf("can't get map info: %w", err) - } - return &info, nil -} - var haveObjName = internal.FeatureTest("object names", "4.15", func() error { - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Array), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapName: internal.NewBPFObjName("feature_test"), + MapName: sys.NewObjName("feature_test"), } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { return internal.ErrNotSupported } @@ -392,15 +168,15 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() return err } - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Array), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapName: internal.NewBPFObjName(".test"), + MapName: sys.NewObjName(".test"), } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { return internal.ErrNotSupported } @@ -411,24 +187,30 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error { var maxEntries uint32 = 2 - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Hash), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Hash), KeySize: 4, ValueSize: 4, MaxEntries: maxEntries, } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { return internal.ErrNotSupported } defer fd.Close() + keys := []uint32{1, 2} values := []uint32{3, 4} kp, _ := marshalPtr(keys, 8) vp, _ := marshalPtr(values, 8) - nilPtr := internal.NewPointer(nil) - _, err = bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, fd, nilPtr, nilPtr, kp, vp, maxEntries, nil) + + err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{ + MapFd: fd.Uint(), + Keys: kp, + Values: vp, + Count: maxEntries, + }) if err != nil { return 
internal.ErrNotSupported } @@ -444,17 +226,17 @@ var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", f asm.FnProbeReadKernel.Call(), asm.Return(), } - buf := bytes.NewBuffer(make([]byte, 0, len(insns)*asm.InstructionSize)) + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) if err := insns.Marshal(buf, internal.NativeEndian); err != nil { return err } bytecode := buf.Bytes() - fd, err := internal.BPFProgLoad(&internal.BPFProgLoadAttr{ - ProgType: uint32(Kprobe), - License: internal.NewStringPointer("GPL"), - Instructions: internal.NewSlicePointer(bytecode), - InsCount: uint32(len(bytecode) / asm.InstructionSize), + fd, err := sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(Kprobe), + License: sys.NewStringPointer("GPL"), + Insns: sys.NewSlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), }) if err != nil { return internal.ErrNotSupported diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go index 84b83f9f985..a27b4424745 100644 --- a/vendor/github.com/cilium/ebpf/types.go +++ b/vendor/github.com/cilium/ebpf/types.go @@ -11,7 +11,7 @@ import ( type MapType uint32 // Max returns the latest supported MapType. -func (_ MapType) Max() MapType { +func (MapType) Max() MapType { return maxMapType - 1 } @@ -103,12 +103,6 @@ const ( maxMapType ) -// Deprecated: StructOpts was a typo, use StructOpsMap instead. -// -// Declared as a variable to prevent stringer from picking it up -// as an enum value. -var StructOpts MapType = StructOpsMap - // hasPerCPUValue returns true if the Map stores a value per CPU. func (mt MapType) hasPerCPUValue() bool { return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage @@ -126,11 +120,22 @@ func (mt MapType) canStoreProgram() bool { return mt == ProgramArray } +// hasBTF returns true if the map type supports BTF key/value metadata. +func (mt MapType) hasBTF() bool { + switch mt { + case PerfEventArray, CGroupArray, StackTrace, ArrayOfMaps, HashOfMaps, DevMap, + DevMapHash, CPUMap, XSKMap, SockMap, SockHash, Queue, Stack, RingBuf: + return false + default: + return true + } +} + // ProgramType of the eBPF program type ProgramType uint32 // Max return the latest supported ProgramType. 
-func (_ ProgramType) Max() ProgramType { +func (ProgramType) Max() ProgramType { return maxProgramType - 1 } @@ -167,6 +172,7 @@ const ( Extension LSM SkLookup + Syscall maxProgramType ) diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go index 81cbc9efde0..e80b948b096 100644 --- a/vendor/github.com/cilium/ebpf/types_string.go +++ b/vendor/github.com/cilium/ebpf/types_string.go @@ -86,12 +86,13 @@ func _() { _ = x[Extension-28] _ = x[LSM-29] _ = x[SkLookup-30] - _ = x[maxProgramType-31] + _ = x[Syscall-31] + _ = x[maxProgramType-32] } -const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupmaxProgramType" +const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallmaxProgramType" -var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 308} +var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 315} func (i ProgramType) String() string { if i >= ProgramType(len(_ProgramType_index)-1) { diff --git a/vendor/modules.txt b/vendor/modules.txt index ce2cb8ac820..faa81229ca3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -2,12 +2,13 @@ ## explicit github.com/checkpoint-restore/go-criu/v5 github.com/checkpoint-restore/go-criu/v5/rpc -# github.com/cilium/ebpf v0.7.0 +# github.com/cilium/ebpf v0.8.1 ## explicit github.com/cilium/ebpf github.com/cilium/ebpf/asm github.com/cilium/ebpf/internal github.com/cilium/ebpf/internal/btf +github.com/cilium/ebpf/internal/sys github.com/cilium/ebpf/internal/unix github.com/cilium/ebpf/link # github.com/containerd/console v1.0.3
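For context on the map batch API whose syscall plumbing moves into internal/sys in this bump: the sketch below is not part of the patch, just a minimal, hypothetical usage example. It assumes a Linux 5.6+ kernel (per the "map batch api" feature test above) and sufficient privileges; the map name, sizes, and sample data are illustrative only.

    package main

    import (
        "errors"
        "log"

        "github.com/cilium/ebpf"
    )

    func main() {
        m, err := ebpf.NewMap(&ebpf.MapSpec{
            Name:       "example_hash", // illustrative name, not from this patch
            Type:       ebpf.Hash,
            KeySize:    4,
            ValueSize:  8,
            MaxEntries: 16,
        })
        if err != nil {
            log.Fatalf("create map: %v", err)
        }
        defer m.Close()

        // BatchUpdate writes several entries with a single BPF_MAP_UPDATE_BATCH call.
        if _, err := m.BatchUpdate([]uint32{1, 2, 3}, []uint64{10, 20, 30}, nil); err != nil {
            log.Fatalf("batch update: %v", err)
        }

        // BatchLookup reads up to len(keys) entries; ErrKeyNotExist marks the end
        // of the map rather than a failure, and the partial count is still valid.
        var cursor uint32
        keys := make([]uint32, 8)
        vals := make([]uint64, 8)
        n, err := m.BatchLookup(nil, &cursor, keys, vals, nil)
        if err != nil && !errors.Is(err, ebpf.ErrKeyNotExist) {
            log.Fatalf("batch lookup: %v", err)
        }
        log.Printf("read %d entries: keys=%v values=%v", n, keys[:n], vals[:n])
    }

This mirrors the reworked batchLookup above, which returns attr.Count together with the wrapped ENOENT so callers can consume partial results before stopping.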