diff --git a/.github/workflows/bash-completion.yaml b/.github/workflows/bash-completion.yaml new file mode 100644 index 00000000..bb80b316 --- /dev/null +++ b/.github/workflows/bash-completion.yaml @@ -0,0 +1,109 @@ +name: bash-completion + +on: + workflow_dispatch: + inputs: + SHA: + description: "SHA under test" + required: true + pull_request: + paths: + - '.github/workflows/bash-completion.yaml' + - 'bash-completion/bpftool' + - 'libbpf/**' + - 'src/**' + push: + branches: + - master + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.event.after }} + cancel-in-progress: true + +jobs: + bash-completion: + name: Bash completion + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + path: 'bpftool' + submodules: recursive + ref: ${{ inputs.SHA || github.sha }} + + - name: Checkout bash-completion repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: 'scop/bash-completion' + ref: '81b0f8c4dfafd6219c95ffa1defad7ff34394b0f' # v2.13.0 + path: 'bash-completion' + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y \ + libbpf-dev libelf-dev llvm + + - name: Set up Python + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + with: + python-version: '3.10' + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + sudo pip install pytest pytest-xdist psutil + + - name: Build and install bpftool + working-directory: 'bpftool' + run: | + sudo CLANG='/usr/bin/false' LLVM_CONFIG="$CLANG" \ + make -j -C src install + + #- name: Build and load sample BPF program, map + # working-directory: '/tmp' + # run: | + # cat > test.c << EOF + # #include + # #include + + # struct { + # __uint(type, BPF_MAP_TYPE_ARRAY); + # __type(key, __u32); + # __type(value, __u32); + # __uint(max_entries, 1); + # } 
bash_comp_map SEC(".maps"); + + # int bash_comp_test(__attribute__((unused)) void *ctx) + # { + # __u32 key = 0; + # __u32 *value; + + # value = bpf_map_lookup_elem(&bash_comp_map, &key); + # if (!value) + # return 0; + + # return *value; + # } + # EOF + # cat test.c + # clang -g -O2 -emit-llvm -c test.c -o - | \ + # llc -march=bpf -filetype=obj -o test.o + # sudo bpftool prog load test.o /sys/fs/bpf/bash-completion-test type xdp + # sudo bpftool prog list + # sudo bpftool map list + + - name: Move Bash completion files to bash-completion repository + run: | + mv -t bash-completion/completions/ bpftool/bash-completion/bpftool + mv -t bash-completion/test/t/ bpftool/scripts/test_bpftool.py + # Shorten timeout (defaults to 30), too long to debug CI + sed -i '/bash.expect(/,/]$/ s/]$/] , timeout=3/' \ + bash-completion/test/t/conftest.py + + - name: Run Bash completion tests + working-directory: 'bash-completion' + run: | + sudo PYTHONDONTWRITEBYTECODE=1 \ + pytest -n auto --color=yes -vv test/t/test_bpftool.py diff --git a/BPF-CHECKPOINT-COMMIT b/BPF-CHECKPOINT-COMMIT index 70bb5d7b..8236b54f 100644 --- a/BPF-CHECKPOINT-COMMIT +++ b/BPF-CHECKPOINT-COMMIT @@ -1 +1 @@ -9077fc228f09c9f975c498c55f5d2e882cd0da59 +443574b033876c85a35de4c65c14f7fe092222b2 diff --git a/CHECKPOINT-COMMIT b/CHECKPOINT-COMMIT index a1969133..20a84b7a 100644 --- a/CHECKPOINT-COMMIT +++ b/CHECKPOINT-COMMIT @@ -1 +1 @@ -2147c8d07e1abc8dfc3433ca18eed5295e230ede +2a24e2485722b0e12e17a2bd473bd15c9e420bdb diff --git a/bash-completion/bpftool b/bash-completion/bpftool index 085bf18f..04afe2ac 100644 --- a/bash-completion/bpftool +++ b/bash-completion/bpftool @@ -106,19 +106,19 @@ _bpftool_get_link_ids() _bpftool_get_obj_map_names() { - local obj + local obj maps obj=$1 - maps=$(objdump -j maps -t $obj 2>/dev/null | \ - command awk '/g . maps/ {print $NF}') + maps=$(objdump -j .maps -t $obj 2>/dev/null | \ + command awk '/g . 
.maps/ {print $NF}') COMPREPLY+=( $( compgen -W "$maps" -- "$cur" ) ) } _bpftool_get_obj_map_idxs() { - local obj + local obj nmaps obj=$1 @@ -136,7 +136,7 @@ _sysfs_get_netdevs() # Retrieve type of the map that we are operating on. _bpftool_map_guess_map_type() { - local keyword ref + local keyword idx ref="" for (( idx=3; idx < ${#words[@]}-1; idx++ )); do case "${words[$((idx-2))]}" in lookup|update) @@ -255,8 +255,9 @@ _bpftool_map_update_get_name() _bpftool() { - local cur prev words objword json=0 - _init_completion || return + local cur prev words cword comp_args + local json=0 + _init_completion -- "$@" || return # Deal with options if [[ ${words[cword]} == -* ]]; then @@ -293,7 +294,7 @@ _bpftool() esac # Remove all options so completions don't have to deal with them. - local i + local i pprev for (( i=1; i < ${#words[@]}; )); do if [[ ${words[i]::1} == - ]] && [[ ${words[i]} != "-B" ]] && [[ ${words[i]} != "--base-btf" ]]; then @@ -307,7 +308,7 @@ _bpftool() prev=${words[cword - 1]} pprev=${words[cword - 2]} - local object=${words[1]} command=${words[2]} + local object=${words[1]} if [[ -z $object || $cword -eq 1 ]]; then case $cur in @@ -324,8 +325,12 @@ _bpftool() esac fi + local command=${words[2]} [[ $command == help ]] && return 0 + local MAP_TYPE='id pinned name' + local PROG_TYPE='id pinned tag name' + # Completion depends on object and command in use case $object in prog) @@ -346,8 +351,6 @@ _bpftool() ;; esac - local PROG_TYPE='id pinned tag name' - local MAP_TYPE='id pinned name' local METRIC_TYPE='cycles instructions l1d_loads llc_misses \ itlb_misses dtlb_misses' case $command in @@ -457,7 +460,7 @@ _bpftool() obj=${words[3]} if [[ ${words[-4]} == "map" ]]; then - COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) ) return 0 fi if [[ ${words[-3]} == "map" ]]; then @@ -480,13 +483,13 @@ _bpftool() action tracepoint raw_tracepoint \ xdp perf_event cgroup/skb cgroup/sock \ cgroup/dev lwt_in 
lwt_out lwt_xmit \ - lwt_seg6local sockops sk_skb sk_msg \ - lirc_mode2 cgroup/bind4 cgroup/bind6 \ - cgroup/connect4 cgroup/connect6 \ - cgroup/getpeername4 cgroup/getpeername6 \ - cgroup/getsockname4 cgroup/getsockname6 \ - cgroup/sendmsg4 cgroup/sendmsg6 \ - cgroup/recvmsg4 cgroup/recvmsg6 \ + lwt_seg6local sockops sk_skb sk_msg lirc_mode2 \ + cgroup/bind4 cgroup/bind6 \ + cgroup/connect4 cgroup/connect6 cgroup/connect_unix \ + cgroup/getpeername4 cgroup/getpeername6 cgroup/getpeername_unix \ + cgroup/getsockname4 cgroup/getsockname6 cgroup/getsockname_unix \ + cgroup/sendmsg4 cgroup/sendmsg6 cgroup/sendmsg_unix \ + cgroup/recvmsg4 cgroup/recvmsg6 cgroup/recvmsg_unix \ cgroup/post_bind4 cgroup/post_bind6 \ cgroup/sysctl cgroup/getsockopt \ cgroup/setsockopt cgroup/sock_release struct_ops \ @@ -541,20 +544,9 @@ _bpftool() COMPREPLY=( $( compgen -W "$METRIC_TYPE duration" -- "$cur" ) ) return 0 ;; - 6) - case $prev in - duration) - return 0 - ;; - *) - COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) ) - return 0 - ;; - esac - return 0 - ;; *) - COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) ) + [[ $prev == duration ]] && return 0 + _bpftool_once_attr "$METRIC_TYPE" return 0 ;; esac @@ -612,7 +604,7 @@ _bpftool() return 0 ;; register) - _filedir + [[ $prev == $command ]] && _filedir return 0 ;; *) @@ -638,9 +630,12 @@ _bpftool() pinned) _filedir ;; - *) + map) _bpftool_one_of_list $MAP_TYPE ;; + *) + _bpftool_once_attr 'map' + ;; esac return 0 ;; @@ -652,7 +647,6 @@ _bpftool() esac ;; map) - local MAP_TYPE='id pinned name' case $command in show|list|dump|peek|pop|dequeue|freeze) case $prev in @@ -793,13 +787,11 @@ _bpftool() # map, depending on the type of the map to update. 
case "$(_bpftool_map_guess_map_type)" in array_of_maps|hash_of_maps) - local MAP_TYPE='id pinned name' COMPREPLY+=( $( compgen -W "$MAP_TYPE" \ -- "$cur" ) ) return 0 ;; prog_array) - local PROG_TYPE='id pinned tag name' COMPREPLY+=( $( compgen -W "$PROG_TYPE" \ -- "$cur" ) ) return 0 @@ -821,7 +813,7 @@ _bpftool() esac _bpftool_once_attr 'key' - local UPDATE_FLAGS='any exist noexist' + local UPDATE_FLAGS='any exist noexist' idx for (( idx=3; idx < ${#words[@]}-1; idx++ )); do if [[ ${words[idx]} == 'value' ]]; then # 'value' is present, but is not the last @@ -893,7 +885,6 @@ _bpftool() esac ;; btf) - local PROG_TYPE='id pinned tag name' local MAP_TYPE='id pinned name' case $command in dump) @@ -1033,7 +1024,6 @@ _bpftool() local BPFTOOL_CGROUP_ATTACH_TYPES="$(bpftool feature list_builtins attach_types 2>/dev/null | \ grep '^cgroup_')" local ATTACH_FLAGS='multi override' - local PROG_TYPE='id pinned tag name' # Check for $prev = $command first if [ $prev = $command ]; then _filedir @@ -1086,7 +1076,6 @@ _bpftool() esac ;; net) - local PROG_TYPE='id pinned tag name' local ATTACH_TYPES='xdp xdpgeneric xdpdrv xdpoffload' case $command in show|list) @@ -1193,14 +1182,14 @@ _bpftool() pin|detach) if [[ $prev == "$command" ]]; then COMPREPLY=( $( compgen -W "$LINK_TYPE" -- "$cur" ) ) - else + elif [[ $pprev == "$command" ]]; then _filedir fi return 0 ;; *) [[ $prev == $object ]] && \ - COMPREPLY=( $( compgen -W 'help pin show list' -- "$cur" ) ) + COMPREPLY=( $( compgen -W 'help pin detach show list' -- "$cur" ) ) ;; esac ;; diff --git a/docs/Makefile b/docs/Makefile index 8355aea0..f402c4a4 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -31,9 +31,9 @@ see_also = $(subst " ",, \ "\n" \ "SEE ALSO\n" \ "========\n" \ - "\t**bpf**\ (2),\n" \ - "\t**bpf-helpers**\\ (7)" \ - $(foreach page,$(call list_pages,$(1)),",\n\t**$(page)**\\ (8)") \ + "**bpf**\ (2),\n" \ + "**bpf-helpers**\\ (7)" \ + $(foreach page,$(call list_pages,$(1)),",\n**$(page)**\\ (8)") \ "\n") 
$(OUTPUT)%.8: %.rst diff --git a/docs/bpftool-btf.rst b/docs/bpftool-btf.rst index 342716f7..f66781f2 100644 --- a/docs/bpftool-btf.rst +++ b/docs/bpftool-btf.rst @@ -14,82 +14,76 @@ tool for inspection of BTF data SYNOPSIS ======== - **bpftool** [*OPTIONS*] **btf** *COMMAND* +**bpftool** [*OPTIONS*] **btf** *COMMAND* - *OPTIONS* := { |COMMON_OPTIONS| | { **-B** | **--base-btf** } } +*OPTIONS* := { |COMMON_OPTIONS| | { **-B** | **--base-btf** } } - *COMMANDS* := { **dump** | **help** } +*COMMANDS* := { **dump** | **help** } BTF COMMANDS ============= -| **bpftool** **btf** { **show** | **list** } [**id** *BTF_ID*] -| **bpftool** **btf dump** *BTF_SRC* [**format** *FORMAT*] -| **bpftool** **btf help** +| **bpftool** **btf** { **show** | **list** } [**id** *BTF_ID*] +| **bpftool** **btf dump** *BTF_SRC* [**format** *FORMAT*] +| **bpftool** **btf help** | -| *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* } -| *FORMAT* := { **raw** | **c** } -| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } -| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } +| *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* } +| *FORMAT* := { **raw** | **c** } +| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } +| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } DESCRIPTION =========== - **bpftool btf { show | list }** [**id** *BTF_ID*] - Show information about loaded BTF objects. If a BTF ID is - specified, show information only about given BTF object, - otherwise list all BTF objects currently loaded on the - system. +bpftool btf { show | list } [id *BTF_ID*] + Show information about loaded BTF objects. If a BTF ID is specified, show + information only about given BTF object, otherwise list all BTF objects + currently loaded on the system. 
- Since Linux 5.8 bpftool is able to discover information about - processes that hold open file descriptors (FDs) against BTF - objects. On such kernels bpftool will automatically emit this - information as well. + Since Linux 5.8 bpftool is able to discover information about processes + that hold open file descriptors (FDs) against BTF objects. On such kernels + bpftool will automatically emit this information as well. - **bpftool btf dump** *BTF_SRC* - Dump BTF entries from a given *BTF_SRC*. +bpftool btf dump *BTF_SRC* + Dump BTF entries from a given *BTF_SRC*. - When **id** is specified, BTF object with that ID will be - loaded and all its BTF types emitted. + When **id** is specified, BTF object with that ID will be loaded and all + its BTF types emitted. - When **map** is provided, it's expected that map has - associated BTF object with BTF types describing key and - value. It's possible to select whether to dump only BTF - type(s) associated with key (**key**), value (**value**), - both key and value (**kv**), or all BTF types present in - associated BTF object (**all**). If not specified, **kv** - is assumed. + When **map** is provided, it's expected that map has associated BTF object + with BTF types describing key and value. It's possible to select whether to + dump only BTF type(s) associated with key (**key**), value (**value**), + both key and value (**kv**), or all BTF types present in associated BTF + object (**all**). If not specified, **kv** is assumed. - When **prog** is provided, it's expected that program has - associated BTF object with BTF types. + When **prog** is provided, it's expected that program has associated BTF + object with BTF types. - When specifying *FILE*, an ELF file is expected, containing - .BTF section with well-defined BTF binary format data, - typically produced by clang or pahole. 
+ When specifying *FILE*, an ELF file is expected, containing .BTF section + with well-defined BTF binary format data, typically produced by clang or + pahole. - **format** option can be used to override default (raw) - output format. Raw (**raw**) or C-syntax (**c**) output - formats are supported. + **format** option can be used to override default (raw) output format. Raw + (**raw**) or C-syntax (**c**) output formats are supported. - **bpftool btf help** - Print short help message. +bpftool btf help + Print short help message. OPTIONS ======= - .. include:: common_options.rst - - -B, --base-btf *FILE* - Pass a base BTF object. Base BTF objects are typically used - with BTF objects for kernel modules. To avoid duplicating - all kernel symbols required by modules, BTF objects for - modules are "split", they are built incrementally on top of - the kernel (vmlinux) BTF object. So the base BTF reference - should usually point to the kernel BTF. - - When the main BTF object to process (for example, the - module BTF to dump) is passed as a *FILE*, bpftool attempts - to autodetect the path for the base object, and passing - this option is optional. When the main BTF object is passed - through other handles, this option becomes necessary. +.. include:: common_options.rst + +-B, --base-btf *FILE* + Pass a base BTF object. Base BTF objects are typically used with BTF + objects for kernel modules. To avoid duplicating all kernel symbols + required by modules, BTF objects for modules are "split", they are + built incrementally on top of the kernel (vmlinux) BTF object. So the + base BTF reference should usually point to the kernel BTF. + + When the main BTF object to process (for example, the module BTF to + dump) is passed as a *FILE*, bpftool attempts to autodetect the path + for the base object, and passing this option is optional. When the main + BTF object is passed through other handles, this option becomes + necessary. 
EXAMPLES ======== diff --git a/docs/bpftool-cgroup.rst b/docs/bpftool-cgroup.rst index bd015ec9..b2610d16 100644 --- a/docs/bpftool-cgroup.rst +++ b/docs/bpftool-cgroup.rst @@ -14,124 +14,125 @@ tool for inspection and simple manipulation of eBPF progs SYNOPSIS ======== - **bpftool** [*OPTIONS*] **cgroup** *COMMAND* +**bpftool** [*OPTIONS*] **cgroup** *COMMAND* - *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } } +*OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } } - *COMMANDS* := - { **show** | **list** | **tree** | **attach** | **detach** | **help** } +*COMMANDS* := +{ **show** | **list** | **tree** | **attach** | **detach** | **help** } CGROUP COMMANDS =============== -| **bpftool** **cgroup** { **show** | **list** } *CGROUP* [**effective**] -| **bpftool** **cgroup tree** [*CGROUP_ROOT*] [**effective**] -| **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*] -| **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG* -| **bpftool** **cgroup help** +| **bpftool** **cgroup** { **show** | **list** } *CGROUP* [**effective**] +| **bpftool** **cgroup tree** [*CGROUP_ROOT*] [**effective**] +| **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*] +| **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG* +| **bpftool** **cgroup help** | -| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } -| *ATTACH_TYPE* := { **cgroup_inet_ingress** | **cgroup_inet_egress** | -| **cgroup_inet_sock_create** | **cgroup_sock_ops** | -| **cgroup_device** | **cgroup_inet4_bind** | **cgroup_inet6_bind** | -| **cgroup_inet4_post_bind** | **cgroup_inet6_post_bind** | -| **cgroup_inet4_connect** | **cgroup_inet6_connect** | -| **cgroup_inet4_getpeername** | **cgroup_inet6_getpeername** | -| **cgroup_inet4_getsockname** | **cgroup_inet6_getsockname** | -| **cgroup_udp4_sendmsg** | **cgroup_udp6_sendmsg** | -| **cgroup_udp4_recvmsg** | **cgroup_udp6_recvmsg** | -| **cgroup_sysctl** | 
**cgroup_getsockopt** | **cgroup_setsockopt** | -| **cgroup_inet_sock_release** } -| *ATTACH_FLAGS* := { **multi** | **override** } +| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } +| *ATTACH_TYPE* := { **cgroup_inet_ingress** | **cgroup_inet_egress** | +| **cgroup_inet_sock_create** | **cgroup_sock_ops** | +| **cgroup_device** | **cgroup_inet4_bind** | **cgroup_inet6_bind** | +| **cgroup_inet4_post_bind** | **cgroup_inet6_post_bind** | +| **cgroup_inet4_connect** | **cgroup_inet6_connect** | +| **cgroup_unix_connect** | **cgroup_inet4_getpeername** | +| **cgroup_inet6_getpeername** | **cgroup_unix_getpeername** | +| **cgroup_inet4_getsockname** | **cgroup_inet6_getsockname** | +| **cgroup_unix_getsockname** | **cgroup_udp4_sendmsg** | +| **cgroup_udp6_sendmsg** | **cgroup_unix_sendmsg** | +| **cgroup_udp4_recvmsg** | **cgroup_udp6_recvmsg** | +| **cgroup_unix_recvmsg** | **cgroup_sysctl** | +| **cgroup_getsockopt** | **cgroup_setsockopt** | +| **cgroup_inet_sock_release** } +| *ATTACH_FLAGS* := { **multi** | **override** } DESCRIPTION =========== - **bpftool cgroup { show | list }** *CGROUP* [**effective**] - List all programs attached to the cgroup *CGROUP*. - - Output will start with program ID followed by attach type, - attach flags and program name. - - If **effective** is specified retrieve effective programs that - will execute for events within a cgroup. This includes - inherited along with attached ones. - - **bpftool cgroup tree** [*CGROUP_ROOT*] [**effective**] - Iterate over all cgroups in *CGROUP_ROOT* and list all - attached programs. If *CGROUP_ROOT* is not specified, - bpftool uses cgroup v2 mountpoint. - - The output is similar to the output of cgroup show/list - commands: it starts with absolute cgroup path, followed by - program ID, attach type, attach flags and program name. - - If **effective** is specified retrieve effective programs that - will execute for events within a cgroup. 
This includes - inherited along with attached ones. - - **bpftool cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*] - Attach program *PROG* to the cgroup *CGROUP* with attach type - *ATTACH_TYPE* and optional *ATTACH_FLAGS*. - - *ATTACH_FLAGS* can be one of: **override** if a sub-cgroup installs - some bpf program, the program in this cgroup yields to sub-cgroup - program; **multi** if a sub-cgroup installs some bpf program, - that cgroup program gets run in addition to the program in this - cgroup. - - Only one program is allowed to be attached to a cgroup with - no attach flags or the **override** flag. Attaching another - program will release old program and attach the new one. - - Multiple programs are allowed to be attached to a cgroup with - **multi**. They are executed in FIFO order (those that were - attached first, run first). - - Non-default *ATTACH_FLAGS* are supported by kernel version 4.14 - and later. - - *ATTACH_TYPE* can be on of: - **ingress** ingress path of the inet socket (since 4.10); - **egress** egress path of the inet socket (since 4.10); - **sock_create** opening of an inet socket (since 4.10); - **sock_ops** various socket operations (since 4.12); - **device** device access (since 4.15); - **bind4** call to bind(2) for an inet4 socket (since 4.17); - **bind6** call to bind(2) for an inet6 socket (since 4.17); - **post_bind4** return from bind(2) for an inet4 socket (since 4.17); - **post_bind6** return from bind(2) for an inet6 socket (since 4.17); - **connect4** call to connect(2) for an inet4 socket (since 4.17); - **connect6** call to connect(2) for an inet6 socket (since 4.17); - **sendmsg4** call to sendto(2), sendmsg(2), sendmmsg(2) for an - unconnected udp4 socket (since 4.18); - **sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an - unconnected udp6 socket (since 4.18); - **recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for - an unconnected udp4 socket (since 5.2); - **recvmsg6** call to recvfrom(2), 
recvmsg(2), recvmmsg(2) for - an unconnected udp6 socket (since 5.2); - **sysctl** sysctl access (since 5.2); - **getsockopt** call to getsockopt (since 5.3); - **setsockopt** call to setsockopt (since 5.3); - **getpeername4** call to getpeername(2) for an inet4 socket (since 5.8); - **getpeername6** call to getpeername(2) for an inet6 socket (since 5.8); - **getsockname4** call to getsockname(2) for an inet4 socket (since 5.8); - **getsockname6** call to getsockname(2) for an inet6 socket (since 5.8). - **sock_release** closing an userspace inet socket (since 5.9). - - **bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG* - Detach *PROG* from the cgroup *CGROUP* and attach type - *ATTACH_TYPE*. - - **bpftool prog help** - Print short help message. +bpftool cgroup { show | list } *CGROUP* [effective] + List all programs attached to the cgroup *CGROUP*. + + Output will start with program ID followed by attach type, attach flags and + program name. + + If **effective** is specified retrieve effective programs that will execute + for events within a cgroup. This includes inherited along with attached + ones. + +bpftool cgroup tree [*CGROUP_ROOT*] [effective] + Iterate over all cgroups in *CGROUP_ROOT* and list all attached programs. + If *CGROUP_ROOT* is not specified, bpftool uses cgroup v2 mountpoint. + + The output is similar to the output of cgroup show/list commands: it starts + with absolute cgroup path, followed by program ID, attach type, attach + flags and program name. + + If **effective** is specified retrieve effective programs that will execute + for events within a cgroup. This includes inherited along with attached + ones. + +bpftool cgroup attach *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*] + Attach program *PROG* to the cgroup *CGROUP* with attach type *ATTACH_TYPE* + and optional *ATTACH_FLAGS*. 
+ + *ATTACH_FLAGS* can be one of: **override** if a sub-cgroup installs some + bpf program, the program in this cgroup yields to sub-cgroup program; + **multi** if a sub-cgroup installs some bpf program, that cgroup program + gets run in addition to the program in this cgroup. + + Only one program is allowed to be attached to a cgroup with no attach flags + or the **override** flag. Attaching another program will release old + program and attach the new one. + + Multiple programs are allowed to be attached to a cgroup with **multi**. + They are executed in FIFO order (those that were attached first, run + first). + + Non-default *ATTACH_FLAGS* are supported by kernel version 4.14 and later. + + *ATTACH_TYPE* can be one of: + + - **ingress** ingress path of the inet socket (since 4.10) + - **egress** egress path of the inet socket (since 4.10) + - **sock_create** opening of an inet socket (since 4.10) + - **sock_ops** various socket operations (since 4.12) + - **device** device access (since 4.15) + - **bind4** call to bind(2) for an inet4 socket (since 4.17) + - **bind6** call to bind(2) for an inet6 socket (since 4.17) + - **post_bind4** return from bind(2) for an inet4 socket (since 4.17) + - **post_bind6** return from bind(2) for an inet6 socket (since 4.17) + - **connect4** call to connect(2) for an inet4 socket (since 4.17) + - **connect6** call to connect(2) for an inet6 socket (since 4.17) + - **connect_unix** call to connect(2) for a unix socket (since 6.7) + - **sendmsg4** call to sendto(2), sendmsg(2), sendmmsg(2) for an unconnected udp4 socket (since 4.18) + - **sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an unconnected udp6 socket (since 4.18) + - **sendmsg_unix** call to sendto(2), sendmsg(2), sendmmsg(2) for an unconnected unix socket (since 6.7) + - **recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for an unconnected udp4 socket (since 5.2) + - **recvmsg6** call to recvfrom(2), recvmsg(2), recvmmsg(2) for an unconnected udp6 
socket (since 5.2) + - **recvmsg_unix** call to recvfrom(2), recvmsg(2), recvmmsg(2) for an unconnected unix socket (since 6.7) + - **sysctl** sysctl access (since 5.2) + - **getsockopt** call to getsockopt (since 5.3) + - **setsockopt** call to setsockopt (since 5.3) + - **getpeername4** call to getpeername(2) for an inet4 socket (since 5.8) + - **getpeername6** call to getpeername(2) for an inet6 socket (since 5.8) + - **getpeername_unix** call to getpeername(2) for a unix socket (since 6.7) + - **getsockname4** call to getsockname(2) for an inet4 socket (since 5.8) + - **getsockname6** call to getsockname(2) for an inet6 socket (since 5.8) + - **getsockname_unix** call to getsockname(2) for a unix socket (since 6.7) + - **sock_release** closing a userspace inet socket (since 5.9) + +bpftool cgroup detach *CGROUP* *ATTACH_TYPE* *PROG* + Detach *PROG* from the cgroup *CGROUP* and attach type *ATTACH_TYPE*. + +bpftool cgroup help + Print short help message. OPTIONS ======= - .. include:: common_options.rst +.. include:: common_options.rst - -f, --bpffs - Show file names of pinned programs. +-f, --bpffs + Show file names of pinned programs. 
EXAMPLES ======== diff --git a/docs/bpftool-feature.rst b/docs/bpftool-feature.rst index e44039f8..c7f83789 100644 --- a/docs/bpftool-feature.rst +++ b/docs/bpftool-feature.rst @@ -14,77 +14,70 @@ tool for inspection of eBPF-related parameters for Linux kernel or net device SYNOPSIS ======== - **bpftool** [*OPTIONS*] **feature** *COMMAND* +**bpftool** [*OPTIONS*] **feature** *COMMAND* - *OPTIONS* := { |COMMON_OPTIONS| } +*OPTIONS* := { |COMMON_OPTIONS| } - *COMMANDS* := { **probe** | **help** } +*COMMANDS* := { **probe** | **help** } FEATURE COMMANDS ================ -| **bpftool** **feature probe** [*COMPONENT*] [**full**] [**unprivileged**] [**macros** [**prefix** *PREFIX*]] -| **bpftool** **feature list_builtins** *GROUP* -| **bpftool** **feature help** +| **bpftool** **feature probe** [*COMPONENT*] [**full**] [**unprivileged**] [**macros** [**prefix** *PREFIX*]] +| **bpftool** **feature list_builtins** *GROUP* +| **bpftool** **feature help** | -| *COMPONENT* := { **kernel** | **dev** *NAME* } -| *GROUP* := { **prog_types** | **map_types** | **attach_types** | **link_types** | **helpers** } +| *COMPONENT* := { **kernel** | **dev** *NAME* } +| *GROUP* := { **prog_types** | **map_types** | **attach_types** | **link_types** | **helpers** } DESCRIPTION =========== - **bpftool feature probe** [**kernel**] [**full**] [**macros** [**prefix** *PREFIX*]] - Probe the running kernel and dump a number of eBPF-related - parameters, such as availability of the **bpf**\ () system call, - JIT status, eBPF program types availability, eBPF helper - functions availability, and more. - - By default, bpftool **does not run probes** for - **bpf_probe_write_user**\ () and **bpf_trace_printk**\() - helpers which print warnings to kernel logs. To enable them - and run all probes, the **full** keyword should be used. 
- - If the **macros** keyword (but not the **-j** option) is - passed, a subset of the output is dumped as a list of - **#define** macros that are ready to be included in a C - header file, for example. If, additionally, **prefix** is - used to define a *PREFIX*, the provided string will be used - as a prefix to the names of the macros: this can be used to - avoid conflicts on macro names when including the output of - this command as a header file. - - Keyword **kernel** can be omitted. If no probe target is - specified, probing the kernel is the default behaviour. - - When the **unprivileged** keyword is used, bpftool will dump - only the features available to a user who does not have the - **CAP_SYS_ADMIN** capability set. The features available in - that case usually represent a small subset of the parameters - supported by the system. Unprivileged users MUST use the - **unprivileged** keyword: This is to avoid misdetection if - bpftool is inadvertently run as non-root, for example. This - keyword is unavailable if bpftool was compiled without - libcap. - - **bpftool feature probe dev** *NAME* [**full**] [**macros** [**prefix** *PREFIX*]] - Probe network device for supported eBPF features and dump - results to the console. - - The keywords **full**, **macros** and **prefix** have the - same role as when probing the kernel. - - **bpftool feature list_builtins** *GROUP* - List items known to bpftool. These can be BPF program types - (**prog_types**), BPF map types (**map_types**), attach types - (**attach_types**), link types (**link_types**), or BPF helper - functions (**helpers**). The command does not probe the system, but - simply lists the elements that bpftool knows from compilation time, - as provided from libbpf (for all object types) or from the BPF UAPI - header (list of helpers). This can be used in scripts to iterate over - BPF types or helpers. - - **bpftool feature help** - Print short help message. 
+bpftool feature probe [kernel] [full] [macros [prefix *PREFIX*]] + Probe the running kernel and dump a number of eBPF-related parameters, such + as availability of the **bpf**\ () system call, JIT status, eBPF program + types availability, eBPF helper functions availability, and more. + + By default, bpftool **does not run probes** for **bpf_probe_write_user**\ + () and **bpf_trace_printk**\() helpers which print warnings to kernel logs. + To enable them and run all probes, the **full** keyword should be used. + + If the **macros** keyword (but not the **-j** option) is passed, a subset + of the output is dumped as a list of **#define** macros that are ready to + be included in a C header file, for example. If, additionally, **prefix** + is used to define a *PREFIX*, the provided string will be used as a prefix + to the names of the macros: this can be used to avoid conflicts on macro + names when including the output of this command as a header file. + + Keyword **kernel** can be omitted. If no probe target is specified, probing + the kernel is the default behaviour. + + When the **unprivileged** keyword is used, bpftool will dump only the + features available to a user who does not have the **CAP_SYS_ADMIN** + capability set. The features available in that case usually represent a + small subset of the parameters supported by the system. Unprivileged users + MUST use the **unprivileged** keyword: This is to avoid misdetection if + bpftool is inadvertently run as non-root, for example. This keyword is + unavailable if bpftool was compiled without libcap. + +bpftool feature probe dev *NAME* [full] [macros [prefix *PREFIX*]] + Probe network device for supported eBPF features and dump results to the + console. + + The keywords **full**, **macros** and **prefix** have the same role as when + probing the kernel. + +bpftool feature list_builtins *GROUP* + List items known to bpftool. 
These can be BPF program types + (**prog_types**), BPF map types (**map_types**), attach types + (**attach_types**), link types (**link_types**), or BPF helper functions + (**helpers**). The command does not probe the system, but simply lists the + elements that bpftool knows from compilation time, as provided from libbpf + (for all object types) or from the BPF UAPI header (list of helpers). This + can be used in scripts to iterate over BPF types or helpers. + +bpftool feature help + Print short help message. OPTIONS ======= - .. include:: common_options.rst +.. include:: common_options.rst diff --git a/docs/bpftool-gen.rst b/docs/bpftool-gen.rst index 5006e724..c768e6d4 100644 --- a/docs/bpftool-gen.rst +++ b/docs/bpftool-gen.rst @@ -14,199 +14,177 @@ tool for BPF code-generation SYNOPSIS ======== - **bpftool** [*OPTIONS*] **gen** *COMMAND* +**bpftool** [*OPTIONS*] **gen** *COMMAND* - *OPTIONS* := { |COMMON_OPTIONS| | { **-L** | **--use-loader** } } +*OPTIONS* := { |COMMON_OPTIONS| | { **-L** | **--use-loader** } } - *COMMAND* := { **object** | **skeleton** | **help** } +*COMMAND* := { **object** | **skeleton** | **help** } GEN COMMANDS ============= -| **bpftool** **gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...] -| **bpftool** **gen skeleton** *FILE* [**name** *OBJECT_NAME*] -| **bpftool** **gen subskeleton** *FILE* [**name** *OBJECT_NAME*] -| **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...] -| **bpftool** **gen help** +| **bpftool** **gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...] +| **bpftool** **gen skeleton** *FILE* [**name** *OBJECT_NAME*] +| **bpftool** **gen subskeleton** *FILE* [**name** *OBJECT_NAME*] +| **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...] +| **bpftool** **gen help** DESCRIPTION =========== - **bpftool gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...] - Statically link (combine) together one or more *INPUT_FILE*'s - into a single resulting *OUTPUT_FILE*. 
All the files involved - are BPF ELF object files. - - The rules of BPF static linking are mostly the same as for - user-space object files, but in addition to combining data - and instruction sections, .BTF and .BTF.ext (if present in - any of the input files) data are combined together. .BTF - data is deduplicated, so all the common types across - *INPUT_FILE*'s will only be represented once in the resulting - BTF information. - - BPF static linking allows to partition BPF source code into - individually compiled files that are then linked into - a single resulting BPF object file, which can be used to - generated BPF skeleton (with **gen skeleton** command) or - passed directly into **libbpf** (using **bpf_object__open()** - family of APIs). - - **bpftool gen skeleton** *FILE* - Generate BPF skeleton C header file for a given *FILE*. - - BPF skeleton is an alternative interface to existing libbpf - APIs for working with BPF objects. Skeleton code is intended - to significantly shorten and simplify code to load and work - with BPF programs from userspace side. Generated code is - tailored to specific input BPF object *FILE*, reflecting its - structure by listing out available maps, program, variables, - etc. Skeleton eliminates the need to lookup mentioned - components by name. Instead, if skeleton instantiation - succeeds, they are populated in skeleton structure as valid - libbpf types (e.g., **struct bpf_map** pointer) and can be - passed to existing generic libbpf APIs. - - In addition to simple and reliable access to maps and - programs, skeleton provides a storage for BPF links (**struct - bpf_link**) for each BPF program within BPF object. When - requested, supported BPF programs will be automatically - attached and resulting BPF links stored for further use by - user in pre-allocated fields in skeleton struct. 
For BPF - programs that can't be automatically attached by libbpf, - user can attach them manually, but store resulting BPF link - in per-program link field. All such set up links will be - automatically destroyed on BPF skeleton destruction. This - eliminates the need for users to manage links manually and - rely on libbpf support to detach programs and free up - resources. - - Another facility provided by BPF skeleton is an interface to - global variables of all supported kinds: mutable, read-only, - as well as extern ones. This interface allows to pre-setup - initial values of variables before BPF object is loaded and - verified by kernel. For non-read-only variables, the same - interface can be used to fetch values of global variables on - userspace side, even if they are modified by BPF code. - - During skeleton generation, contents of source BPF object - *FILE* is embedded within generated code and is thus not - necessary to keep around. This ensures skeleton and BPF - object file are matching 1-to-1 and always stay in sync. - Generated code is dual-licensed under LGPL-2.1 and - BSD-2-Clause licenses. - - It is a design goal and guarantee that skeleton interfaces - are interoperable with generic libbpf APIs. User should - always be able to use skeleton API to create and load BPF - object, and later use libbpf APIs to keep working with - specific maps, programs, etc. - - As part of skeleton, few custom functions are generated. - Each of them is prefixed with object name. Object name can - either be derived from object file name, i.e., if BPF object - file name is **example.o**, BPF object name will be - **example**. Object name can be also specified explicitly - through **name** *OBJECT_NAME* parameter. The following - custom functions are provided (assuming **example** as - the object name): - - - **example__open** and **example__open_opts**. - These functions are used to instantiate skeleton. It - corresponds to libbpf's **bpf_object__open**\ () API. 
- **_opts** variants accepts extra **bpf_object_open_opts** - options. - - - **example__load**. - This function creates maps, loads and verifies BPF - programs, initializes global data maps. It corresponds to - libppf's **bpf_object__load**\ () API. - - - **example__open_and_load** combines **example__open** and - **example__load** invocations in one commonly used - operation. - - - **example__attach** and **example__detach** - This pair of functions allow to attach and detach, - correspondingly, already loaded BPF object. Only BPF - programs of types supported by libbpf for auto-attachment - will be auto-attached and their corresponding BPF links - instantiated. For other BPF programs, user can manually - create a BPF link and assign it to corresponding fields in - skeleton struct. **example__detach** will detach both - links created automatically, as well as those populated by - user manually. - - - **example__destroy** - Detach and unload BPF programs, free up all the resources - used by skeleton and BPF object. - - If BPF object has global variables, corresponding structs - with memory layout corresponding to global data data section - layout will be created. Currently supported ones are: *.data*, - *.bss*, *.rodata*, and *.kconfig* structs/data sections. - These data sections/structs can be used to set up initial - values of variables, if set before **example__load**. - Afterwards, if target kernel supports memory-mapped BPF - arrays, same structs can be used to fetch and update - (non-read-only) data from userspace, with same simplicity - as for BPF side. - - **bpftool gen subskeleton** *FILE* - Generate BPF subskeleton C header file for a given *FILE*. - - Subskeletons are similar to skeletons, except they do not own - the corresponding maps, programs, or global variables. They - require that the object file used to generate them is already - loaded into a *bpf_object* by some other means. 
- - This functionality is useful when a library is included into a - larger BPF program. A subskeleton for the library would have - access to all objects and globals defined in it, without - having to know about the larger program. - - Consequently, there are only two functions defined - for subskeletons: - - - **example__open(bpf_object\*)** - Instantiates a subskeleton from an already opened (but not - necessarily loaded) **bpf_object**. - - - **example__destroy()** - Frees the storage for the subskeleton but *does not* unload - any BPF programs or maps. - - **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...] - Generate a minimum BTF file as *OUTPUT*, derived from a given - *INPUT* BTF file, containing all needed BTF types so one, or - more, given eBPF objects CO-RE relocations may be satisfied. - - When kernels aren't compiled with CONFIG_DEBUG_INFO_BTF, - libbpf, when loading an eBPF object, has to rely on external - BTF files to be able to calculate CO-RE relocations. - - Usually, an external BTF file is built from existing kernel - DWARF data using pahole. It contains all the types used by - its respective kernel image and, because of that, is big. - - The min_core_btf feature builds smaller BTF files, customized - to one or multiple eBPF objects, so they can be distributed - together with an eBPF CO-RE based application, turning the - application portable to different kernel versions. - - Check examples bellow for more information how to use it. - - **bpftool gen help** - Print short help message. +bpftool gen object *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...] + Statically link (combine) together one or more *INPUT_FILE*'s into a single + resulting *OUTPUT_FILE*. All the files involved are BPF ELF object files. 
+ + The rules of BPF static linking are mostly the same as for user-space + object files, but in addition to combining data and instruction sections, + .BTF and .BTF.ext (if present in any of the input files) data are combined + together. .BTF data is deduplicated, so all the common types across + *INPUT_FILE*'s will only be represented once in the resulting BTF + information. + + BPF static linking allows to partition BPF source code into individually + compiled files that are then linked into a single resulting BPF object + file, which can be used to generate BPF skeleton (with **gen skeleton** + command) or passed directly into **libbpf** (using **bpf_object__open()** + family of APIs). + +bpftool gen skeleton *FILE* + Generate BPF skeleton C header file for a given *FILE*. + + BPF skeleton is an alternative interface to existing libbpf APIs for + working with BPF objects. Skeleton code is intended to significantly + shorten and simplify code to load and work with BPF programs from userspace + side. Generated code is tailored to specific input BPF object *FILE*, + reflecting its structure by listing out available maps, program, variables, + etc. Skeleton eliminates the need to lookup mentioned components by name. + Instead, if skeleton instantiation succeeds, they are populated in skeleton + structure as valid libbpf types (e.g., **struct bpf_map** pointer) and can + be passed to existing generic libbpf APIs. + + In addition to simple and reliable access to maps and programs, skeleton + provides a storage for BPF links (**struct bpf_link**) for each BPF program + within BPF object. When requested, supported BPF programs will be + automatically attached and resulting BPF links stored for further use by + user in pre-allocated fields in skeleton struct. For BPF programs that + can't be automatically attached by libbpf, user can attach them manually, + but store resulting BPF link in per-program link field. 
All such set up + links will be automatically destroyed on BPF skeleton destruction. This + eliminates the need for users to manage links manually and rely on libbpf + support to detach programs and free up resources. + + Another facility provided by BPF skeleton is an interface to global + variables of all supported kinds: mutable, read-only, as well as extern + ones. This interface allows to pre-setup initial values of variables before + BPF object is loaded and verified by kernel. For non-read-only variables, + the same interface can be used to fetch values of global variables on + userspace side, even if they are modified by BPF code. + + During skeleton generation, contents of source BPF object *FILE* is + embedded within generated code and is thus not necessary to keep around. + This ensures skeleton and BPF object file are matching 1-to-1 and always + stay in sync. Generated code is dual-licensed under LGPL-2.1 and + BSD-2-Clause licenses. + + It is a design goal and guarantee that skeleton interfaces are + interoperable with generic libbpf APIs. User should always be able to use + skeleton API to create and load BPF object, and later use libbpf APIs to + keep working with specific maps, programs, etc. + + As part of skeleton, few custom functions are generated. Each of them is + prefixed with object name. Object name can either be derived from object + file name, i.e., if BPF object file name is **example.o**, BPF object name + will be **example**. Object name can be also specified explicitly through + **name** *OBJECT_NAME* parameter. The following custom functions are + provided (assuming **example** as the object name): + + - **example__open** and **example__open_opts**. + These functions are used to instantiate skeleton. It corresponds to + libbpf's **bpf_object__open**\ () API. **_opts** variants accepts extra + **bpf_object_open_opts** options. + + - **example__load**. 
+ + This function creates maps, loads and verifies BPF programs, initializes + global data maps. It corresponds to libbpf's **bpf_object__load**\ () + API. + + - **example__open_and_load** combines **example__open** and + **example__load** invocations in one commonly used operation. + + - **example__attach** and **example__detach**. + This pair of functions allow to attach and detach, correspondingly, + already loaded BPF object. Only BPF programs of types supported by libbpf + for auto-attachment will be auto-attached and their corresponding BPF + links instantiated. For other BPF programs, user can manually create a + BPF link and assign it to corresponding fields in skeleton struct. + **example__detach** will detach both links created automatically, as well + as those populated by user manually. + + - **example__destroy**. + Detach and unload BPF programs, free up all the resources used by + skeleton and BPF object. + + If BPF object has global variables, corresponding structs with memory + layout corresponding to global data section layout will be created. + Currently supported ones are: *.data*, *.bss*, *.rodata*, and *.kconfig* + structs/data sections. These data sections/structs can be used to set up + initial values of variables, if set before **example__load**. Afterwards, + if target kernel supports memory-mapped BPF arrays, same structs can be + used to fetch and update (non-read-only) data from userspace, with same + simplicity as for BPF side. + +bpftool gen subskeleton *FILE* + Generate BPF subskeleton C header file for a given *FILE*. + + Subskeletons are similar to skeletons, except they do not own the + corresponding maps, programs, or global variables. They require that the + object file used to generate them is already loaded into a *bpf_object* by + some other means. + + This functionality is useful when a library is included into a larger BPF + program. 
A subskeleton for the library would have access to all objects and + globals defined in it, without having to know about the larger program. + + Consequently, there are only two functions defined for subskeletons: + + - **example__open(bpf_object\*)**. + Instantiates a subskeleton from an already opened (but not necessarily + loaded) **bpf_object**. + + - **example__destroy()**. + Frees the storage for the subskeleton but *does not* unload any BPF + programs or maps. + +bpftool gen min_core_btf *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...] + Generate a minimum BTF file as *OUTPUT*, derived from a given *INPUT* BTF + file, containing all needed BTF types so one, or more, given eBPF objects + CO-RE relocations may be satisfied. + + When kernels aren't compiled with CONFIG_DEBUG_INFO_BTF, libbpf, when + loading an eBPF object, has to rely on external BTF files to be able to + calculate CO-RE relocations. + + Usually, an external BTF file is built from existing kernel DWARF data + using pahole. It contains all the types used by its respective kernel image + and, because of that, is big. + + The min_core_btf feature builds smaller BTF files, customized to one or + multiple eBPF objects, so they can be distributed together with an eBPF + CO-RE based application, turning the application portable to different + kernel versions. + + Check examples below for more information on how to use it. + +bpftool gen help + Print short help message. OPTIONS ======= - .. include:: common_options.rst +.. include:: common_options.rst - -L, --use-loader - For skeletons, generate a "light" skeleton (also known as "loader" - skeleton). A light skeleton contains a loader eBPF program. It does - not use the majority of the libbpf infrastructure, and does not need - libelf. +-L, --use-loader + For skeletons, generate a "light" skeleton (also known as "loader" + skeleton). A light skeleton contains a loader eBPF program. 
It does not use + the majority of the libbpf infrastructure, and does not need libelf. EXAMPLES ======== @@ -257,18 +235,48 @@ EXAMPLES return 0; } -This is example BPF application with two BPF programs and a mix of BPF maps -and global variables. Source code is split across two source code files. +**$ cat example3.bpf.c** + +:: + + #include + #include + #include + /* This header file is provided by the bpf_testmod module. */ + #include "bpf_testmod.h" + + int test_2_result = 0; + + /* bpf_Testmod.ko calls this function, passing a "4" + * and testmod_map->data. + */ + SEC("struct_ops/test_2") + void BPF_PROG(test_2, int a, int b) + { + test_2_result = a + b; + } + + SEC(".struct_ops") + struct bpf_testmod_ops testmod_map = { + .test_2 = (void *)test_2, + .data = 0x1, + }; + +This is example BPF application with three BPF programs and a mix of BPF +maps and global variables. Source code is split across three source code +files. **$ clang --target=bpf -g example1.bpf.c -o example1.bpf.o** **$ clang --target=bpf -g example2.bpf.c -o example2.bpf.o** -**$ bpftool gen object example.bpf.o example1.bpf.o example2.bpf.o** +**$ clang --target=bpf -g example3.bpf.c -o example3.bpf.o** + +**$ bpftool gen object example.bpf.o example1.bpf.o example2.bpf.o example3.bpf.o** -This set of commands compiles *example1.bpf.c* and *example2.bpf.c* -individually and then statically links respective object files into the final -BPF ELF object file *example.bpf.o*. +This set of commands compiles *example1.bpf.c*, *example2.bpf.c* and +*example3.bpf.c* individually and then statically links respective object +files into the final BPF ELF object file *example.bpf.o*. **$ bpftool gen skeleton example.bpf.o name example | tee example.skel.h** @@ -291,7 +299,15 @@ BPF ELF object file *example.bpf.o*. 
struct bpf_map *data; struct bpf_map *bss; struct bpf_map *my_map; + struct bpf_map *testmod_map; } maps; + struct { + struct example__testmod_map__bpf_testmod_ops { + const struct bpf_program *test_1; + const struct bpf_program *test_2; + int data; + } *testmod_map; + } struct_ops; struct { struct bpf_program *handle_sys_enter; struct bpf_program *handle_sys_exit; @@ -304,6 +320,7 @@ BPF ELF object file *example.bpf.o*. struct { int x; } data; + int test_2_result; } *bss; struct example__data { _Bool global_flag; @@ -342,10 +359,16 @@ BPF ELF object file *example.bpf.o*. skel->rodata->param1 = 128; + /* Change the value through the pointer of shadow type */ + skel->struct_ops.testmod_map->data = 13; + err = example__load(skel); if (err) goto cleanup; + /* The result of the function test_2() */ + printf("test_2_result: %d\n", skel->bss->test_2_result); + err = example__attach(skel); if (err) goto cleanup; @@ -372,6 +395,7 @@ BPF ELF object file *example.bpf.o*. :: + test_2_result: 17 my_map name: my_map sys_enter prog FD: 8 my_static_var: 7 diff --git a/docs/bpftool-iter.rst b/docs/bpftool-iter.rst index 84839d48..2e5d81c9 100644 --- a/docs/bpftool-iter.rst +++ b/docs/bpftool-iter.rst @@ -14,50 +14,46 @@ tool to create BPF iterators SYNOPSIS ======== - **bpftool** [*OPTIONS*] **iter** *COMMAND* +**bpftool** [*OPTIONS*] **iter** *COMMAND* - *OPTIONS* := { |COMMON_OPTIONS| } +*OPTIONS* := { |COMMON_OPTIONS| } - *COMMANDS* := { **pin** | **help** } +*COMMANDS* := { **pin** | **help** } ITER COMMANDS -=================== +============= -| **bpftool** **iter pin** *OBJ* *PATH* [**map** *MAP*] -| **bpftool** **iter help** +| **bpftool** **iter pin** *OBJ* *PATH* [**map** *MAP*] +| **bpftool** **iter help** | -| *OBJ* := /a/file/of/bpf_iter_target.o -| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } +| *OBJ* := /a/file/of/bpf_iter_target.o +| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } DESCRIPTION =========== - **bpftool iter pin** *OBJ* *PATH* [**map** *MAP*] - A 
bpf iterator combines a kernel iterating of - particular kernel data (e.g., tasks, bpf_maps, etc.) - and a bpf program called for each kernel data object - (e.g., one task, one bpf_map, etc.). User space can - *read* kernel iterator output through *read()* syscall. - - The *pin* command creates a bpf iterator from *OBJ*, - and pin it to *PATH*. The *PATH* should be located - in *bpffs* mount. It must not contain a dot - character ('.'), which is reserved for future extensions - of *bpffs*. - - Map element bpf iterator requires an additional parameter - *MAP* so bpf program can iterate over map elements for - that map. User can have a bpf program in kernel to run - with each map element, do checking, filtering, aggregation, - etc. without copying data to user space. - - User can then *cat PATH* to see the bpf iterator output. - - **bpftool iter help** - Print short help message. +bpftool iter pin *OBJ* *PATH* [map *MAP*] + A bpf iterator combines a kernel iterating of particular kernel data (e.g., + tasks, bpf_maps, etc.) and a bpf program called for each kernel data object + (e.g., one task, one bpf_map, etc.). User space can *read* kernel iterator + output through *read()* syscall. + + The *pin* command creates a bpf iterator from *OBJ*, and pin it to *PATH*. + The *PATH* should be located in *bpffs* mount. It must not contain a dot + character ('.'), which is reserved for future extensions of *bpffs*. + + Map element bpf iterator requires an additional parameter *MAP* so bpf + program can iterate over map elements for that map. User can have a bpf + program in kernel to run with each map element, do checking, filtering, + aggregation, etc. without copying data to user space. + + User can then *cat PATH* to see the bpf iterator output. + +bpftool iter help + Print short help message. OPTIONS ======= - .. include:: common_options.rst +.. 
include:: common_options.rst EXAMPLES ======== diff --git a/docs/bpftool-link.rst b/docs/bpftool-link.rst index 52a4eee4..6f09d440 100644 --- a/docs/bpftool-link.rst +++ b/docs/bpftool-link.rst @@ -14,67 +14,62 @@ tool for inspection and simple manipulation of eBPF links SYNOPSIS ======== - **bpftool** [*OPTIONS*] **link** *COMMAND* +**bpftool** [*OPTIONS*] **link** *COMMAND* - *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } } +*OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } } - *COMMANDS* := { **show** | **list** | **pin** | **help** } +*COMMANDS* := { **show** | **list** | **pin** | **help** } LINK COMMANDS ============= -| **bpftool** **link { show | list }** [*LINK*] -| **bpftool** **link pin** *LINK* *FILE* -| **bpftool** **link detach** *LINK* -| **bpftool** **link help** +| **bpftool** **link { show | list }** [*LINK*] +| **bpftool** **link pin** *LINK* *FILE* +| **bpftool** **link detach** *LINK* +| **bpftool** **link help** | -| *LINK* := { **id** *LINK_ID* | **pinned** *FILE* } +| *LINK* := { **id** *LINK_ID* | **pinned** *FILE* } DESCRIPTION =========== - **bpftool link { show | list }** [*LINK*] - Show information about active links. If *LINK* is - specified show information only about given link, - otherwise list all links currently active on the system. +bpftool link { show | list } [*LINK*] + Show information about active links. If *LINK* is specified show + information only about given link, otherwise list all links currently + active on the system. - Output will start with link ID followed by link type and - zero or more named attributes, some of which depend on type - of link. + Output will start with link ID followed by link type and zero or more named + attributes, some of which depend on type of link. - Since Linux 5.8 bpftool is able to discover information about - processes that hold open file descriptors (FDs) against BPF - links. 
On such kernels bpftool will automatically emit this - information as well. + Since Linux 5.8 bpftool is able to discover information about processes + that hold open file descriptors (FDs) against BPF links. On such kernels + bpftool will automatically emit this information as well. - **bpftool link pin** *LINK* *FILE* - Pin link *LINK* as *FILE*. +bpftool link pin *LINK* *FILE* + Pin link *LINK* as *FILE*. - Note: *FILE* must be located in *bpffs* mount. It must not - contain a dot character ('.'), which is reserved for future - extensions of *bpffs*. + Note: *FILE* must be located in *bpffs* mount. It must not contain a dot + character ('.'), which is reserved for future extensions of *bpffs*. - **bpftool link detach** *LINK* - Force-detach link *LINK*. BPF link and its underlying BPF - program will stay valid, but they will be detached from the - respective BPF hook and BPF link will transition into - a defunct state until last open file descriptor for that - link is closed. +bpftool link detach *LINK* + Force-detach link *LINK*. BPF link and its underlying BPF program will stay + valid, but they will be detached from the respective BPF hook and BPF link + will transition into a defunct state until last open file descriptor for + that link is closed. - **bpftool link help** - Print short help message. +bpftool link help + Print short help message. OPTIONS ======= - .. include:: common_options.rst + .. include:: common_options.rst - -f, --bpffs - When showing BPF links, show file names of pinned - links. + -f, --bpffs + When showing BPF links, show file names of pinned links. - -n, --nomount - Do not automatically attempt to mount any virtual file system - (such as tracefs or BPF virtual file system) when necessary. + -n, --nomount + Do not automatically attempt to mount any virtual file system (such as + tracefs or BPF virtual file system) when necessary. 
EXAMPLES ======== diff --git a/docs/bpftool-map.rst b/docs/bpftool-map.rst index 3b7ba037..252e4c53 100644 --- a/docs/bpftool-map.rst +++ b/docs/bpftool-map.rst @@ -14,166 +14,160 @@ tool for inspection and simple manipulation of eBPF maps SYNOPSIS ======== - **bpftool** [*OPTIONS*] **map** *COMMAND* +**bpftool** [*OPTIONS*] **map** *COMMAND* - *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } } +*OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } } - *COMMANDS* := - { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** | - **delete** | **pin** | **help** } +*COMMANDS* := +{ **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** | +**delete** | **pin** | **help** } MAP COMMANDS ============= -| **bpftool** **map** { **show** | **list** } [*MAP*] -| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \ -| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] \ -| [**offload_dev** *NAME*] -| **bpftool** **map dump** *MAP* -| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*] -| **bpftool** **map lookup** *MAP* [**key** *DATA*] -| **bpftool** **map getnext** *MAP* [**key** *DATA*] -| **bpftool** **map delete** *MAP* **key** *DATA* -| **bpftool** **map pin** *MAP* *FILE* -| **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*] -| **bpftool** **map peek** *MAP* -| **bpftool** **map push** *MAP* **value** *VALUE* -| **bpftool** **map pop** *MAP* -| **bpftool** **map enqueue** *MAP* **value** *VALUE* -| **bpftool** **map dequeue** *MAP* -| **bpftool** **map freeze** *MAP* -| **bpftool** **map help** +| **bpftool** **map** { **show** | **list** } [*MAP*] +| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \ +| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] 
[**inner_map** *MAP*] \ +| [**offload_dev** *NAME*] +| **bpftool** **map dump** *MAP* +| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*] +| **bpftool** **map lookup** *MAP* [**key** *DATA*] +| **bpftool** **map getnext** *MAP* [**key** *DATA*] +| **bpftool** **map delete** *MAP* **key** *DATA* +| **bpftool** **map pin** *MAP* *FILE* +| **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*] +| **bpftool** **map peek** *MAP* +| **bpftool** **map push** *MAP* **value** *VALUE* +| **bpftool** **map pop** *MAP* +| **bpftool** **map enqueue** *MAP* **value** *VALUE* +| **bpftool** **map dequeue** *MAP* +| **bpftool** **map freeze** *MAP* +| **bpftool** **map help** | -| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* | **name** *MAP_NAME* } -| *DATA* := { [**hex**] *BYTES* } -| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* } -| *VALUE* := { *DATA* | *MAP* | *PROG* } -| *UPDATE_FLAGS* := { **any** | **exist** | **noexist** } -| *TYPE* := { **hash** | **array** | **prog_array** | **perf_event_array** | **percpu_hash** -| | **percpu_array** | **stack_trace** | **cgroup_array** | **lru_hash** -| | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps** -| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash** -| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage** -| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage** -| | **task_storage** | **bloom_filter** | **user_ringbuf** | **cgrp_storage** } +| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* | **name** *MAP_NAME* } +| *DATA* := { [**hex**] *BYTES* } +| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* } +| *VALUE* := { *DATA* | *MAP* | *PROG* } +| *UPDATE_FLAGS* := { **any** | **exist** | **noexist** } +| *TYPE* := { **hash** | **array** | **prog_array** | 
**perf_event_array** | **percpu_hash** +| | **percpu_array** | **stack_trace** | **cgroup_array** | **lru_hash** +| | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps** +| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash** +| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage** +| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage** +| | **task_storage** | **bloom_filter** | **user_ringbuf** | **cgrp_storage** | **arena** } DESCRIPTION =========== - **bpftool map { show | list }** [*MAP*] - Show information about loaded maps. If *MAP* is specified - show information only about given maps, otherwise list all - maps currently loaded on the system. In case of **name**, - *MAP* may match several maps which will all be shown. +bpftool map { show | list } [*MAP*] + Show information about loaded maps. If *MAP* is specified show information + only about given maps, otherwise list all maps currently loaded on the + system. In case of **name**, *MAP* may match several maps which will all + be shown. - Output will start with map ID followed by map type and - zero or more named attributes (depending on kernel version). + Output will start with map ID followed by map type and zero or more named + attributes (depending on kernel version). - Since Linux 5.8 bpftool is able to discover information about - processes that hold open file descriptors (FDs) against BPF - maps. On such kernels bpftool will automatically emit this - information as well. + Since Linux 5.8 bpftool is able to discover information about processes + that hold open file descriptors (FDs) against BPF maps. On such kernels + bpftool will automatically emit this information as well. 
- **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] [**offload_dev** *NAME*] - Create a new map with given parameters and pin it to *bpffs* - as *FILE*. +bpftool map create *FILE* type *TYPE* key *KEY_SIZE* value *VALUE_SIZE* entries *MAX_ENTRIES* name *NAME* [flags *FLAGS*] [inner_map *MAP*] [offload_dev *NAME*] + Create a new map with given parameters and pin it to *bpffs* as *FILE*. - *FLAGS* should be an integer which is the combination of - desired flags, e.g. 1024 for **BPF_F_MMAPABLE** (see bpf.h - UAPI header for existing flags). + *FLAGS* should be an integer which is the combination of desired flags, + e.g. 1024 for **BPF_F_MMAPABLE** (see bpf.h UAPI header for existing + flags). - To create maps of type array-of-maps or hash-of-maps, the - **inner_map** keyword must be used to pass an inner map. The - kernel needs it to collect metadata related to the inner maps - that the new map will work with. + To create maps of type array-of-maps or hash-of-maps, the **inner_map** + keyword must be used to pass an inner map. The kernel needs it to collect + metadata related to the inner maps that the new map will work with. - Keyword **offload_dev** expects a network interface name, - and is used to request hardware offload for the map. + Keyword **offload_dev** expects a network interface name, and is used to + request hardware offload for the map. - **bpftool map dump** *MAP* - Dump all entries in a given *MAP*. In case of **name**, - *MAP* may match several maps which will all be dumped. +bpftool map dump *MAP* + Dump all entries in a given *MAP*. In case of **name**, *MAP* may match + several maps which will all be dumped. - **bpftool map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*] - Update map entry for a given *KEY*. 
+bpftool map update *MAP* [key *DATA*] [value *VALUE*] [*UPDATE_FLAGS*] + Update map entry for a given *KEY*. - *UPDATE_FLAGS* can be one of: **any** update existing entry - or add if doesn't exit; **exist** update only if entry already - exists; **noexist** update only if entry doesn't exist. + *UPDATE_FLAGS* can be one of: **any** update existing entry or add if + doesn't exist; **exist** update only if entry already exists; **noexist** + update only if entry doesn't exist. - If the **hex** keyword is provided in front of the bytes - sequence, the bytes are parsed as hexadecimal values, even if - no "0x" prefix is added. If the keyword is not provided, then - the bytes are parsed as decimal values, unless a "0x" prefix - (for hexadecimal) or a "0" prefix (for octal) is provided. + If the **hex** keyword is provided in front of the bytes sequence, the + bytes are parsed as hexadecimal values, even if no "0x" prefix is added. If + the keyword is not provided, then the bytes are parsed as decimal values, + unless a "0x" prefix (for hexadecimal) or a "0" prefix (for octal) is + provided. - **bpftool map lookup** *MAP* [**key** *DATA*] - Lookup **key** in the map. +bpftool map lookup *MAP* [key *DATA*] + Lookup **key** in the map. - **bpftool map getnext** *MAP* [**key** *DATA*] - Get next key. If *key* is not specified, get first key. +bpftool map getnext *MAP* [key *DATA*] + Get next key. If *key* is not specified, get first key. - **bpftool map delete** *MAP* **key** *DATA* - Remove entry from the map. +bpftool map delete *MAP* key *DATA* + Remove entry from the map. - **bpftool map pin** *MAP* *FILE* - Pin map *MAP* as *FILE*. +bpftool map pin *MAP* *FILE* + Pin map *MAP* as *FILE*. - Note: *FILE* must be located in *bpffs* mount. It must not - contain a dot character ('.'), which is reserved for future - extensions of *bpffs*. + Note: *FILE* must be located in *bpffs* mount. 
It must not contain a dot + character ('.'), which is reserved for future extensions of *bpffs*. - **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*] - Read events from a **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map. +bpftool map event_pipe *MAP* [cpu *N* index *M*] + Read events from a **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map. - Install perf rings into a perf event array map and dump - output of any **bpf_perf_event_output**\ () call in the kernel. - By default read the number of CPUs on the system and - install perf ring for each CPU in the corresponding index - in the array. + Install perf rings into a perf event array map and dump output of any + **bpf_perf_event_output**\ () call in the kernel. By default read the + number of CPUs on the system and install perf ring for each CPU in the + corresponding index in the array. - If **cpu** and **index** are specified, install perf ring - for given **cpu** at **index** in the array (single ring). + If **cpu** and **index** are specified, install perf ring for given **cpu** + at **index** in the array (single ring). - Note that installing a perf ring into an array will silently - replace any existing ring. Any other application will stop - receiving events if it installed its rings earlier. + Note that installing a perf ring into an array will silently replace any + existing ring. Any other application will stop receiving events if it + installed its rings earlier. - **bpftool map peek** *MAP* - Peek next value in the queue or stack. +bpftool map peek *MAP* + Peek next value in the queue or stack. - **bpftool map push** *MAP* **value** *VALUE* - Push *VALUE* onto the stack. +bpftool map push *MAP* value *VALUE* + Push *VALUE* onto the stack. - **bpftool map pop** *MAP* - Pop and print value from the stack. +bpftool map pop *MAP* + Pop and print value from the stack. - **bpftool map enqueue** *MAP* **value** *VALUE* - Enqueue *VALUE* into the queue. 
+bpftool map enqueue *MAP* value *VALUE* + Enqueue *VALUE* into the queue. - **bpftool map dequeue** *MAP* - Dequeue and print value from the queue. +bpftool map dequeue *MAP* + Dequeue and print value from the queue. - **bpftool map freeze** *MAP* - Freeze the map as read-only from user space. Entries from a - frozen map can not longer be updated or deleted with the - **bpf**\ () system call. This operation is not reversible, - and the map remains immutable from user space until its - destruction. However, read and write permissions for BPF - programs to the map remain unchanged. +bpftool map freeze *MAP* + Freeze the map as read-only from user space. Entries from a frozen map can + no longer be updated or deleted with the **bpf**\ () system call. This + operation is not reversible, and the map remains immutable from user space + until its destruction. However, read and write permissions for BPF programs + to the map remain unchanged. - **bpftool map help** - Print short help message. +bpftool map help + Print short help message. OPTIONS ======= - .. include:: common_options.rst +.. include:: common_options.rst - -f, --bpffs - Show file names of pinned maps. +-f, --bpffs + Show file names of pinned maps. - -n, --nomount - Do not automatically attempt to mount any virtual file system - (such as tracefs or BPF virtual file system) when necessary. +-n, --nomount + Do not automatically attempt to mount any virtual file system (such as + tracefs or BPF virtual file system) when necessary. 
EXAMPLES ======== diff --git a/docs/bpftool-net.rst b/docs/bpftool-net.rst index 5e2abd3d..f8e65869 100644 --- a/docs/bpftool-net.rst +++ b/docs/bpftool-net.rst @@ -14,76 +14,74 @@ tool for inspection of networking related bpf prog attachments SYNOPSIS ======== - **bpftool** [*OPTIONS*] **net** *COMMAND* +**bpftool** [*OPTIONS*] **net** *COMMAND* - *OPTIONS* := { |COMMON_OPTIONS| } +*OPTIONS* := { |COMMON_OPTIONS| } - *COMMANDS* := - { **show** | **list** | **attach** | **detach** | **help** } +*COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** } NET COMMANDS ============ -| **bpftool** **net** { **show** | **list** } [ **dev** *NAME* ] -| **bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ] -| **bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME* -| **bpftool** **net help** +| **bpftool** **net** { **show** | **list** } [ **dev** *NAME* ] +| **bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ] +| **bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME* +| **bpftool** **net help** | -| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } -| *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** } +| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } +| *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** } DESCRIPTION =========== - **bpftool net { show | list }** [ **dev** *NAME* ] - List bpf program attachments in the kernel networking subsystem. - - Currently, device driver xdp attachments, tcx and old-style tc - classifier/action attachments, flow_dissector as well as netfilter - attachments are implemented, i.e., for - program types **BPF_PROG_TYPE_XDP**, **BPF_PROG_TYPE_SCHED_CLS**, - **BPF_PROG_TYPE_SCHED_ACT**, **BPF_PROG_TYPE_FLOW_DISSECTOR**, - **BPF_PROG_TYPE_NETFILTER**. 
- - For programs attached to a particular cgroup, e.g., - **BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**, - **BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, - users can use **bpftool cgroup** to dump cgroup attachments. - For sk_{filter, skb, msg, reuseport} and lwt/seg6 - bpf programs, users should consult other tools, e.g., iproute2. - - The current output will start with all xdp program attachments, followed by - all tcx, then tc class/qdisc bpf program attachments, then flow_dissector - and finally netfilter programs. Both xdp programs and tcx/tc programs are - ordered based on ifindex number. If multiple bpf programs attached - to the same networking device through **tc**, the order will be first - all bpf programs attached to tcx, then tc classes, then all bpf programs - attached to non clsact qdiscs, and finally all bpf programs attached - to root and clsact qdisc. - - **bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ] - Attach bpf program *PROG* to network interface *NAME* with - type specified by *ATTACH_TYPE*. Previously attached bpf program - can be replaced by the command used with **overwrite** option. - Currently, only XDP-related modes are supported for *ATTACH_TYPE*. - - *ATTACH_TYPE* can be of: - **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it; - **xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb; - **xdpdrv** - Native XDP. runs earliest point in driver's receive path; - **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception; - - **bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME* - Detach bpf program attached to network interface *NAME* with - type specified by *ATTACH_TYPE*. To detach bpf program, same - *ATTACH_TYPE* previously used for attach must be specified. - Currently, only XDP-related modes are supported for *ATTACH_TYPE*. 
- - **bpftool net help** - Print short help message. +bpftool net { show | list } [ dev *NAME* ] + List bpf program attachments in the kernel networking subsystem. + + Currently, device driver xdp attachments, tcx, netkit and old-style tc + classifier/action attachments, flow_dissector as well as netfilter + attachments are implemented, i.e., for program types **BPF_PROG_TYPE_XDP**, + **BPF_PROG_TYPE_SCHED_CLS**, **BPF_PROG_TYPE_SCHED_ACT**, + **BPF_PROG_TYPE_FLOW_DISSECTOR**, **BPF_PROG_TYPE_NETFILTER**. + + For programs attached to a particular cgroup, e.g., + **BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**, + **BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, users + can use **bpftool cgroup** to dump cgroup attachments. For sk_{filter, skb, + msg, reuseport} and lwt/seg6 bpf programs, users should consult other + tools, e.g., iproute2. + + The current output will start with all xdp program attachments, followed by + all tcx, netkit, then tc class/qdisc bpf program attachments, then + flow_dissector and finally netfilter programs. Both xdp programs and + tcx/netkit/tc programs are ordered based on ifindex number. If multiple bpf + programs attached to the same networking device through **tc**, the order + will be first all bpf programs attached to tcx, netkit, then tc classes, + then all bpf programs attached to non clsact qdiscs, and finally all bpf + programs attached to root and clsact qdisc. + +bpftool net attach *ATTACH_TYPE* *PROG* dev *NAME* [ overwrite ] + Attach bpf program *PROG* to network interface *NAME* with type specified + by *ATTACH_TYPE*. Previously attached bpf program can be replaced by the + command used with **overwrite** option. Currently, only XDP-related modes + are supported for *ATTACH_TYPE*. + + *ATTACH_TYPE* can be of: + **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it; + **xdpgeneric** - Generic XDP. 
runs at generic XDP hook when packet already enters receive path as skb; + **xdpdrv** - Native XDP. runs earliest point in driver's receive path; + **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception; + +bpftool net detach *ATTACH_TYPE* dev *NAME* + Detach bpf program attached to network interface *NAME* with type specified + by *ATTACH_TYPE*. To detach bpf program, same *ATTACH_TYPE* previously used + for attach must be specified. Currently, only XDP-related modes are + supported for *ATTACH_TYPE*. + +bpftool net help + Print short help message. OPTIONS ======= - .. include:: common_options.rst +.. include:: common_options.rst EXAMPLES ======== diff --git a/docs/bpftool-perf.rst b/docs/bpftool-perf.rst index 5fea633a..8c1ae55b 100644 --- a/docs/bpftool-perf.rst +++ b/docs/bpftool-perf.rst @@ -14,37 +14,37 @@ tool for inspection of perf related bpf prog attachments SYNOPSIS ======== - **bpftool** [*OPTIONS*] **perf** *COMMAND* +**bpftool** [*OPTIONS*] **perf** *COMMAND* - *OPTIONS* := { |COMMON_OPTIONS| } +*OPTIONS* := { |COMMON_OPTIONS| } - *COMMANDS* := - { **show** | **list** | **help** } +*COMMANDS* := +{ **show** | **list** | **help** } PERF COMMANDS ============= -| **bpftool** **perf** { **show** | **list** } -| **bpftool** **perf help** +| **bpftool** **perf** { **show** | **list** } +| **bpftool** **perf help** DESCRIPTION =========== - **bpftool perf { show | list }** - List all raw_tracepoint, tracepoint, kprobe attachment in the system. +bpftool perf { show | list } + List all raw_tracepoint, tracepoint, kprobe attachment in the system. - Output will start with process id and file descriptor in that process, - followed by bpf program id, attachment information, and attachment point. - The attachment point for raw_tracepoint/tracepoint is the trace probe name. - The attachment point for k[ret]probe is either symbol name and offset, - or a kernel virtual address. 
- The attachment point for u[ret]probe is the file name and the file offset. + Output will start with process id and file descriptor in that process, + followed by bpf program id, attachment information, and attachment point. + The attachment point for raw_tracepoint/tracepoint is the trace probe name. + The attachment point for k[ret]probe is either symbol name and offset, or a + kernel virtual address. The attachment point for u[ret]probe is the file + name and the file offset. - **bpftool perf help** - Print short help message. +bpftool perf help + Print short help message. OPTIONS ======= - .. include:: common_options.rst +.. include:: common_options.rst EXAMPLES ======== diff --git a/docs/bpftool-prog.rst b/docs/bpftool-prog.rst index dcae81bd..8e730cfb 100644 --- a/docs/bpftool-prog.rst +++ b/docs/bpftool-prog.rst @@ -14,248 +14,226 @@ tool for inspection and simple manipulation of eBPF progs SYNOPSIS ======== - **bpftool** [*OPTIONS*] **prog** *COMMAND* +**bpftool** [*OPTIONS*] **prog** *COMMAND* - *OPTIONS* := { |COMMON_OPTIONS| | - { **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } | - { **-L** | **--use-loader** } } +*OPTIONS* := { |COMMON_OPTIONS| | +{ **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } | +{ **-L** | **--use-loader** } } - *COMMANDS* := - { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** | - **loadall** | **help** } +*COMMANDS* := +{ **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** | +**loadall** | **help** } PROG COMMANDS ============= -| **bpftool** **prog** { **show** | **list** } [*PROG*] -| **bpftool** **prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }] -| **bpftool** **prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }] -| **bpftool** **prog pin** *PROG* *FILE* -| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** { 
**idx** *IDX* | **name** *NAME* } *MAP*] [{ **offload_dev** | **xdpmeta_dev** } *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**] -| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*] -| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*] -| **bpftool** **prog tracelog** -| **bpftool** **prog run** *PROG* **data_in** *FILE* [**data_out** *FILE* [**data_size_out** *L*]] [**ctx_in** *FILE* [**ctx_out** *FILE* [**ctx_size_out** *M*]]] [**repeat** *N*] -| **bpftool** **prog profile** *PROG* [**duration** *DURATION*] *METRICs* -| **bpftool** **prog help** +| **bpftool** **prog** { **show** | **list** } [*PROG*] +| **bpftool** **prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }] +| **bpftool** **prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }] +| **bpftool** **prog pin** *PROG* *FILE* +| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** { **idx** *IDX* | **name** *NAME* } *MAP*] [{ **offload_dev** | **xdpmeta_dev** } *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**] +| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*] +| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*] +| **bpftool** **prog tracelog** +| **bpftool** **prog run** *PROG* **data_in** *FILE* [**data_out** *FILE* [**data_size_out** *L*]] [**ctx_in** *FILE* [**ctx_out** *FILE* [**ctx_size_out** *M*]]] [**repeat** *N*] +| **bpftool** **prog profile** *PROG* [**duration** *DURATION*] *METRICs* +| **bpftool** **prog help** | -| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } -| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* } -| *TYPE* := { -| **socket** | **kprobe** | **kretprobe** | **classifier** | **action** | -| **tracepoint** | **raw_tracepoint** | **xdp** | **perf_event** | **cgroup/skb** | -| **cgroup/sock** | **cgroup/dev** | **lwt_in** | **lwt_out** | **lwt_xmit** | -| **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | 
**lirc_mode2** | -| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** | -| **cgroup/connect4** | **cgroup/connect6** | **cgroup/getpeername4** | **cgroup/getpeername6** | -| **cgroup/getsockname4** | **cgroup/getsockname6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** | -| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** | -| **cgroup/getsockopt** | **cgroup/setsockopt** | **cgroup/sock_release** | -| **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup** -| } -| *ATTACH_TYPE* := { -| **sk_msg_verdict** | **sk_skb_verdict** | **sk_skb_stream_verdict** | -| **sk_skb_stream_parser** | **flow_dissector** -| } -| *METRICs* := { -| **cycles** | **instructions** | **l1d_loads** | **llc_misses** | -| **itlb_misses** | **dtlb_misses** -| } +| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } +| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* } +| *TYPE* := { +| **socket** | **kprobe** | **kretprobe** | **classifier** | **action** | +| **tracepoint** | **raw_tracepoint** | **xdp** | **perf_event** | **cgroup/skb** | +| **cgroup/sock** | **cgroup/dev** | **lwt_in** | **lwt_out** | **lwt_xmit** | +| **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** | +| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** | +| **cgroup/connect4** | **cgroup/connect6** | **cgroup/connect_unix** | +| **cgroup/getpeername4** | **cgroup/getpeername6** | **cgroup/getpeername_unix** | +| **cgroup/getsockname4** | **cgroup/getsockname6** | **cgroup/getsockname_unix** | +| **cgroup/sendmsg4** | **cgroup/sendmsg6** | **cgroup/sendmsg_unix** | +| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/recvmsg_unix** | **cgroup/sysctl** | +| **cgroup/getsockopt** | **cgroup/setsockopt** | **cgroup/sock_release** | +| **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup** +| } +| *ATTACH_TYPE* := { +| **sk_msg_verdict** | 
**sk_skb_verdict** | **sk_skb_stream_verdict** | +| **sk_skb_stream_parser** | **flow_dissector** +| } +| *METRICs* := { +| **cycles** | **instructions** | **l1d_loads** | **llc_misses** | +| **itlb_misses** | **dtlb_misses** +| } DESCRIPTION =========== - **bpftool prog { show | list }** [*PROG*] - Show information about loaded programs. If *PROG* is - specified show information only about given programs, - otherwise list all programs currently loaded on the system. - In case of **tag** or **name**, *PROG* may match several - programs which will all be shown. - - Output will start with program ID followed by program type and - zero or more named attributes (depending on kernel version). - - Since Linux 5.1 the kernel can collect statistics on BPF - programs (such as the total time spent running the program, - and the number of times it was run). If available, bpftool - shows such statistics. However, the kernel does not collect - them by defaults, as it slightly impacts performance on each - program run. Activation or deactivation of the feature is - performed via the **kernel.bpf_stats_enabled** sysctl knob. - - Since Linux 5.8 bpftool is able to discover information about - processes that hold open file descriptors (FDs) against BPF - programs. On such kernels bpftool will automatically emit this - information as well. - - **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }] - Dump eBPF instructions of the programs from the kernel. By - default, eBPF will be disassembled and printed to standard - output in human-readable format. In this case, **opcodes** - controls if raw opcodes should be printed as well. - - In case of **tag** or **name**, *PROG* may match several - programs which will all be dumped. However, if **file** or - **visual** is specified, *PROG* must match a single program. - - If **file** is specified, the binary image will instead be - written to *FILE*. 
- - If **visual** is specified, control flow graph (CFG) will be - built instead, and eBPF instructions will be presented with - CFG in DOT format, on standard output. - - If the programs have line_info available, the source line will - be displayed. If **linum** is specified, the filename, line - number and line column will also be displayed. - - **bpftool prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }] - Dump jited image (host machine code) of the program. - - If *FILE* is specified image will be written to a file, - otherwise it will be disassembled and printed to stdout. - *PROG* must match a single program when **file** is specified. - - **opcodes** controls if raw opcodes will be printed. - - If the prog has line_info available, the source line will - be displayed. If **linum** is specified, the filename, line - number and line column will also be displayed. - - **bpftool prog pin** *PROG* *FILE* - Pin program *PROG* as *FILE*. - - Note: *FILE* must be located in *bpffs* mount. It must not - contain a dot character ('.'), which is reserved for future - extensions of *bpffs*. - - **bpftool prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** { **idx** *IDX* | **name** *NAME* } *MAP*] [{ **offload_dev** | **xdpmeta_dev** } *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**] - Load bpf program(s) from binary *OBJ* and pin as *PATH*. - **bpftool prog load** pins only the first program from the - *OBJ* as *PATH*. **bpftool prog loadall** pins all programs - from the *OBJ* under *PATH* directory. - **type** is optional, if not specified program type will be - inferred from section names. - By default bpftool will create new maps as declared in the ELF - object being loaded. **map** parameter allows for the reuse - of existing maps. It can be specified multiple times, each - time for a different map. *IDX* refers to index of the map - to be replaced in the ELF file counting from 0, while *NAME* - allows to replace a map by name. 
*MAP* specifies the map to - use, referring to it by **id** or through a **pinned** file. - If **offload_dev** *NAME* is specified program will be loaded - onto given networking device (offload). - If **xdpmeta_dev** *NAME* is specified program will become - device-bound without offloading, this facilitates access - to XDP metadata. - Optional **pinmaps** argument can be provided to pin all - maps under *MAP_DIR* directory. - - If **autoattach** is specified program will be attached - before pin. In that case, only the link (representing the - program attached to its hook) is pinned, not the program as - such, so the path won't show in **bpftool prog show -f**, - only show in **bpftool link show -f**. Also, this only works - when bpftool (libbpf) is able to infer all necessary - information from the object file, in particular, it's not - supported for all program types. If a program does not - support autoattach, bpftool falls back to regular pinning - for that program instead. - - Note: *PATH* must be located in *bpffs* mount. It must not - contain a dot character ('.'), which is reserved for future - extensions of *bpffs*. - - **bpftool prog attach** *PROG* *ATTACH_TYPE* [*MAP*] - Attach bpf program *PROG* (with type specified by - *ATTACH_TYPE*). Most *ATTACH_TYPEs* require a *MAP* - parameter, with the exception of *flow_dissector* which is - attached to current networking name space. - - **bpftool prog detach** *PROG* *ATTACH_TYPE* [*MAP*] - Detach bpf program *PROG* (with type specified by - *ATTACH_TYPE*). Most *ATTACH_TYPEs* require a *MAP* - parameter, with the exception of *flow_dissector* which is - detached from the current networking name space. - - **bpftool prog tracelog** - Dump the trace pipe of the system to the console (stdout). - Hit <Ctrl+C> to stop printing. BPF programs can write to this - trace pipe at runtime with the **bpf_trace_printk**\ () helper. - This should be used only for debugging purposes. 
For - streaming data from BPF programs to user space, one can use - perf events (see also **bpftool-map**\ (8)). - - **bpftool prog run** *PROG* **data_in** *FILE* [**data_out** *FILE* [**data_size_out** *L*]] [**ctx_in** *FILE* [**ctx_out** *FILE* [**ctx_size_out** *M*]]] [**repeat** *N*] - Run BPF program *PROG* in the kernel testing infrastructure - for BPF, meaning that the program works on the data and - context provided by the user, and not on actual packets or - monitored functions etc. Return value and duration for the - test run are printed out to the console. - - Input data is read from the *FILE* passed with **data_in**. - If this *FILE* is "**-**", input data is read from standard - input. Input context, if any, is read from *FILE* passed with - **ctx_in**. Again, "**-**" can be used to read from standard - input, but only if standard input is not already in use for - input data. If a *FILE* is passed with **data_out**, output - data is written to that file. Similarly, output context is - written to the *FILE* passed with **ctx_out**. For both - output flows, "**-**" can be used to print to the standard - output (as plain text, or JSON if relevant option was - passed). If output keywords are omitted, output data and - context are discarded. Keywords **data_size_out** and - **ctx_size_out** are used to pass the size (in bytes) for the - output buffers to the kernel, although the default of 32 kB - should be more than enough for most cases. - - Keyword **repeat** is used to indicate the number of - consecutive runs to perform. Note that output data and - context printed to files correspond to the last of those - runs. The duration printed out at the end of the runs is an - average over all runs performed by the command. - - Not all program types support test run. Among those which do, - not all of them can take the **ctx_in**/**ctx_out** - arguments. bpftool does not perform checks on program types. 
- - **bpftool prog profile** *PROG* [**duration** *DURATION*] *METRICs* - Profile *METRICs* for bpf program *PROG* for *DURATION* - seconds or until user hits . *DURATION* is optional. - If *DURATION* is not specified, the profiling will run up to - **UINT_MAX** seconds. - - **bpftool prog help** - Print short help message. +bpftool prog { show | list } [*PROG*] + Show information about loaded programs. If *PROG* is specified show + information only about given programs, otherwise list all programs + currently loaded on the system. In case of **tag** or **name**, *PROG* may + match several programs which will all be shown. + + Output will start with program ID followed by program type and zero or more + named attributes (depending on kernel version). + + Since Linux 5.1 the kernel can collect statistics on BPF programs (such as + the total time spent running the program, and the number of times it was + run). If available, bpftool shows such statistics. However, the kernel does + not collect them by defaults, as it slightly impacts performance on each + program run. Activation or deactivation of the feature is performed via the + **kernel.bpf_stats_enabled** sysctl knob. + + Since Linux 5.8 bpftool is able to discover information about processes + that hold open file descriptors (FDs) against BPF programs. On such kernels + bpftool will automatically emit this information as well. + +bpftool prog dump xlated *PROG* [{ file *FILE* | [opcodes] [linum] [visual] }] + Dump eBPF instructions of the programs from the kernel. By default, eBPF + will be disassembled and printed to standard output in human-readable + format. In this case, **opcodes** controls if raw opcodes should be printed + as well. + + In case of **tag** or **name**, *PROG* may match several programs which + will all be dumped. However, if **file** or **visual** is specified, + *PROG* must match a single program. + + If **file** is specified, the binary image will instead be written to + *FILE*. 
+ + If **visual** is specified, control flow graph (CFG) will be built instead, + and eBPF instructions will be presented with CFG in DOT format, on standard + output. + + If the programs have line_info available, the source line will be + displayed. If **linum** is specified, the filename, line number and line + column will also be displayed. + +bpftool prog dump jited *PROG* [{ file *FILE* | [opcodes] [linum] }] + Dump jited image (host machine code) of the program. + + If *FILE* is specified image will be written to a file, otherwise it will + be disassembled and printed to stdout. *PROG* must match a single program + when **file** is specified. + + **opcodes** controls if raw opcodes will be printed. + + If the prog has line_info available, the source line will be displayed. If + **linum** is specified, the filename, line number and line column will also + be displayed. + +bpftool prog pin *PROG* *FILE* + Pin program *PROG* as *FILE*. + + Note: *FILE* must be located in *bpffs* mount. It must not contain a dot + character ('.'), which is reserved for future extensions of *bpffs*. + +bpftool prog { load | loadall } *OBJ* *PATH* [type *TYPE*] [map { idx *IDX* | name *NAME* } *MAP*] [{ offload_dev | xdpmeta_dev } *NAME*] [pinmaps *MAP_DIR*] [autoattach] + Load bpf program(s) from binary *OBJ* and pin as *PATH*. **bpftool prog + load** pins only the first program from the *OBJ* as *PATH*. **bpftool prog + loadall** pins all programs from the *OBJ* under *PATH* directory. **type** + is optional, if not specified program type will be inferred from section + names. By default bpftool will create new maps as declared in the ELF + object being loaded. **map** parameter allows for the reuse of existing + maps. It can be specified multiple times, each time for a different map. + *IDX* refers to index of the map to be replaced in the ELF file counting + from 0, while *NAME* allows to replace a map by name. 
*MAP* specifies the + map to use, referring to it by **id** or through a **pinned** file. If + **offload_dev** *NAME* is specified program will be loaded onto given + networking device (offload). If **xdpmeta_dev** *NAME* is specified program + will become device-bound without offloading, this facilitates access to XDP + metadata. Optional **pinmaps** argument can be provided to pin all maps + under *MAP_DIR* directory. + + If **autoattach** is specified program will be attached before pin. In that + case, only the link (representing the program attached to its hook) is + pinned, not the program as such, so the path won't show in **bpftool prog + show -f**, only show in **bpftool link show -f**. Also, this only works + when bpftool (libbpf) is able to infer all necessary information from the + object file, in particular, it's not supported for all program types. If a + program does not support autoattach, bpftool falls back to regular pinning + for that program instead. + + Note: *PATH* must be located in *bpffs* mount. It must not contain a dot + character ('.'), which is reserved for future extensions of *bpffs*. + +bpftool prog attach *PROG* *ATTACH_TYPE* [*MAP*] + Attach bpf program *PROG* (with type specified by *ATTACH_TYPE*). Most + *ATTACH_TYPEs* require a *MAP* parameter, with the exception of + *flow_dissector* which is attached to current networking name space. + +bpftool prog detach *PROG* *ATTACH_TYPE* [*MAP*] + Detach bpf program *PROG* (with type specified by *ATTACH_TYPE*). Most + *ATTACH_TYPEs* require a *MAP* parameter, with the exception of + *flow_dissector* which is detached from the current networking name space. + +bpftool prog tracelog + Dump the trace pipe of the system to the console (stdout). Hit to + stop printing. BPF programs can write to this trace pipe at runtime with + the **bpf_trace_printk**\ () helper. This should be used only for debugging + purposes. 
For streaming data from BPF programs to user space, one can use + perf events (see also **bpftool-map**\ (8)). + +bpftool prog run *PROG* data_in *FILE* [data_out *FILE* [data_size_out *L*]] [ctx_in *FILE* [ctx_out *FILE* [ctx_size_out *M*]]] [repeat *N*] + Run BPF program *PROG* in the kernel testing infrastructure for BPF, + meaning that the program works on the data and context provided by the + user, and not on actual packets or monitored functions etc. Return value + and duration for the test run are printed out to the console. + + Input data is read from the *FILE* passed with **data_in**. If this *FILE* + is "**-**", input data is read from standard input. Input context, if any, + is read from *FILE* passed with **ctx_in**. Again, "**-**" can be used to + read from standard input, but only if standard input is not already in use + for input data. If a *FILE* is passed with **data_out**, output data is + written to that file. Similarly, output context is written to the *FILE* + passed with **ctx_out**. For both output flows, "**-**" can be used to + print to the standard output (as plain text, or JSON if relevant option was + passed). If output keywords are omitted, output data and context are + discarded. Keywords **data_size_out** and **ctx_size_out** are used to pass + the size (in bytes) for the output buffers to the kernel, although the + default of 32 kB should be more than enough for most cases. + + Keyword **repeat** is used to indicate the number of consecutive runs to + perform. Note that output data and context printed to files correspond to + the last of those runs. The duration printed out at the end of the runs is + an average over all runs performed by the command. + + Not all program types support test run. Among those which do, not all of + them can take the **ctx_in**/**ctx_out** arguments. bpftool does not + perform checks on program types. 
+ +bpftool prog profile *PROG* [duration *DURATION*] *METRICs* + Profile *METRICs* for bpf program *PROG* for *DURATION* seconds or until + user hits . *DURATION* is optional. If *DURATION* is not specified, + the profiling will run up to **UINT_MAX** seconds. + +bpftool prog help + Print short help message. OPTIONS ======= - .. include:: common_options.rst - - -f, --bpffs - When showing BPF programs, show file names of pinned - programs. - - -m, --mapcompat - Allow loading maps with unknown map definitions. - - -n, --nomount - Do not automatically attempt to mount any virtual file system - (such as tracefs or BPF virtual file system) when necessary. - - -L, --use-loader - Load program as a "loader" program. This is useful to debug - the generation of such programs. When this option is in - use, bpftool attempts to load the programs from the object - file into the kernel, but does not pin them (therefore, the - *PATH* must not be provided). - - When combined with the **-d**\ \|\ **--debug** option, - additional debug messages are generated, and the execution - of the loader program will use the **bpf_trace_printk**\ () - helper to log each step of loading BTF, creating the maps, - and loading the programs (see **bpftool prog tracelog** as - a way to dump those messages). +.. include:: common_options.rst + +-f, --bpffs + When showing BPF programs, show file names of pinned programs. + +-m, --mapcompat + Allow loading maps with unknown map definitions. + +-n, --nomount + Do not automatically attempt to mount any virtual file system (such as + tracefs or BPF virtual file system) when necessary. + +-L, --use-loader + Load program as a "loader" program. This is useful to debug the generation + of such programs. When this option is in use, bpftool attempts to load the + programs from the object file into the kernel, but does not pin them + (therefore, the *PATH* must not be provided). 
+ + When combined with the **-d**\ \|\ **--debug** option, additional debug + messages are generated, and the execution of the loader program will use + the **bpf_trace_printk**\ () helper to log each step of loading BTF, + creating the maps, and loading the programs (see **bpftool prog tracelog** + as a way to dump those messages). EXAMPLES ======== diff --git a/docs/bpftool-struct_ops.rst b/docs/bpftool-struct_ops.rst index 8022b532..e871b953 100644 --- a/docs/bpftool-struct_ops.rst +++ b/docs/bpftool-struct_ops.rst @@ -14,61 +14,60 @@ tool to register/unregister/introspect BPF struct_ops SYNOPSIS ======== - **bpftool** [*OPTIONS*] **struct_ops** *COMMAND* +**bpftool** [*OPTIONS*] **struct_ops** *COMMAND* - *OPTIONS* := { |COMMON_OPTIONS| } +*OPTIONS* := { |COMMON_OPTIONS| } - *COMMANDS* := - { **show** | **list** | **dump** | **register** | **unregister** | **help** } +*COMMANDS* := +{ **show** | **list** | **dump** | **register** | **unregister** | **help** } STRUCT_OPS COMMANDS =================== -| **bpftool** **struct_ops { show | list }** [*STRUCT_OPS_MAP*] -| **bpftool** **struct_ops dump** [*STRUCT_OPS_MAP*] -| **bpftool** **struct_ops register** *OBJ* [*LINK_DIR*] -| **bpftool** **struct_ops unregister** *STRUCT_OPS_MAP* -| **bpftool** **struct_ops help** +| **bpftool** **struct_ops { show | list }** [*STRUCT_OPS_MAP*] +| **bpftool** **struct_ops dump** [*STRUCT_OPS_MAP*] +| **bpftool** **struct_ops register** *OBJ* [*LINK_DIR*] +| **bpftool** **struct_ops unregister** *STRUCT_OPS_MAP* +| **bpftool** **struct_ops help** | -| *STRUCT_OPS_MAP* := { **id** *STRUCT_OPS_MAP_ID* | **name** *STRUCT_OPS_MAP_NAME* } -| *OBJ* := /a/file/of/bpf_struct_ops.o +| *STRUCT_OPS_MAP* := { **id** *STRUCT_OPS_MAP_ID* | **name** *STRUCT_OPS_MAP_NAME* } +| *OBJ* := /a/file/of/bpf_struct_ops.o DESCRIPTION =========== - **bpftool struct_ops { show | list }** [*STRUCT_OPS_MAP*] - Show brief information about the struct_ops in the system. 
- If *STRUCT_OPS_MAP* is specified, it shows information only - for the given struct_ops. Otherwise, it lists all struct_ops - currently existing in the system. - - Output will start with struct_ops map ID, followed by its map - name and its struct_ops's kernel type. - - **bpftool struct_ops dump** [*STRUCT_OPS_MAP*] - Dump details information about the struct_ops in the system. - If *STRUCT_OPS_MAP* is specified, it dumps information only - for the given struct_ops. Otherwise, it dumps all struct_ops - currently existing in the system. - - **bpftool struct_ops register** *OBJ* [*LINK_DIR*] - Register bpf struct_ops from *OBJ*. All struct_ops under - the ELF section ".struct_ops" and ".struct_ops.link" will - be registered to its kernel subsystem. For each - struct_ops in the ".struct_ops.link" section, a link - will be created. You can give *LINK_DIR* to provide a - directory path where these links will be pinned with the - same name as their corresponding map name. - - **bpftool struct_ops unregister** *STRUCT_OPS_MAP* - Unregister the *STRUCT_OPS_MAP* from the kernel subsystem. - - **bpftool struct_ops help** - Print short help message. +bpftool struct_ops { show | list } [*STRUCT_OPS_MAP*] + Show brief information about the struct_ops in the system. If + *STRUCT_OPS_MAP* is specified, it shows information only for the given + struct_ops. Otherwise, it lists all struct_ops currently existing in the + system. + + Output will start with struct_ops map ID, followed by its map name and its + struct_ops's kernel type. + +bpftool struct_ops dump [*STRUCT_OPS_MAP*] + Dump details information about the struct_ops in the system. If + *STRUCT_OPS_MAP* is specified, it dumps information only for the given + struct_ops. Otherwise, it dumps all struct_ops currently existing in the + system. + +bpftool struct_ops register *OBJ* [*LINK_DIR*] + Register bpf struct_ops from *OBJ*. 
All struct_ops under the ELF section + ".struct_ops" and ".struct_ops.link" will be registered to its kernel + subsystem. For each struct_ops in the ".struct_ops.link" section, a link + will be created. You can give *LINK_DIR* to provide a directory path where + these links will be pinned with the same name as their corresponding map + name. + +bpftool struct_ops unregister *STRUCT_OPS_MAP* + Unregister the *STRUCT_OPS_MAP* from the kernel subsystem. + +bpftool struct_ops help + Print short help message. OPTIONS ======= - .. include:: common_options.rst +.. include:: common_options.rst EXAMPLES ======== diff --git a/docs/bpftool.rst b/docs/bpftool.rst index 6965c94d..f38ae5c4 100644 --- a/docs/bpftool.rst +++ b/docs/bpftool.rst @@ -14,57 +14,57 @@ tool for inspection and simple manipulation of eBPF programs and maps SYNOPSIS ======== - **bpftool** [*OPTIONS*] *OBJECT* { *COMMAND* | **help** } +**bpftool** [*OPTIONS*] *OBJECT* { *COMMAND* | **help** } - **bpftool** **batch file** *FILE* +**bpftool** **batch file** *FILE* - **bpftool** **version** +**bpftool** **version** - *OBJECT* := { **map** | **program** | **link** | **cgroup** | **perf** | **net** | **feature** | - **btf** | **gen** | **struct_ops** | **iter** } +*OBJECT* := { **map** | **prog** | **link** | **cgroup** | **perf** | **net** | **feature** | +**btf** | **gen** | **struct_ops** | **iter** } - *OPTIONS* := { { **-V** | **--version** } | |COMMON_OPTIONS| } +*OPTIONS* := { { **-V** | **--version** } | |COMMON_OPTIONS| } - *MAP-COMMANDS* := - { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** | - **delete** | **pin** | **event_pipe** | **help** } +*MAP-COMMANDS* := +{ **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** | +**delete** | **pin** | **event_pipe** | **help** } - *PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin** | - **load** | **attach** | **detach** | **help** } +*PROG-COMMANDS* := { 
**show** | **list** | **dump jited** | **dump xlated** | **pin** | +**load** | **attach** | **detach** | **help** } - *LINK-COMMANDS* := { **show** | **list** | **pin** | **detach** | **help** } +*LINK-COMMANDS* := { **show** | **list** | **pin** | **detach** | **help** } - *CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** } +*CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** } - *PERF-COMMANDS* := { **show** | **list** | **help** } +*PERF-COMMANDS* := { **show** | **list** | **help** } - *NET-COMMANDS* := { **show** | **list** | **help** } +*NET-COMMANDS* := { **show** | **list** | **help** } - *FEATURE-COMMANDS* := { **probe** | **help** } +*FEATURE-COMMANDS* := { **probe** | **help** } - *BTF-COMMANDS* := { **show** | **list** | **dump** | **help** } +*BTF-COMMANDS* := { **show** | **list** | **dump** | **help** } - *GEN-COMMANDS* := { **object** | **skeleton** | **min_core_btf** | **help** } +*GEN-COMMANDS* := { **object** | **skeleton** | **min_core_btf** | **help** } - *STRUCT-OPS-COMMANDS* := { **show** | **list** | **dump** | **register** | **unregister** | **help** } +*STRUCT-OPS-COMMANDS* := { **show** | **list** | **dump** | **register** | **unregister** | **help** } - *ITER-COMMANDS* := { **pin** | **help** } +*ITER-COMMANDS* := { **pin** | **help** } DESCRIPTION =========== - *bpftool* allows for inspection and simple modification of BPF objects - on the system. +*bpftool* allows for inspection and simple modification of BPF objects on the +system. - Note that format of the output of all tools is not guaranteed to be - stable and should not be depended upon. +Note that format of the output of all tools is not guaranteed to be stable and +should not be depended upon. OPTIONS ======= - .. include:: common_options.rst +.. include:: common_options.rst - -m, --mapcompat - Allow loading maps with unknown map definitions. +-m, --mapcompat + Allow loading maps with unknown map definitions. 
- -n, --nomount - Do not automatically attempt to mount any virtual file system - (such as tracefs or BPF virtual file system) when necessary. +-n, --nomount + Do not automatically attempt to mount any virtual file system (such as + tracefs or BPF virtual file system) when necessary. diff --git a/docs/common_options.rst b/docs/common_options.rst index 30df7a70..9234b9da 100644 --- a/docs/common_options.rst +++ b/docs/common_options.rst @@ -1,25 +1,23 @@ .. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -h, --help - Print short help message (similar to **bpftool help**). + Print short help message (similar to **bpftool help**). -V, --version - Print bpftool's version number (similar to **bpftool version**), the - number of the libbpf version in use, and optional features that were - included when bpftool was compiled. Optional features include linking - against LLVM or libbfd to provide the disassembler for JIT-ted - programs (**bpftool prog dump jited**) and usage of BPF skeletons - (some features like **bpftool prog profile** or showing pids - associated to BPF objects may rely on it). + Print bpftool's version number (similar to **bpftool version**), the number + of the libbpf version in use, and optional features that were included when + bpftool was compiled. Optional features include linking against LLVM or + libbfd to provide the disassembler for JIT-ted programs (**bpftool prog + dump jited**) and usage of BPF skeletons (some features like **bpftool prog + profile** or showing pids associated to BPF objects may rely on it). -j, --json - Generate JSON output. For commands that cannot produce JSON, this - option has no effect. + Generate JSON output. For commands that cannot produce JSON, this option + has no effect. -p, --pretty - Generate human-readable JSON output. Implies **-j**. + Generate human-readable JSON output. Implies **-j**. -d, --debug - Print all logs available, even debug-level information. 
This includes - logs from libbpf as well as from the verifier, when attempting to - load programs. + Print all logs available, even debug-level information. This includes logs + from libbpf as well as from the verifier, when attempting to load programs. diff --git a/include/linux/types.h b/include/linux/types.h index f991d117..d2df5ed0 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -12,6 +12,7 @@ typedef uint64_t u64; typedef __u32 u32; +typedef __u16 u16; typedef __u8 u8; #define __bitwise__ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 70bfa997..79c54827 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -42,6 +42,7 @@ #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ #define BPF_JSLT 0xc0 /* SLT is signed, '<' */ #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ +#define BPF_JCOND 0xe0 /* conditional pseudo jumps: may_goto, goto_or_nop */ #define BPF_CALL 0x80 /* function call */ #define BPF_EXIT 0x90 /* function return */ @@ -50,6 +51,10 @@ #define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */ #define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */ +enum bpf_cond_pseudo_jmp { + BPF_MAY_GOTO = 0, +}; + /* Register numbers */ enum { BPF_REG_0 = 0, @@ -77,12 +82,29 @@ struct bpf_insn { __s32 imm; /* signed immediate constant */ }; -/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ +/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for + * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for + * the trailing flexible array member) instead. + */ struct bpf_lpm_trie_key { __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ __u8 data[0]; /* Arbitrary size */ }; +/* Header for bpf_lpm_trie_key structs */ +struct bpf_lpm_trie_key_hdr { + __u32 prefixlen; +}; + +/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. 
*/ +struct bpf_lpm_trie_key_u8 { + union { + struct bpf_lpm_trie_key_hdr hdr; + __u32 prefixlen; + }; + __u8 data[]; /* Arbitrary size */ +}; + struct bpf_cgroup_storage_key { __u64 cgroup_inode_id; /* cgroup inode id */ __u32 attach_type; /* program attach type (enum bpf_attach_type) */ @@ -617,7 +639,11 @@ union bpf_iter_link_info { * to NULL to begin the batched operation. After each subsequent * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant * *out_batch* as the *in_batch* for the next operation to - * continue iteration from the current point. + * continue iteration from the current point. Both *in_batch* and + * *out_batch* must point to memory large enough to hold a key, + * except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH, + * LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters + * must be at least 4 bytes wide regardless of key size. * * The *keys* and *values* are output parameters which must point * to memory large enough to hold *count* items based on the key @@ -847,6 +873,36 @@ union bpf_iter_link_info { * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * + * BPF_TOKEN_CREATE + * Description + * Create BPF token with embedded information about what + * BPF-related functionality it allows: + * - a set of allowed bpf() syscall commands; + * - a set of allowed BPF map types to be created with + * BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed; + * - a set of allowed BPF program types and BPF program attach + * types to be loaded with BPF_PROG_LOAD command, if + * BPF_PROG_LOAD itself is allowed. + * + * BPF token is created (derived) from an instance of BPF FS, + * assuming it has necessary delegation mount options specified. + * This BPF token can be passed as an extra parameter to various + * bpf() syscall commands to grant BPF subsystem functionality to + * unprivileged processes. 
+ * + * When created, BPF token is "associated" with the owning + * user namespace of BPF FS instance (super block) that it was + * derived from, and subsequent BPF operations performed with + * BPF token would be performing capabilities checks (i.e., + * CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within + * that user namespace. Without BPF token, such capabilities + * have to be granted in init user namespace, making bpf() + * syscall incompatible with user namespace, for the most part. + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * * NOTES * eBPF objects (maps and programs) can be shared between processes. * @@ -901,6 +957,8 @@ enum bpf_cmd { BPF_ITER_CREATE, BPF_LINK_DETACH, BPF_PROG_BIND_MAP, + BPF_TOKEN_CREATE, + __MAX_BPF_CMD, }; enum bpf_map_type { @@ -951,6 +1009,8 @@ enum bpf_map_type { BPF_MAP_TYPE_BLOOM_FILTER, BPF_MAP_TYPE_USER_RINGBUF, BPF_MAP_TYPE_CGRP_STORAGE, + BPF_MAP_TYPE_ARENA, + __MAX_BPF_MAP_TYPE }; /* Note that tracing related programs such as @@ -995,6 +1055,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_SK_LOOKUP, BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */ BPF_PROG_TYPE_NETFILTER, + __MAX_BPF_PROG_TYPE }; enum bpf_attach_type { @@ -1047,6 +1108,13 @@ enum bpf_attach_type { BPF_TCX_INGRESS, BPF_TCX_EGRESS, BPF_TRACE_UPROBE_MULTI, + BPF_CGROUP_UNIX_CONNECT, + BPF_CGROUP_UNIX_SENDMSG, + BPF_CGROUP_UNIX_RECVMSG, + BPF_CGROUP_UNIX_GETPEERNAME, + BPF_CGROUP_UNIX_GETSOCKNAME, + BPF_NETKIT_PRIMARY, + BPF_NETKIT_PEER, __MAX_BPF_ATTACH_TYPE }; @@ -1066,9 +1134,12 @@ enum bpf_link_type { BPF_LINK_TYPE_NETFILTER = 10, BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, - MAX_BPF_LINK_TYPE, + BPF_LINK_TYPE_NETKIT = 13, + __MAX_BPF_LINK_TYPE, }; +#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE + enum bpf_perf_event_type { BPF_PERF_EVENT_UNSPEC = 0, BPF_PERF_EVENT_UPROBE = 1, @@ -1192,6 +1263,9 @@ enum bpf_perf_event_type { */ 
#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6) +/* The verifier internal test flag. Behavior is undefined */ +#define BPF_F_TEST_REG_INVARIANTS (1U << 7) + /* link_create.kprobe_multi.flags used in LINK_CREATE command for * BPF_TRACE_KPROBE_MULTI attach type to create return probe. */ @@ -1265,6 +1339,10 @@ enum { */ #define BPF_PSEUDO_KFUNC_CALL 2 +enum bpf_addr_space_cast { + BPF_ADDR_SPACE_CAST = 1, +}; + /* flags for BPF_MAP_UPDATE_ELEM command */ enum { BPF_ANY = 0, /* create new element or update existing */ @@ -1317,6 +1395,18 @@ enum { /* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */ BPF_F_PATH_FD = (1U << 14), + +/* Flag for value_type_btf_obj_fd, the fd is available */ + BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15), + +/* BPF token FD is passed in a corresponding command's token_fd field */ + BPF_F_TOKEN_FD = (1U << 16), + +/* When user space page faults in bpf_arena send SIGSEGV instead of inserting new page */ + BPF_F_SEGV_ON_FAULT = (1U << 17), + +/* Do not translate kernel bpf_arena pointers to user pointers */ + BPF_F_NO_USER_CONV = (1U << 18), }; /* Flags for BPF_PROG_QUERY. */ @@ -1388,8 +1478,20 @@ union bpf_attr { * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the * number of hash functions (if 0, the bloom filter will default * to using 5 hash functions). + * + * BPF_MAP_TYPE_ARENA - contains the address where user space + * is going to mmap() the arena. It has to be page aligned. */ __u64 map_extra; + + __s32 value_type_btf_obj_fd; /* fd pointing to a BTF + * type data for + * btf_vmlinux_value_type_id. + */ + /* BPF token FD to use with BPF_MAP_CREATE operation. + * If provided, map_flags should have BPF_F_TOKEN_FD flag set. + */ + __s32 map_token_fd; }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -1459,6 +1561,10 @@ union bpf_attr { * truncated), or smaller (if log buffer wasn't filled completely). */ __u32 log_true_size; + /* BPF token FD to use with BPF_PROG_LOAD operation. 
+ * If provided, prog_flags should have BPF_F_TOKEN_FD flag set. + */ + __s32 prog_token_fd; }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -1556,8 +1662,10 @@ union bpf_attr { } query; struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ - __u64 name; - __u32 prog_fd; + __u64 name; + __u32 prog_fd; + __u32 :32; + __aligned_u64 cookie; } raw_tracepoint; struct { /* anonymous struct for BPF_BTF_LOAD */ @@ -1571,6 +1679,11 @@ union bpf_attr { * truncated), or smaller (if log buffer wasn't filled completely). */ __u32 btf_log_true_size; + __u32 btf_flags; + /* BPF token FD to use with BPF_BTF_LOAD operation. + * If provided, btf_flags should have BPF_F_TOKEN_FD flag set. + */ + __s32 btf_token_fd; }; struct { @@ -1651,6 +1764,13 @@ union bpf_attr { __u32 flags; __u32 pid; } uprobe_multi; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } netkit; }; } link_create; @@ -1694,6 +1814,11 @@ union bpf_attr { __u32 flags; /* extra flags */ } prog_bind_map; + struct { /* struct used by BPF_TOKEN_CREATE command */ + __u32 flags; + __u32 bpffs_fd; + } token_create; + } __attribute__((aligned(8))); /* The description below is an attempt at providing documentation to eBPF @@ -2704,8 +2829,8 @@ union bpf_attr { * *bpf_socket* should be one of the following: * * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. - * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** - * and **BPF_CGROUP_INET6_CONNECT**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**, + * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**. * * This helper actually implements a subset of **setsockopt()**. * It supports the following *level*\ s: @@ -2943,8 +3068,8 @@ union bpf_attr { * *bpf_socket* should be one of the following: * * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. - * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** - * and **BPF_CGROUP_INET6_CONNECT**. 
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**, + * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**. * * This helper actually implements a subset of **getsockopt()**. * It supports the same set of *optname*\ s that is supported by @@ -3264,6 +3389,15 @@ union bpf_attr { * and *params*->smac will not be set as output. A common * use case is to call **bpf_redirect_neigh**\ () after * doing **bpf_fib_lookup**\ (). + * **BPF_FIB_LOOKUP_SRC** + * Derive and set source IP addr in *params*->ipv{4,6}_src + * for the nexthop. If the src addr cannot be derived, + * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this + * case, *params*->dmac and *params*->smac are not set either. + * **BPF_FIB_LOOKUP_MARK** + * Use the mark present in *params*->mark for the fib lookup. + * This option should not be used with BPF_FIB_LOOKUP_DIRECT, + * as it only has meaning for full lookups. * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. @@ -4497,6 +4631,8 @@ union bpf_attr { * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. + * Note: the user stack will only be populated if the *task* is + * the current task; all other tasks will return -EOPNOTSUPP. * To achieve this, the helper needs *task*, which is a valid * pointer to **struct task_struct**. To store the stacktrace, the * bpf program provides *buf* with a nonnegative *size*. @@ -4508,6 +4644,7 @@ union bpf_attr { * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. + * The *task* must be the current task. * **BPF_F_USER_BUILD_ID** * Collect buildid+offset instead of ips for user stack, * only valid if **BPF_F_USER_STACK** is also specified. @@ -4811,9 +4948,9 @@ union bpf_attr { * going through the CPU's backlog queue. * * The *flags* argument is reserved and must be 0. 
The helper is - * currently only supported for tc BPF program types at the ingress - * hook and for veth device types. The peer device must reside in a - * different network namespace. + * currently only supported for tc BPF program types at the + * ingress hook and for veth and netkit target device types. The + * peer device must reside in a different network namespace. * Return * The helper returns **TC_ACT_REDIRECT** on success or * **TC_ACT_SHOT** on error. @@ -4889,7 +5026,7 @@ union bpf_attr { * bytes will be copied to *dst* * Return * The **hash_algo** is returned on success, - * **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if + * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if * invalid arguments are passed. * * struct socket *bpf_sock_from_file(struct file *file) @@ -5096,6 +5233,8 @@ union bpf_attr { * **BPF_F_TIMER_ABS** * Start the timer in absolute expire value instead of the * default relative one. + * **BPF_F_TIMER_CPU_PIN** + * Timer will be pinned to the CPU of the caller. * * Return * 0 on success. @@ -5373,7 +5512,7 @@ union bpf_attr { * bytes will be copied to *dst* * Return * The **hash_algo** is returned on success, - * **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if + * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if * invalid arguments are passed. 
* * void *bpf_kptr_xchg(void *map_value, void *ptr) @@ -6457,7 +6596,7 @@ struct bpf_map_info { __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; - __u32 :32; /* alignment pad */ + __u32 btf_vmlinux_id; __u64 map_extra; } __attribute__((aligned(8))); @@ -6533,7 +6672,18 @@ struct bpf_link_info { __u32 count; /* in/out: kprobe_multi function count */ __u32 flags; __u64 missed; + __aligned_u64 cookies; } kprobe_multi; + struct { + __aligned_u64 path; + __aligned_u64 offsets; + __aligned_u64 ref_ctr_offsets; + __aligned_u64 cookies; + __u32 path_size; /* in/out: real path size on success, including zero byte */ + __u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */ + __u32 flags; + __u32 pid; + } uprobe_multi; struct { __u32 type; /* enum bpf_perf_event_type */ __u32 :32; @@ -6542,6 +6692,7 @@ struct bpf_link_info { __aligned_u64 file_name; /* in/out */ __u32 name_len; __u32 offset; /* offset from file_name */ + __u64 cookie; } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */ struct { __aligned_u64 func_name; /* in/out */ @@ -6549,14 +6700,19 @@ struct bpf_link_info { __u32 offset; /* offset from func_name */ __u64 addr; __u64 missed; + __u64 cookie; } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */ struct { __aligned_u64 tp_name; /* in/out */ __u32 name_len; + __u32 :32; + __u64 cookie; } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */ struct { __u64 config; __u32 type; + __u32 :32; + __u64 cookie; } event; /* BPF_PERF_EVENT_EVENT */ }; } perf_event; @@ -6564,6 +6720,10 @@ struct bpf_link_info { __u32 ifindex; __u32 attach_type; } tcx; + struct { + __u32 ifindex; + __u32 attach_type; + } netkit; }; } __attribute__((aligned(8))); @@ -6860,6 +7020,7 @@ enum { BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ BPF_TCP_NEW_SYN_RECV, + BPF_TCP_BOUND_INACTIVE, BPF_TCP_MAX_STATES /* Leave at the end! 
*/ }; @@ -6962,6 +7123,8 @@ enum { BPF_FIB_LOOKUP_OUTPUT = (1U << 1), BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), BPF_FIB_LOOKUP_TBID = (1U << 3), + BPF_FIB_LOOKUP_SRC = (1U << 4), + BPF_FIB_LOOKUP_MARK = (1U << 5), }; enum { @@ -6974,6 +7137,7 @@ enum { BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ + BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */ }; struct bpf_fib_lookup { @@ -7008,6 +7172,9 @@ struct bpf_fib_lookup { __u32 rt_metric; }; + /* input: source address to consider for lookup + * output: source address result from lookup + */ union { __be32 ipv4_src; __u32 ipv6_src[4]; /* in6_addr; network order */ @@ -7035,8 +7202,19 @@ struct bpf_fib_lookup { __u32 tbid; }; - __u8 smac[6]; /* ETH_ALEN */ - __u8 dmac[6]; /* ETH_ALEN */ + union { + /* input */ + struct { + __u32 mark; /* policy routing */ + /* 2 4-byte holes for input */ + }; + + /* output: source and dest mac */ + struct { + __u8 smac[6]; /* ETH_ALEN */ + __u8 dmac[6]; /* ETH_ALEN */ + }; + }; }; struct bpf_redir_neigh { @@ -7120,40 +7298,31 @@ struct bpf_spin_lock { }; struct bpf_timer { - __u64 :64; - __u64 :64; + __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_dynptr { - __u64 :64; - __u64 :64; + __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_list_head { - __u64 :64; - __u64 :64; + __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_list_node { - __u64 :64; - __u64 :64; - __u64 :64; + __u64 __opaque[3]; } __attribute__((aligned(8))); struct bpf_rb_root { - __u64 :64; - __u64 :64; + __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_rb_node { - __u64 :64; - __u64 :64; - __u64 :64; - __u64 :64; + __u64 __opaque[4]; } __attribute__((aligned(8))); struct bpf_refcount { - __u32 :32; + __u32 __opaque[1]; } __attribute__((aligned(4))); struct bpf_sysctl { @@ -7309,9 +7478,11 @@ struct bpf_core_relo { * Flags to 
control bpf_timer_start() behaviour. * - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is * relative to current time. + * - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller. */ enum { BPF_F_TIMER_ABS = (1ULL << 0), + BPF_F_TIMER_CPU_PIN = (1ULL << 1), }; /* BPF numbers iterator state */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 39e659c8..f0d71b2a 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -211,6 +211,9 @@ struct rtnl_link_stats { * @rx_nohandler: Number of packets received on the interface * but dropped by the networking stack because the device is * not designated to receive packets (e.g. backup link in a bond). + * + * @rx_otherhost_dropped: Number of packets dropped due to mismatch + * in destination MAC address. */ struct rtnl_link_stats64 { __u64 rx_packets; @@ -243,6 +246,23 @@ struct rtnl_link_stats64 { __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; + + __u64 rx_otherhost_dropped; +}; + +/* Subset of link stats useful for in-HW collection. Meaning of the fields is as + * for struct rtnl_link_stats64. 
+ */ +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; }; /* The struct should be in sync with struct ifmap */ @@ -350,7 +370,13 @@ enum { IFLA_GRO_MAX_SIZE, IFLA_TSO_MAX_SIZE, IFLA_TSO_MAX_SEGS, + IFLA_ALLMULTI, /* Allmulti count: > 0 means acts ALLMULTI */ + + IFLA_DEVLINK_PORT, + IFLA_GSO_IPV4_MAX_SIZE, + IFLA_GRO_IPV4_MAX_SIZE, + IFLA_DPLL_PIN, __IFLA_MAX }; @@ -539,6 +565,12 @@ enum { IFLA_BRPORT_MRP_IN_OPEN, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, + IFLA_BRPORT_LOCKED, + IFLA_BRPORT_MAB, + IFLA_BRPORT_MCAST_N_GROUPS, + IFLA_BRPORT_MCAST_MAX_GROUPS, + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS, + IFLA_BRPORT_BACKUP_NHID, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -716,7 +748,79 @@ enum ipvlan_mode { #define IPVLAN_F_PRIVATE 0x01 #define IPVLAN_F_VEPA 0x02 +/* Tunnel RTM header */ +struct tunnel_msg { + __u8 family; + __u8 flags; + __u16 reserved2; + __u32 ifindex; +}; + +/* netkit section */ +enum netkit_action { + NETKIT_NEXT = -1, + NETKIT_PASS = 0, + NETKIT_DROP = 2, + NETKIT_REDIRECT = 7, +}; + +enum netkit_mode { + NETKIT_L2, + NETKIT_L3, +}; + +enum { + IFLA_NETKIT_UNSPEC, + IFLA_NETKIT_PEER_INFO, + IFLA_NETKIT_PRIMARY, + IFLA_NETKIT_POLICY, + IFLA_NETKIT_PEER_POLICY, + IFLA_NETKIT_MODE, + __IFLA_NETKIT_MAX, +}; +#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) + /* VXLAN section */ + +/* include statistics in the dump */ +#define TUNNEL_MSG_FLAG_STATS 0x01 + +#define TUNNEL_MSG_VALID_USER_FLAGS TUNNEL_MSG_FLAG_STATS + +/* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */ +enum { + VNIFILTER_ENTRY_STATS_UNSPEC, + VNIFILTER_ENTRY_STATS_RX_BYTES, + VNIFILTER_ENTRY_STATS_RX_PKTS, + VNIFILTER_ENTRY_STATS_RX_DROPS, + VNIFILTER_ENTRY_STATS_RX_ERRORS, + VNIFILTER_ENTRY_STATS_TX_BYTES, + VNIFILTER_ENTRY_STATS_TX_PKTS, + VNIFILTER_ENTRY_STATS_TX_DROPS, + 
VNIFILTER_ENTRY_STATS_TX_ERRORS, + VNIFILTER_ENTRY_STATS_PAD, + __VNIFILTER_ENTRY_STATS_MAX +}; +#define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1) + +enum { + VXLAN_VNIFILTER_ENTRY_UNSPEC, + VXLAN_VNIFILTER_ENTRY_START, + VXLAN_VNIFILTER_ENTRY_END, + VXLAN_VNIFILTER_ENTRY_GROUP, + VXLAN_VNIFILTER_ENTRY_GROUP6, + VXLAN_VNIFILTER_ENTRY_STATS, + __VXLAN_VNIFILTER_ENTRY_MAX +}; +#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1) + +enum { + VXLAN_VNIFILTER_UNSPEC, + VXLAN_VNIFILTER_ENTRY, + __VXLAN_VNIFILTER_MAX +}; +#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1) + enum { IFLA_VXLAN_UNSPEC, IFLA_VXLAN_ID, @@ -748,6 +852,8 @@ enum { IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, IFLA_VXLAN_DF, + IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */ + IFLA_VXLAN_LOCALBYPASS, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -781,6 +887,7 @@ enum { IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, + IFLA_GENEVE_INNER_PROTO_INHERIT, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) @@ -826,6 +933,8 @@ enum { IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, + IFLA_GTP_CREATE_SOCKETS, + IFLA_GTP_RESTART_COUNT, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) @@ -865,6 +974,7 @@ enum { IFLA_BOND_AD_LACP_ACTIVE, IFLA_BOND_MISSED_MAX, IFLA_BOND_NS_IP6_TARGET, + IFLA_BOND_COUPLED_CONTROL, __IFLA_BOND_MAX, }; @@ -1162,6 +1272,17 @@ enum { #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) +enum { + IFLA_STATS_GETSET_UNSPEC, + IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with + * a filter mask for the corresponding group. 
+ */ + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */ + __IFLA_STATS_GETSET_MAX, +}; + +#define IFLA_STATS_GETSET_MAX (__IFLA_STATS_GETSET_MAX - 1) + /* These are embedded into IFLA_STATS_LINK_XSTATS: * [IFLA_STATS_LINK_XSTATS] * -> [LINK_XSTATS_TYPE_xxx] @@ -1179,10 +1300,21 @@ enum { enum { IFLA_OFFLOAD_XSTATS_UNSPEC, IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO, /* HW stats info. A nest */ + IFLA_OFFLOAD_XSTATS_L3_STATS, /* struct rtnl_hw_stats64 */ __IFLA_OFFLOAD_XSTATS_MAX }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, /* u8 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, /* u8 */ + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX, +}; +#define IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX \ + (__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX - 1) + /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) @@ -1281,4 +1413,14 @@ enum { #define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1) +/* DSA section */ + +enum { + IFLA_DSA_UNSPEC, + IFLA_DSA_MASTER, + __IFLA_DSA_MAX, +}; + +#define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1) + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 39c6a250..3a64499b 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */ + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */ + PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -235,6 +237,8 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; @@ -982,6 +986,12 @@ enum perf_event_type { * { u64 nr; * { u64 hw_idx; } && 
PERF_SAMPLE_BRANCH_HW_INDEX * { u64 from, to, flags } lbr[nr]; + * # + * # The format of the counters is decided by the + * # "branch_counter_nr" and "branch_counter_width", + * # which are defined in the ABI. + * # + * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi @@ -1427,6 +1437,9 @@ struct perf_branch_entry { reserved:31; }; +/* Size of used info bits in struct perf_branch_entry */ +#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 + union perf_sample_weight { __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 3faee019..bd4b227a 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -204,37 +204,6 @@ struct tc_u32_pcnt { #define TC_U32_MAXDEPTH 8 - -/* RSVP filter */ - -enum { - TCA_RSVP_UNSPEC, - TCA_RSVP_CLASSID, - TCA_RSVP_DST, - TCA_RSVP_SRC, - TCA_RSVP_PINFO, - TCA_RSVP_POLICE, - TCA_RSVP_ACT, - __TCA_RSVP_MAX -}; - -#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 ) - -struct tc_rsvp_gpi { - __u32 key; - __u32 mask; - int offset; -}; - -struct tc_rsvp_pinfo { - struct tc_rsvp_gpi dpi; - struct tc_rsvp_gpi spi; - __u8 protocol; - __u8 tunnelid; - __u8 tunnelhdr; - __u8 pad; -}; - /* ROUTE filter */ enum { @@ -265,22 +234,6 @@ enum { #define TCA_FW_MAX (__TCA_FW_MAX - 1) -/* TC index filter */ - -enum { - TCA_TCINDEX_UNSPEC, - TCA_TCINDEX_HASH, - TCA_TCINDEX_MASK, - TCA_TCINDEX_SHIFT, - TCA_TCINDEX_FALL_THROUGH, - TCA_TCINDEX_CLASSID, - TCA_TCINDEX_POLICE, - TCA_TCINDEX_ACT, - __TCA_TCINDEX_MAX -}; - -#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1) - /* Flow filter */ enum { diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 5c903abc..587481a1 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -457,115 +457,6 @@ enum { #define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1) - -/* CBQ section */ - -#define TC_CBQ_MAXPRIO 8 -#define 
TC_CBQ_MAXLEVEL 8 -#define TC_CBQ_DEF_EWMA 5 - -struct tc_cbq_lssopt { - unsigned char change; - unsigned char flags; -#define TCF_CBQ_LSS_BOUNDED 1 -#define TCF_CBQ_LSS_ISOLATED 2 - unsigned char ewma_log; - unsigned char level; -#define TCF_CBQ_LSS_FLAGS 1 -#define TCF_CBQ_LSS_EWMA 2 -#define TCF_CBQ_LSS_MAXIDLE 4 -#define TCF_CBQ_LSS_MINIDLE 8 -#define TCF_CBQ_LSS_OFFTIME 0x10 -#define TCF_CBQ_LSS_AVPKT 0x20 - __u32 maxidle; - __u32 minidle; - __u32 offtime; - __u32 avpkt; -}; - -struct tc_cbq_wrropt { - unsigned char flags; - unsigned char priority; - unsigned char cpriority; - unsigned char __reserved; - __u32 allot; - __u32 weight; -}; - -struct tc_cbq_ovl { - unsigned char strategy; -#define TC_CBQ_OVL_CLASSIC 0 -#define TC_CBQ_OVL_DELAY 1 -#define TC_CBQ_OVL_LOWPRIO 2 -#define TC_CBQ_OVL_DROP 3 -#define TC_CBQ_OVL_RCLASSIC 4 - unsigned char priority2; - __u16 pad; - __u32 penalty; -}; - -struct tc_cbq_police { - unsigned char police; - unsigned char __res1; - unsigned short __res2; -}; - -struct tc_cbq_fopt { - __u32 split; - __u32 defmap; - __u32 defchange; -}; - -struct tc_cbq_xstats { - __u32 borrows; - __u32 overactions; - __s32 avgidle; - __s32 undertime; -}; - -enum { - TCA_CBQ_UNSPEC, - TCA_CBQ_LSSOPT, - TCA_CBQ_WRROPT, - TCA_CBQ_FOPT, - TCA_CBQ_OVL_STRATEGY, - TCA_CBQ_RATE, - TCA_CBQ_RTAB, - TCA_CBQ_POLICE, - __TCA_CBQ_MAX, -}; - -#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1) - -/* dsmark section */ - -enum { - TCA_DSMARK_UNSPEC, - TCA_DSMARK_INDICES, - TCA_DSMARK_DEFAULT_INDEX, - TCA_DSMARK_SET_TC_INDEX, - TCA_DSMARK_MASK, - TCA_DSMARK_VALUE, - __TCA_DSMARK_MAX, -}; - -#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1) - -/* ATM section */ - -enum { - TCA_ATM_UNSPEC, - TCA_ATM_FD, /* file/socket descriptor */ - TCA_ATM_PTR, /* pointer to descriptor - later */ - TCA_ATM_HDR, /* LL header */ - TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */ - TCA_ATM_ADDR, /* PVC address (for output only) */ - TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */ - 
__TCA_ATM_MAX, -}; - -#define TCA_ATM_MAX (__TCA_ATM_MAX - 1) - /* Network emulator */ enum { diff --git a/libbpf b/libbpf index e26b84dc..6d3595d2 160000 --- a/libbpf +++ b/libbpf @@ -1 +1 @@ -Subproject commit e26b84dc330c9644c07428c271ab491b0f01f4e1 +Subproject commit 6d3595d215b014d3eddb88038d686e1c20781534 diff --git a/scripts/sync-kernel-expected-diff.patch b/scripts/sync-kernel-expected-diff.patch index 769801a1..92496a54 100644 --- a/scripts/sync-kernel-expected-diff.patch +++ b/scripts/sync-kernel-expected-diff.patch @@ -76,7 +76,7 @@ ifneq ($(BPFTOOL_VERSION),) CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"' endif -@@ -119,11 +117,7 @@ +@@ -123,11 +121,7 @@ endif ifeq ($(check_feat),1) @@ -89,7 +89,7 @@ endif LIBS = $(LIBBPF) -lelf -lz -@@ -213,7 +207,7 @@ +@@ -214,7 +208,7 @@ $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP) $(QUIET_CLANG)$(CLANG) \ -I$(or $(OUTPUT),.) \ @@ -97,21 +97,17 @@ + -I$(srctree)/include/uapi/ \ -I$(LIBBPF_BOOTSTRAP_INCLUDE) \ -g -O2 -Wall -fno-stack-protector \ - -target bpf -c $< -o $@ -@@ -231,10 +225,10 @@ + --target=bpf -c $< -o $@ +@@ -232,7 +226,7 @@ CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS) --$(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c -+$(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/src/kernel/bpf/disasm.c - $(QUIET_CC)$(HOSTCC) $(HOST_CFLAGS) -c -MMD $< -o $@ - -$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c +$(OUTPUT)disasm.o: $(srctree)/src/kernel/bpf/disasm.c $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@ $(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP) -@@ -253,7 +247,7 @@ +@@ -251,7 +245,7 @@ $(call QUIET_CLEAN, feature-detect) $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null @@ -120,7 +116,7 @@ $(call QUIET_CLEAN, bpftool) $(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d $(Q)$(RM) -- $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h -@@ -269,7 +263,7 @@ +@@ -267,7 +261,7 @@ install: install-bin $(Q)$(INSTALL) -m 0755 -d 
$(DESTDIR)$(bash_compdir) @@ -129,7 +125,7 @@ uninstall: $(call QUIET_UNINST, bpftool) -@@ -277,16 +271,16 @@ +@@ -275,16 +269,16 @@ $(Q)$(RM) -- $(DESTDIR)$(bash_compdir)/bpftool doc: diff --git a/scripts/test_bpftool.py b/scripts/test_bpftool.py new file mode 100644 index 00000000..b13bfa16 --- /dev/null +++ b/scripts/test_bpftool.py @@ -0,0 +1,2017 @@ +# This is a test file for bpftool's bash completion. +# +# Usage: +# +# $ git clone https://github.com/scop/bash-completion.git +# $ cd bash-completion +# $ cp /.../bpftool/bash-completion/bpftool bash-completion/completions/ +# $ cp /.../bpftool/scripts/test_bpftool.py test/t/ +# $ pytest-3 -k test_bpftool -vv test/t + +import pytest +from conftest import assert_bash_exec +import os, re, psutil, random + + +class TestBpftool: + + # Helpers + + def is_root(self): + return os.getuid() == 0 + + def all_ints(self, completion): + # If non-root, list should be empty + if not self.is_root(): + return True if not completion else False + + # Else, assume we've set up at least one object with id + if not completion: + return False + for id in completion: + if not id.isdigit(): + return False + return True + + def all_tags(self, completion): + # If non-root, completion should be empty + if not self.is_root(): + return True if not completion else False + + # Else, assume we've set up at least one object with tag + if not completion: + return False + for tag in completion: + if not re.match(r"[a-f0-9]{16}$", tag): + return False + return True + + def all_paths(self, completion): + for path in completion: + if not os.path.exists(path): + return False + return True + + commands = [ + "batch", + "btf", + "cgroup", + "feature", + "gen", + "help", + "iter", + "link", + "map", + "net", + "perf", + "prog", + "struct_ops", + ] + + longopts = [ + "--base-btf", + "--bpffs", + "--debug", + "--json", + "--mapcompat", + "--pretty", + "--use-loader", + "--version", + ] + + map_types = [ + "arena", + "array", + "array_of_maps", + 
"bloom_filter", + "cgroup_array", + "cgroup_storage", + "cgrp_storage", + "cpumap", + "devmap", + "devmap_hash", + "hash", + "hash_of_maps", + "inode_storage", + "lpm_trie", + "lru_hash", + "lru_percpu_hash", + "percpu_array", + "percpu_cgroup_storage", + "percpu_hash", + "perf_event_array", + "prog_array", + "queue", + "reuseport_sockarray", + "ringbuf", + "sk_storage", + "sockhash", + "sockmap", + "stack", + "stack_trace", + "struct_ops", + "task_storage", + "user_ringbuf", + "xskmap", + ] + + prog_types = [ + "action", + "cgroup/bind4", + "cgroup/bind6", + "cgroup/connect4", + "cgroup/connect6", + "cgroup/connect_unix", + "cgroup/dev", + "cgroup/getpeername4", + "cgroup/getpeername6", + "cgroup/getpeername_unix", + "cgroup/getsockname4", + "cgroup/getsockname6", + "cgroup/getsockname_unix", + "cgroup/getsockopt", + "cgroup/post_bind4", + "cgroup/post_bind6", + "cgroup/recvmsg4", + "cgroup/recvmsg6", + "cgroup/recvmsg_unix", + "cgroup/sendmsg4", + "cgroup/sendmsg6", + "cgroup/sendmsg_unix", + "cgroup/setsockopt", + "cgroup/skb", + "cgroup/sock", + "cgroup/sock_release", + "cgroup/sysctl", + "classifier", + "fentry", + "fexit", + "flow_dissector", + "freplace", + "kprobe", + "kretprobe", + "lirc_mode2", + "lwt_in", + "lwt_out", + "lwt_seg6local", + "lwt_xmit", + "perf_event", + "raw_tracepoint", + "sk_lookup", + "sk_msg", + "sk_skb", + "socket", + "sockops", + "struct_ops", + "tracepoint", + "xdp", + ] + + prog_attach_types = [ + "flow_dissector", + "sk_msg_verdict", + "sk_skb_stream_parser", + "sk_skb_stream_verdict", + "sk_skb_verdict", + ] + + cgroup_attach_types = [ + "cgroup_device", + "cgroup_getsockopt", + "cgroup_inet4_bind", + "cgroup_inet4_connect", + "cgroup_inet4_getpeername", + "cgroup_inet4_getsockname", + "cgroup_inet4_post_bind", + "cgroup_inet6_bind", + "cgroup_inet6_connect", + "cgroup_inet6_getpeername", + "cgroup_inet6_getsockname", + "cgroup_inet6_post_bind", + "cgroup_inet_egress", + "cgroup_inet_ingress", + "cgroup_inet_sock_create", + 
"cgroup_inet_sock_release", + "cgroup_setsockopt", + "cgroup_sock_ops", + "cgroup_sysctl", + "cgroup_udp4_recvmsg", + "cgroup_udp4_sendmsg", + "cgroup_udp6_recvmsg", + "cgroup_udp6_sendmsg", + "cgroup_unix_connect", + "cgroup_unix_getpeername", + "cgroup_unix_getsockname", + "cgroup_unix_recvmsg", + "cgroup_unix_sendmsg", + ] + + # Fixtures + + @pytest.fixture(scope="class") + def ifnames(self): + return list(psutil.net_if_addrs().keys()) + + @pytest.fixture(scope="class") + def get_objfile(self, bash): + src_file = "/tmp/bash_comp_test.c" + obj_file = "/tmp/bash_comp_test.o" + map_name = "bash_comp_map" + prog_name = "bash_comp_test" + f = open(src_file, "w") + f.write(f""" +#include +#include + +struct {{ + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, __u32); + __uint(max_entries, 1); +}} {map_name} SEC(".maps"); + +int SEC("tracepoint/syscalls/sys_enter_open") +{prog_name}(__attribute__((unused)) void *ctx) +{{ + __u32 key = 0; + __u32 *value; + + value = bpf_map_lookup_elem(&{map_name}, &key); + if (!value) + return 0; + + return *value; +}} + +char LICENSE[] SEC("license") = "Dual BSD/GPL"; +""") + f.close() + + assert_bash_exec( + bash, + f"clang -g -O2 -fno-asynchronous-unwind-tables -emit-llvm " \ + f"-c {src_file} -o - | " \ + f"llc -march=bpf -mcpu=probe -filetype=obj -o {obj_file}", + ) + + yield { + "path": obj_file, + "map_name": map_name, + "prog_name": prog_name, + } + try: + # May fail, I think when distributing the load to multiple CPUs + # there may be several runs of the fixture. 
+ os.remove(obj_file) + os.remove(src_file) + except: + pass + + @pytest.fixture(scope="class") + def get_bpf_link(self, bash, get_objfile): + if not self.is_root(): + return { "id": None } + + objfile = get_objfile + obj_path = objfile["path"] + prog_name = objfile["prog_name"] + rand = random.randint(1000, 9999) + link = f"/sys/fs/bpf/{prog_name}-{rand}" + + assert_bash_exec( + bash, + f"bpftool prog load {obj_path} {link} autoattach" + ) + id = assert_bash_exec( + bash, + # For each link, hold line with id, go through the list of + # pinned paths, if we find ours then swap pattern and hold + # spaces, extract and print id, then quit. + "bpftool -f link show | sed -n '/^[0-9]\\+:/{{h;b}}; " \ + f"\\@{link}@ {{x;s/^\\([0-9]\\+\\):.*/\\1/p;q}}'", + want_output = True + ).strip() + + yield { + "path": link, + "id": id, + } + os.remove(link) + + @pytest.fixture(scope="class") + def get_struct_ops(self, bash): + if not self.is_root(): + return { "name": None } + + src_file = "/tmp/bash_comp_struct_ops.c" + obj_file = "/tmp/bash_comp_struct_ops.o" + rand = random.randint(1000, 9999) + struct_ops_name = f"bashc_stop_{rand}" + f = open(src_file, "w") + f.write(f""" +#include +#include +#include + +void SEC("struct_ops/tcp_empty") +BPF_PROG(tcp_empty, struct sock *sk) +{{ +}} + +SEC(".struct_ops") +struct tcp_congestion_ops bash_comp_dummy_struct_ops = {{ + .init = (void *)tcp_empty, + .name = "{struct_ops_name}", +}}; + +char LICENSE[] SEC("license") = "Dual BSD/GPL"; +""") + f.close() + + assert_bash_exec( + bash, + f"clang -g -O2 -fno-asynchronous-unwind-tables -emit-llvm " \ + f"-c {src_file} -o - | " \ + f"llc -march=bpf -mcpu=probe -filetype=obj -o {obj_file}", + ) + + assert_bash_exec( + bash, + f"bpftool struct_ops register {obj_file}" + ) + struct_ops_map_id = assert_bash_exec( + bash, + "bpftool struct_ops list | " \ + f"sed -n '/^[0-9]\\+: {struct_ops_name} .*/\\1/{{p;q}}'", + want_output = True + ).strip() + try: + # May fail, I think when distributing the 
load to multiple CPUs + # there may be several runs of the fixture. + os.remove(obj_file) + os.remove(src_file) + except: + pass + + yield { + "map_name": struct_ops_name, + "map_id": struct_ops_map_id, + } + try: + assert_bash_exec( + bash, + f"bpftool struct_ops unregister name {struct_ops_name}" + ) + except: + pass + + # bpftool and options + + @pytest.mark.complete("bpftool ", require_cmd=True) + def test_basic(self, completion): + assert completion == self.commands + + @pytest.mark.complete("bpftool -") + def test_dash(self, completion): + assert completion == self.longopts + + @pytest.mark.complete("bpftool --") + def test_double_dash(self, completion): + assert completion == self.longopts + + @pytest.mark.complete("bpftool -j") + def test_json_short(self, completion): + """Option -j is complete, no completion returned""" + assert not completion + + @pytest.mark.complete("bpftool --js") + def test_json(self, completion): + assert completion == "on" + + @pytest.mark.complete("bpftool --deb") + def test_debug(self, completion): + assert completion == "ug" + + @pytest.mark.complete("bpftool --debug -j --version -p -d ", require_cmd=True) + def test_many_options(self, completion): + assert completion == self.commands + + @pytest.mark.complete("bpftool --json net ") + def test_opt_cmd(self, completion): + assert completion == "attach detach help list show".split() + + @pytest.mark.complete("bpftool net --debug ") + def test_cmd_opt(self, completion): + assert completion == "attach detach help list show".split() + + # bpftool btf + + @pytest.mark.complete("bpftool btf ") + def test_btf(self, completion): + assert completion == "dump help list show".split() + + @pytest.mark.complete("bpftool btf help ") + def test_btf_help(self, completion): + assert not completion + + @pytest.mark.complete("bpftool btf list ") + def test_btf_list(self, completion): + assert completion == "id" + + @pytest.mark.complete("bpftool btf show ") + def test_btf_show(self, completion): + 
assert completion == "id" + + @pytest.mark.complete("bpftool btf show id ") + def test_btf_show_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool btf show id 1 ") + def test_btf_show_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool btf dump ") + def test_btf_dump(self, completion): + assert completion == "file id map prog".split() + + @pytest.mark.complete("bpftool btf dump id ") + def test_btf_dump_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool btf dump prog ") + def test_btf_dump_prog(self, completion): + assert completion == "id name pinned tag".split() + + @pytest.mark.complete("bpftool btf dump prog id ") + def test_btf_dump_prog_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool btf dump prog id 1 ") + def test_btf_dump_prog_id_xxx(self, completion): + assert completion == "format" + + @pytest.mark.complete("bpftool btf dump prog name ") + def test_btf_dump_prog_name(self, completion): + assert completion + + @pytest.mark.complete("bpftool btf dump prog name some_name ") + def test_btf_dump_prog_name_xxx(self, completion): + assert completion == "format" + + @pytest.mark.complete("bpftool btf dump prog pinned ") + def test_btf_dump_prog_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool btf dump prog pinned /some_map ") + def test_btf_dump_prog_pinned_xxx(self, completion): + assert completion == "format" + + @pytest.mark.complete("bpftool btf dump prog tag ") + def test_btf_dump_prog_tag(self, completion): + assert self.all_tags(completion) + + @pytest.mark.complete("bpftool btf dump prog tag some_tag ") + def test_btf_dump_prog_tag_xxx(self, completion): + assert completion == "format" + + @pytest.mark.complete("bpftool btf dump map ") + def test_btf_dump_map(self, completion): + assert completion == "id name pinned".split() + + 
@pytest.mark.complete("bpftool btf dump map id ") + def test_btf_dump_map_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool btf dump map id 1 ") + def test_btf_dump_map_id_xxx(self, completion): + assert completion == "all format key kv value".split() + + @pytest.mark.complete("bpftool btf dump map pinned ") + def test_btf_dump_map_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool btf dump map pinned /some_prog ") + def test_btf_dump_map_pinned_xxx(self, completion): + assert completion == "all format key kv value".split() + + @pytest.mark.complete("bpftool btf dump map id 1 key ") + def test_btf_dump_map_id_xxx_key(self, completion): + assert completion == "format" + + @pytest.mark.complete("bpftool btf dump map id 1 value ") + def test_btf_dump_map_id_xxx_value(self, completion): + assert completion == "format" + + @pytest.mark.complete("bpftool btf dump map id 1 kv ") + def test_btf_dump_map_id_xxx_kv(self, completion): + assert completion == "format" + + @pytest.mark.complete("bpftool btf dump map id 1 all ") + def test_btf_dump_map_id_xxx_all(self, completion): + assert completion == "format" + + @pytest.mark.complete("bpftool btf dump file ") + def test_btf_dump_file(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool btf dump file format ") + def test_btf_dump_file_format(self, completion): + assert completion == "c raw".split() + + @pytest.mark.complete("bpftool btf dump file format raw ") + def test_btf_dump_file_format_raw(self, completion): + assert not completion + + @pytest.mark.complete("bpftool btf dump file format c ") + def test_btf_dump_file_format_c(self, completion): + assert not completion + + # bpftool cgroup + + @pytest.mark.complete("bpftool cgroup ") + def test_cgroup(self, completion): + assert completion == "attach detach help list show tree".split() + + @pytest.mark.complete("bpftool cgroup help ") + def 
test_cgroup_help(self, completion): + assert not completion + + @pytest.mark.complete("bpftool cgroup list ") + def test_cgroup_list(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool cgroup show ") + def test_cgroup_show(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool cgroup show /some_cgroup ") + def test_cgroup_show_xxx(self, completion): + assert completion == "effective" + + @pytest.mark.complete("bpftool cgroup show /some_cgroup effective ") + def test_cgroup_show_xxx_effective(self, completion): + assert not completion + + @pytest.mark.complete("bpftool cgroup tree ") + def test_cgroup_tree(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool cgroup tree /some_cgroup_root ") + def test_cgroup_tree_xxx(self, completion): + assert completion == "effective" + + @pytest.mark.complete("bpftool cgroup tree /some_cgroup_root effective ") + def test_cgroup_tree_xxx_effective(self, completion): + assert not completion + + @pytest.mark.complete("bpftool cgroup attach ") + def test_cgroup_attach(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup ") + def test_cgroup_attach_xxx(self, completion): + assert completion == self.cgroup_attach_types + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup cgroup_inet_ingress ") + def test_cgroup_attach_xxx_type(self, completion): + assert completion == "id name pinned tag".split() + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup cgroup_inet_ingress id ") + def test_cgroup_attach_xxx_type_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup cgroup_inet_ingress id 1 ") + def test_cgroup_attach_xxx_type_id_xxx(self, completion): + assert completion == "multi override".split() + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup cgroup_inet_ingress name ") + 
def test_cgroup_attach_xxx_type_name(self, completion): + assert completion + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup cgroup_inet_ingress name some_name ") + def test_cgroup_attach_xxx_type_name_xxx(self, completion): + assert completion == "multi override".split() + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup cgroup_inet_ingress pinned ") + def test_cgroup_attach_xxx_type_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup cgroup_inet_ingress pinned /path ") + def test_cgroup_attach_xxx_type_pinned_xxx(self, completion): + assert completion == "multi override".split() + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup cgroup_inet_ingress tag ") + def test_cgroup_attach_xxx_type_tag(self, completion): + assert self.all_tags(completion) + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup cgroup_inet_ingress tag some_tag ") + def test_cgroup_attach_xxx_type_tag_xxx(self, completion): + assert completion == "multi override".split() + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup cgroup_inet_ingress tag some_tag multi ") + def test_cgroup_attach_xxx_type_tag_xxx_multi(self, completion): + assert not completion + + @pytest.mark.complete("bpftool cgroup attach /some_cgroup cgroup_inet_ingress tag some_tag override ") + def test_cgroup_attach_xxx_type_tag_xxx_override(self, completion): + assert not completion + + @pytest.mark.complete("bpftool cgroup detach ") + def test_cgroup_detach(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool cgroup detach /some_cgroup ") + def test_cgroup_detach_xxx(self, completion): + assert completion == self.cgroup_attach_types + + @pytest.mark.complete("bpftool cgroup detach /some_cgroup cgroup_inet_ingress ") + def test_cgroup_detach_xxx_type(self, completion): + assert completion == "id name pinned tag".split() + + @pytest.mark.complete("bpftool cgroup detach 
/some_cgroup cgroup_inet_ingress id ") + def test_cgroup_detach_xxx_type_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool cgroup detach /some_cgroup cgroup_inet_ingress id 1 ") + def test_cgroup_detach_xxx_type_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool cgroup detach /some_cgroup cgroup_inet_ingress pinned ") + def test_cgroup_detach_xxx_type_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool cgroup detach /some_cgroup cgroup_inet_ingress pinned /path ") + def test_cgroup_detach_xxx_type_pinned_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool cgroup detach /some_cgroup cgroup_inet_ingress tag ") + def test_cgroup_detach_xxx_type_tag(self, completion): + assert self.all_tags(completion) + + @pytest.mark.complete("bpftool cgroup detach /some_cgroup cgroup_inet_ingress tag some_tag ") + def test_cgroup_detach_xxx_type_tag_xxx(self, completion): + assert not completion + + # bpftool feature + + @pytest.mark.complete("bpftool feature ") + def test_feature(self, completion): + assert completion == "help list_builtins probe".split() + + @pytest.mark.complete("bpftool feature help ") + def test_feature_help(self, completion): + assert not completion + + @pytest.mark.complete("bpftool feature probe ") + def test_feature_probe(self, completion): + assert completion == "dev full kernel macros unprivileged".split() + + @pytest.mark.complete("bpftool feature probe kernel ") + def test_feature_probe_kernel(self, completion): + assert completion == "full macros unprivileged".split() + + @pytest.mark.complete("bpftool feature probe dev ") + def test_feature_probe_dev(self, ifnames, completion): + assert all(ifname in completion for ifname in ifnames) + + @pytest.mark.complete("bpftool feature probe dev some_ifname ") + def test_feature_probe_dev_xxx(self, completion): + assert completion == "full macros 
unprivileged".split() + + @pytest.mark.complete("bpftool feature probe full ") + def test_feature_probe_full(self, completion): + assert completion == "dev kernel macros unprivileged".split() + + @pytest.mark.complete("bpftool feature probe unprivileged ") + def test_feature_probe_unprivileged(self, completion): + assert completion == "dev full kernel macros".split() + + @pytest.mark.complete("bpftool feature probe full unprivileged ") + def test_feature_probe_full_unprivileged(self, completion): + assert completion == "dev kernel macros".split() + + @pytest.mark.complete("bpftool feature probe macros ") + def test_feature_probe_macros(self, completion): + assert completion == "dev full kernel prefix unprivileged".split() + + @pytest.mark.complete("bpftool feature probe macros prefix ") + def test_feature_probe_macros_prefix(self, completion): + assert not completion + + @pytest.mark.complete("bpftool feature probe macros prefix SOME_PREFIX ") + def test_feature_probe_macros_prefix_xxx(self, completion): + assert completion == "dev full kernel unprivileged".split() + + @pytest.mark.complete("bpftool feature probe dev some_ifname full unprivileged macros prefix SOME_PREFIX ") + def test_feature_probe_dev_xxx_full_unprivileged_macros_prefix_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool feature list_builtins ") + def test_feature_listbuiltins(self, completion): + assert completion == "attach_types helpers link_types map_types prog_types".split() + + @pytest.mark.complete("bpftool feature list_builtins prog_types ") + def test_feature_listbuiltins_progtypes(self, completion): + assert not completion + + @pytest.mark.complete("bpftool feature list_builtins prog_types map_types attach_types link_types helpers ") + def test_feature_listbuiltins_progtypes_maptypes_attachtypes_linktypes_helpers(self, completion): + """Note: bpftool will ignore the arguments after "prog_types".""" + assert not completion + + # bpftool gen + + 
@pytest.mark.complete("bpftool gen ") + def test_gen(self, completion): + assert completion == "help min_core_btf object skeleton subskeleton".split() + + @pytest.mark.complete("bpftool gen help ") + def test_gen_help(self, completion): + assert not completion + + @pytest.mark.complete("bpftool gen object ") + def test_gen_object(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool gen object /some_output ") + def test_gen_object_xxx(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool gen object /some_output /some_input ") + def test_gen_object_xxx_xxx(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool gen object /some_output /some_input /some_input ") + def test_gen_object_xxx_xxx_xxx(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool gen skeleton ") + def test_gen_skeleton(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool gen skeleton /some_objfile ") + def test_gen_skeleton_xxx(self, completion): + assert completion == "name" + + @pytest.mark.complete("bpftool gen skeleton /some_objfile name ") + def test_gen_skeleton_xxx_name(self, completion): + assert not completion + + @pytest.mark.complete("bpftool gen skeleton /some_objfile name some_objname ") + def test_gen_skeleton_xxx_name_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool gen subskeleton ") + def test_gen_subskeleton(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool gen subskeleton /some_objfile ") + def test_gen_subskeleton_xxx(self, completion): + assert completion == "name" + + @pytest.mark.complete("bpftool gen subskeleton /some_objfile name ") + def test_gen_subskeleton_xxx_name(self, completion): + assert not completion + + @pytest.mark.complete("bpftool gen subskeleton /some_objfile name some_objname ") + def 
test_gen_subskeleton_xxx_name_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool gen min_core_btf ") + def test_gen_mincorebtf(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool gen min_core_btf /some_input ") + def test_gen_mincorebtf_xxx(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool gen min_core_btf /some_input /some_output ") + def test_gen_mincorebtf_xxx_xxx(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool gen min_core_btf /some_input /some_output /some_objfile ") + def test_gen_mincorebtf_xxx_xxx_xxx(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool gen min_core_btf /some_input /some_output /some_objfile /some_objfile ") + def test_gen_mincorebtf_xxx_xxx_xxx_xxx(self, completion): + assert self.all_paths(completion) + + # bpftool iter + + @pytest.mark.complete("bpftool iter ") + def test_iter(self, completion): + assert completion == "help pin".split() + + @pytest.mark.complete("bpftool iter help ") + def test_iter_help(self, completion): + assert not completion + + @pytest.mark.complete("bpftool iter pin ") + def test_iter_pin(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool iter pin /some/iterator ") + def test_iter_pin_xxx(self, completion): + assert completion == "map" + + @pytest.mark.complete("bpftool iter pin /some/iterator map ") + def test_iter_pin_xxx_map(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool iter pin /some/iterator map id ") + def test_iter_pin_xxx_map_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool iter pin /some/iterator map id 1 ") + def test_iter_pin_xxx_map_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool iter pin /some/iterator map pinned ") + def 
test_iter_pin_xxx_map_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool iter pin /some/iterator map pinned /some_map ") + def test_iter_pin_xxx_map_pinned_xxx(self, completion): + assert not completion + + # bpftool link + + @pytest.mark.complete("bpftool link ") + def test_link(self, completion): + assert completion == "detach help list pin show".split() + + @pytest.mark.complete("bpftool link help ") + def test_link_help(self, completion): + assert not completion + + @pytest.mark.complete("bpftool link list ") + def test_link_list(self, completion): + assert completion == "id pinned".split() + + @pytest.mark.complete("bpftool link show ") + def test_link_show(self, completion): + assert completion == "id pinned".split() + + @pytest.mark.complete("bpftool link show id ", require_cmd=True) + def test_link_show_id(self, get_bpf_link, completion): + assert self.all_ints(completion) + link_id = get_bpf_link["id"] + if link_id is not None: + assert link_id in completion + + @pytest.mark.complete("bpftool link show id 1 ") + def test_link_show_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool link show pinned ") + def test_link_show_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool link show pinned /some_link ") + def test_link_show_pinned_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool link pin ") + def test_link_pin(self, completion): + assert completion == "id pinned".split() + + @pytest.mark.complete("bpftool link pin id ", require_cmd=True) + def test_link_pin_id(self, get_bpf_link, completion): + assert self.all_ints(completion) + link_id = get_bpf_link["id"] + if link_id is not None: + assert link_id in completion + + @pytest.mark.complete("bpftool link pin id 1 ") + def test_link_pin_id_xxx(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool link pin pinned ") + 
def test_link_pin_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool link pin pinned /some_link ") + def test_link_pin_pinned_xxx(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool link pin pinned /some_link /some_path ") + def test_link_pin_pinned_xxx_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool link detach ") + def test_link_detach(self, completion): + assert completion == "id pinned".split() + + @pytest.mark.complete("bpftool link detach id ", require_cmd=True) + def test_link_detach_id(self, get_bpf_link, completion): + assert self.all_ints(completion) + link_id = get_bpf_link["id"] + if link_id is not None: + assert link_id in completion + + @pytest.mark.complete("bpftool link detach id 1 ") + def test_link_detach_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool link detach pinned ") + def test_link_detach_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool link detach pinned /some_link ") + def test_link_detach_pinned_xxx(self, completion): + assert not completion + + # bpftool map + + @pytest.mark.complete("bpftool map ") + def test_map(self, completion): + assert completion == [ + "create", + "delete", + "dequeue", + "dump", + "enqueue", + "event_pipe", + "freeze", + "getnext", + "help", + "list", + "lookup", + "peek", + "pin", + "pop", + "push", + "show", + "update", + ] + + @pytest.mark.complete("bpftool map help ") + def test_map_help(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map create ") + def test_map_create(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool map create /some_map ") + def test_map_create_xxx(self, completion): + assert completion == "entries flags key name offload_dev type value".split() + + @pytest.mark.complete("bpftool map create /some_map type ") + def 
test_map_create_xxx_type(self, completion): + assert completion == self.map_types + + @pytest.mark.complete("bpftool map create /some_map type hash ") + def test_map_create_xxx_type_xxx(self, completion): + assert completion == "entries flags key name offload_dev value".split() + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps ") + def test_map_create_xxx_type_mom(self, completion): + """Maps of maps get "inner_map" argument as well""" + assert completion == "entries flags inner_map key name offload_dev value".split() + + @pytest.mark.complete("bpftool map create /some_map type hash key ") + def test_map_create_xxx_type_xxx_key(self, completion): + """No "hex" keyword after "key" for creation, we expect a size.""" + assert not completion + + @pytest.mark.complete("bpftool map create /some_map type hash key 4 ") + def test_map_create_xxx_type_xxx_key_xxx(self, completion): + assert completion == "entries flags name offload_dev value".split() + + @pytest.mark.complete("bpftool map create /some_map type hash key 4 value ") + def test_map_create_xxx_type_xxx_key_xxx_value(self, completion): + """No "hex" keyword after "value" for creation, we expect a size.""" + assert not completion + + @pytest.mark.complete("bpftool map create /some_map type hash key 4 value 4 ") + def test_map_create_xxx_type_xxx_key_xxx_value_xxx(self, completion): + assert completion == "entries flags name offload_dev".split() + + @pytest.mark.complete("bpftool map create /some_map type hash key 4 value 4 entries ") + def test_map_create_xxx_type_xxx_key_xxx_value_xxx_entries(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map create /some_map type hash key 4 value 4 entries 64 ") + def test_map_create_xxx_type_xxx_key_xxx_value_xxx_entries_xxx(self, completion): + assert completion == "flags name offload_dev".split() + + @pytest.mark.complete("bpftool map create /some_map type hash key 4 value 4 entries 64 name ") + def 
test_map_create_xxx_type_xxx_key_xxx_value_xxx_entries_xxx_name(self, completion): + """No completion for "name", we expect user to type a new name.""" + assert not completion + + @pytest.mark.complete("bpftool map create /some_map type hash key 4 value 4 entries 64 name some_name ") + def test_map_create_xxx_type_xxx_key_xxx_value_xxx_entries_xxx_name_xxx(self, completion): + assert completion == "flags offload_dev".split() + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx(self, completion): + """Maps of maps get "inner_map" argument as well""" + assert completion == "flags inner_map offload_dev".split() + + @pytest.mark.complete("bpftool map create /some_map type hash key 4 value 4 entries 64 name some_name flags ") + def test_map_create_xxx_type_xxx_key_xxx_value_xxx_entries_xxx_name_xxx_flags(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map create /some_map type hash key 4 value 4 entries 64 name some_name flags 0x0 ") + def test_map_create_xxx_type_xxx_key_xxx_value_xxx_entries_xxx_name_xxx_flags_any(self, completion): + assert completion == "offload_dev" + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name flags 0x0 ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_flags_xxx(self, completion): + """Maps of maps get "inner_map" argument as well""" + assert completion == "inner_map offload_dev".split() + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name inner_map ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_inner_map(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name inner_map id ") + 
def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_innermap_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name inner_map id 1 ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_innermap_id_xxx(self, completion): + assert completion == "flags offload_dev".split() + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name inner_map pinned ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_innermap_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name inner_map pinned /some_map ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_innermap_pinned_xxx(self, completion): + assert completion == "flags offload_dev".split() + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name inner_map name ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_innermap_name(self, completion): + assert completion + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name inner_map name some_name ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_innermap_name_xxx(self, completion): + assert completion == "flags offload_dev".split() + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name inner_map name some_name flags ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_innermap_name_xxx_flags(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name inner_map name 
some_name flags 0x0 ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_innermap_name_xxx_flags_xxx(self, completion): + assert completion == "offload_dev" + + @pytest.mark.complete("bpftool map create /some_map type hash key 4 value 4 entries 64 name some_name offload_dev ") + def test_map_create_xxx_type_xxx_key_xxx_value_xxx_entries_xxx_name_xxx_offload_dev(self, ifnames, completion): + assert all(ifname in completion for ifname in ifnames) + + @pytest.mark.complete("bpftool map create /some_map type hash key 4 value 4 entries 64 name some_name offload_dev some_ifname ") + def test_map_create_xxx_type_xxx_key_xxx_value_xxx_entries_xxx_name_xxx_offload_dev_some_ifname(self, completion): + assert completion == "flags" + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name offload_dev some_ifname ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_offload_dev_some_ifname(self, completion): + """Maps of maps get "inner_map" argument as well""" + assert completion == "flags inner_map".split() + + @pytest.mark.complete("bpftool map create /some_map type hash_of_maps key 4 value 4 entries 64 name some_name offload_dev some_ifname inner_map name some_name flags 0x0 ") + def test_map_create_xxx_type_mom_key_xxx_value_xxx_entries_xxx_name_xxx_offload_dev_some_ifname_innermap_name_xxx_flags_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map dump ") + def test_map_dump(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool map dump id ") + def test_map_dump_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool map dump id 1 ") + def test_map_dump_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map dump pinned ") + def test_map_dump_pinned(self, completion): + assert self.all_paths(completion) + + 
@pytest.mark.complete("bpftool map dump pinned /some_map ") + def test_map_dump_pinned_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map dump name ") + def test_map_dump_name(self, completion): + assert completion + + @pytest.mark.complete("bpftool map dump name some_name ") + def test_map_dump_name_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map update ") + def test_map_update(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool map update id 1 ") + def test_map_update_id_xxx(self, completion): + assert completion == "key" + + @pytest.mark.complete("bpftool map update id 1 key ") + def test_map_update_id_xxx_key(self, completion): + assert completion == "hex" + + @pytest.mark.complete("bpftool map update id 1 key hex ") + def test_map_update_id_xxx_key_hex(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map update id 1 key 0x00 0x00 0x00 0x00 ") + def test_map_update_id_xxx_key_xxx(self, completion): + assert completion == "value" + + @pytest.mark.complete("bpftool map update id 1 key 0x00 0x00 0x00 0x00 value ") + def test_map_update_id_xxx_key_xxx_value(self, completion): + assert completion == "hex" + + @pytest.mark.complete("bpftool map update id 1 key 0x00 0x00 0x00 0x00 value hex ") + def test_map_update_id_xxx_key_xxx_value_hex(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map update id 1 key 0x00 0x00 0x00 0x00 value 0x00 0x00 0x00 0x00 ") + def test_map_update_id_xxx_key_xxx_value_xxx(self, completion): + assert completion == "any exist noexist".split() + + @pytest.mark.complete("bpftool map update id 1 key 0x00 0x00 0x00 0x00 value 0x00 0x00 0x00 0x00 any ") + def test_map_update_id_xxx_key_xxx_value_xxx_any(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map lookup ") + def test_map_lookup(self, completion): + assert completion == "id name 
pinned".split() + + @pytest.mark.complete("bpftool map lookup id 1 ") + def test_map_lookup_id_xxx(self, completion): + assert completion == "key" + + @pytest.mark.complete("bpftool map lookup id 1 key ") + def test_map_lookup_id_xxx_key(self, completion): + assert completion == "hex" + + @pytest.mark.complete("bpftool map lookup id 1 key hex ") + def test_map_lookup_id_xxx_key_hex(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map lookup id 1 key 0x00 0x00 0x00 0x00 ") + def test_map_lookup_id_xxx_key_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map getnext ") + def test_map_getnext(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool map getnext id 1 ") + def test_map_getnext_id_xxx(self, completion): + assert completion == "key" + + @pytest.mark.complete("bpftool map getnext id 1 key ") + def test_map_getnext_id_xxx_key(self, completion): + assert completion == "hex" + + @pytest.mark.complete("bpftool map getnext id 1 key hex ") + def test_map_getnext_id_xxx_key_hex(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map getnext id 1 key 0x00 0x00 0x00 0x00 ") + def test_map_getnext_id_xxx_key_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map delete ") + def test_map_delete(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool map delete id 1 ") + def test_map_delete_id_xxx(self, completion): + assert completion == "key" + + @pytest.mark.complete("bpftool map delete id 1 key ") + def test_map_delete_id_xxx_key(self, completion): + assert completion == "hex" + + @pytest.mark.complete("bpftool map delete id 1 key hex ") + def test_map_delete_id_xxx_key_hex(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map delete id 1 key 0x00 0x00 0x00 0x00 ") + def test_map_delete_id_xxx_key_xxx(self, completion): + assert 
not completion + + @pytest.mark.complete("bpftool map pin ") + def test_map_pin(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool map pin id 1 ") + def test_map_pin_id_xxx(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool map pin id 1 /some_path ") + def test_map_pin_id_xxx_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map event_pipe ") + def test_map_event_pipe(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool map event_pipe id 1 ") + def test_map_event_pipe_id_xxx(self, completion): + assert completion == "cpu index".split() + + @pytest.mark.complete("bpftool map event_pipe id 1 cpu ") + def test_map_event_pipe_id_xxx_cpu(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map event_pipe id 1 cpu 1 ") + def test_map_event_pipe_id_1_cpu_xxx(self, completion): + assert completion == "index" + + @pytest.mark.complete("bpftool map event_pipe id 1 index ") + def test_map_event_pipe_id_xxx_index(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map event_pipe id 1 index 1 ") + def test_map_event_pipe_id_1_index_xxx(self, completion): + assert completion == "cpu" + + @pytest.mark.complete("bpftool map event_pipe id 1 cpu 1 index ") + def test_map_event_pipe_id_1_cpu_xxx_index(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map event_pipe id 1 cpu 1 index 1 ") + def test_map_event_pipe_id_1_cpu_1_index_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map peek ") + def test_map_peek(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool map peek id 1 ") + def test_map_peek_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map push ") + def test_map_push(self, completion): + assert completion == "id 
name pinned".split() + + @pytest.mark.complete("bpftool map push id 1 ") + def test_map_push_id_xxx(self, completion): + assert completion == "value" + + @pytest.mark.complete("bpftool map push id 1 value ") + def test_map_push_id_xxx_value(self, completion): + assert completion == "hex" + + @pytest.mark.complete("bpftool map push id 1 value hex ") + def test_map_push_id_xxx_value_hex(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map push id 1 value 0x00 0x00 0x00 0x00 ") + def test_map_push_id_xxx_value_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map pop ") + def test_map_pop(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool map pop id 1 ") + def test_map_pop_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map enqueue ") + def test_map_enqueue(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool map enqueue id 1 ") + def test_map_enqueue_id_xxx(self, completion): + assert completion == "value" + + @pytest.mark.complete("bpftool map enqueue id 1 value ") + def test_map_enqueue_id_xxx_value(self, completion): + assert completion == "hex" + + @pytest.mark.complete("bpftool map enqueue id 1 value hex ") + def test_map_enqueue_id_xxx_value_hex(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map enqueue id 1 value 0x00 0x00 0x00 0x00 ") + def test_map_enqueue_id_xxx_value_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map dequeue ") + def test_map_dequeue(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool map dequeue id 1 ") + def test_map_dequeue_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool map freeze ") + def test_map_freeze(self, completion): + assert completion == "id name pinned".split() + + 
@pytest.mark.complete("bpftool map freeze id 1 ") + def test_map_freeze_id_xxx(self, completion): + assert not completion + + # bpftool net + + @pytest.mark.complete("bpftool net ") + def test_net(self, completion): + assert completion == "attach detach help list show".split() + + @pytest.mark.complete("bpftool net help ") + def test_net_help(self, completion): + assert not completion + + @pytest.mark.complete("bpftool net list ") + def test_net_list(self, completion): + assert completion == "dev" + + @pytest.mark.complete("bpftool net show ") + def test_net_show(self, completion): + assert completion == "dev" + + @pytest.mark.complete("bpftool net show dev ") + def test_net_show_dev(self, ifnames, completion): + assert all(ifname in completion for ifname in ifnames) + + @pytest.mark.complete("bpftool net show dev some_ifname ") + def test_net_show_dev_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool net attach ") + def test_net_attach(self, completion): + assert completion == "xdp xdpdrv xdpgeneric xdpoffload".split() + + @pytest.mark.complete("bpftool net attach xdp ") + def test_net_attach_xxx(self, completion): + assert completion == "id name pinned tag".split() + + @pytest.mark.complete("bpftool net attach xdp id ") + def test_net_attach_xxx_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool net attach xdp id 1 ") + def test_net_attach_xxx_id_1(self, completion): + assert completion == "dev" + + @pytest.mark.complete("bpftool net attach xdp pinned ") + def test_net_attach_xxx_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool net attach xdp pinned /some_prog ") + def test_net_attach_xxx_pinned_xxx(self, completion): + assert completion == "dev" + + @pytest.mark.complete("bpftool net attach xdp name ") + def test_net_attach_xxx_name(self, completion): + assert completion + + @pytest.mark.complete("bpftool net attach xdp name some_name ") + def 
test_net_attach_xxx_name_xxx(self, completion): + assert completion == "dev" + + @pytest.mark.complete("bpftool net attach xdp tag ") + def test_net_attach_xxx_tag(self, completion): + assert self.all_tags(completion) + + @pytest.mark.complete("bpftool net attach xdp tag some_tag ") + def test_net_attach_xxx_tag_xxx(self, completion): + assert completion == "dev".split() + + @pytest.mark.complete("bpftool net attach xdp tag some_tag dev ") + def test_net_attach_xxx_tag_xxx_dev(self, ifnames, completion): + assert all(ifname in completion for ifname in ifnames) + + @pytest.mark.complete("bpftool net attach xdp tag some_tag dev some_ifname ") + def test_net_attach_xxx_tag_xxx_dev_xxx(self, completion): + assert completion == "overwrite" + + @pytest.mark.complete("bpftool net attach xdp tag some_tag dev some_ifname overwrite ") + def test_net_attach_xxx_tag_xxx_dev_xxx_overwrite(self, completion): + assert not completion + + @pytest.mark.complete("bpftool net detach ") + def test_net_detach(self, completion): + assert completion == "xdp xdpdrv xdpgeneric xdpoffload".split() + + @pytest.mark.complete("bpftool net detach xdp ") + def test_net_detach_xxx(self, completion): + assert completion == "dev" + + @pytest.mark.complete("bpftool net detach xdp dev ") + def test_net_detach_xxx_dev(self, ifnames, completion): + assert all(ifname in completion for ifname in ifnames) + + @pytest.mark.complete("bpftool net detach xdp dev some_ifname ") + def test_net_detach_xxx_dev_xxx(self, completion): + assert not completion + + # bpftool perf + + @pytest.mark.complete("bpftool perf ") + def test_perf(self, completion): + assert completion == "help list show".split() + + @pytest.mark.complete("bpftool perf help ") + def test_perf_help(self, completion): + assert not completion + + @pytest.mark.complete("bpftool perf list ") + def test_perf_list(self, completion): + assert not completion + + @pytest.mark.complete("bpftool perf show ") + def test_perf_show(self, completion): + assert 
not completion + + # bpftool prog + + @pytest.mark.complete("bpftool prog ") + def test_prog(self, completion): + assert completion == [ + "attach", + "detach", + "dump", + "help", + "list", + "load", + "loadall", + "pin", + "profile", + "run", + "show", + "tracelog", + ] + + @pytest.mark.complete("bpftool prog help ") + def test_prog_help(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog list ") + def test_prog_list(self, completion): + assert completion == "id name pinned tag".split() + + @pytest.mark.complete("bpftool prog show ") + def test_prog_show(self, completion): + assert completion == "id name pinned tag".split() + + @pytest.mark.complete("bpftool prog show id ") + def test_prog_show_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool prog show id 1 ") + def test_prog_show_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog show pinned ") + def test_prog_show_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog show pinned /some_prog ") + def test_prog_show_pinned_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog show tag ") + def test_prog_show_tag(self, completion): + assert self.all_tags(completion) + + @pytest.mark.complete("bpftool prog show some_tag ") + def test_prog_show_sometag(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog show name ") + def test_prog_show_name(self, completion): + assert completion + + @pytest.mark.complete("bpftool prog show some_name ") + def test_prog_show_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog dump ") + def test_prog_dump(self, completion): + assert completion == "jited xlated".split() + + @pytest.mark.complete("bpftool prog dump xlated ") + def test_prog_dump_xlated(self, completion): + assert completion == "id name pinned tag".split() + + 
@pytest.mark.complete("bpftool prog dump xlated id ") + def test_prog_dump_xlated_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool prog dump xlated id 1 ") + def test_prog_dump_xlated_id_xxx(self, completion): + assert completion == "file linum opcodes visual".split() + + @pytest.mark.complete("bpftool prog dump xlated pinned ") + def test_prog_dump_xlated_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog dump xlated pinned /some_prog ") + def test_prog_dump_xlated_pinned_xxx(self, completion): + assert completion == "file linum opcodes visual".split() + + @pytest.mark.complete("bpftool prog dump xlated tag ") + def test_prog_dump_xlated_tag(self, completion): + assert self.all_tags(completion) + + @pytest.mark.complete("bpftool prog dump xlated tag some_tag ") + def test_prog_dump_xlated_tag_sometag(self, completion): + assert completion == "file linum opcodes visual".split() + + @pytest.mark.complete("bpftool prog dump xlated name ") + def test_prog_dump_xlated_name(self, completion): + assert completion + + @pytest.mark.complete("bpftool prog dump xlated name some_name ") + def test_prog_dump_xlated_name_xxx(self, completion): + assert completion == "file linum opcodes visual".split() + + @pytest.mark.complete("bpftool prog dump xlated name some_name file ") + def test_prog_dump_xlated_name_xxx_file(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog dump xlated name some_name file /some_file ") + def test_prog_dump_xlated_name_xxx_file_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog dump xlated name some_name linum ") + def test_prog_dump_xlated_name_xxx_linum(self, completion): + assert completion == "opcodes visual".split() + + @pytest.mark.complete("bpftool prog dump xlated name some_name opcodes ") + def test_prog_dump_xlated_name_xxx_opcodes(self, completion): + assert completion == 
"linum visual".split() + + @pytest.mark.complete("bpftool prog dump xlated name some_name linum opcodes ") + def test_prog_dump_xlated_name_xxx_linum_opcodes(self, completion): + assert completion == "visual" + + @pytest.mark.complete("bpftool prog dump xlated name some_name -p ") + def test_prog_dump_xlated_name_xxx_p(self, completion): + """Options -j, --json, -p, --pretty prevent "visual" from appearing.""" + assert completion == "file linum opcodes".split() + + @pytest.mark.complete("bpftool prog dump xlated name some_name -d ") + def test_prog_dump_xlated_name_xxx_d(self, completion): + assert completion == "file linum opcodes visual".split() + + @pytest.mark.complete("bpftool prog dump xlated name some_name linum --json ") + def test_prog_dump_xlated_name_xxx_linum_json(self, completion): + """Options -j, --json, -p, --pretty prevent "visual" from appearing.""" + assert completion == "opcodes" + + @pytest.mark.complete("bpftool prog dump xlated name some_name opcodes --debug ") + def test_prog_dump_xlated_name_xxx_opcodes_debug(self, completion): + assert completion == "linum visual".split() + + @pytest.mark.complete("bpftool prog dump xlated name some_name visual ") + def test_prog_dump_xlated_name_xxx_visual(self, completion): + assert completion == "linum opcodes".split() + + @pytest.mark.complete("bpftool prog dump xlated name some_name visual linum ") + def test_prog_dump_xlated_name_xxx_visual_linum(self, completion): + assert completion == "opcodes" + + @pytest.mark.complete("bpftool prog dump xlated name some_name visual linum opcodes ") + def test_prog_dump_xlated_name_xxx_visual_linum_opcodes(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog dump jited name some_name ") + def test_prog_dump_jited_name_xxx(self, completion): + assert completion == "file linum opcodes".split() + + @pytest.mark.complete("bpftool prog dump jited name some_name file ") + def test_prog_dump_jited_name_xxx_file(self, completion): + assert 
self.all_paths(completion) + + @pytest.mark.complete("bpftool prog dump jited name some_name file /some_file ") + def test_prog_dump_jited_name_xxx_file_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog dump jited name some_name linum ") + def test_prog_dump_jited_name_xxx_linum(self, completion): + assert completion == "opcodes" + + @pytest.mark.complete("bpftool prog dump jited name some_name opcodes ") + def test_prog_dump_jited_name_xxx_opcodes(self, completion): + assert completion == "linum" + + @pytest.mark.complete("bpftool prog dump jited name some_name linum opcodes ") + def test_prog_dump_jited_name_xxx_linum_opcodes(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog loadall ") + def test_prog_loadall(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog load ") + def test_prog_load(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog load some_objfile ") + def test_prog_load_xxx(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog load some_objfile /some_path ") + def test_prog_load_xxx_xxx(self, completion): + assert completion == "autoattach map offload_dev pinmaps type xdpmeta_dev".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type ") + def test_prog_load_xxx_xxx_type(self, completion): + assert completion == self.prog_types + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp ") + def test_prog_load_xxx_xxx_type_xxx(self, completion): + assert completion == "autoattach map offload_dev pinmaps xdpmeta_dev".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map ") + def test_prog_load_xxx_xxx_type_xxx_map(self, completion): + assert completion == "idx name".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map idx ") + def 
test_prog_load_xxx_xxx_type_xxx_map_idx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map idx 1 ") + def test_prog_load_xxx_xxx_type_xxx_map_idx_xxx(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool prog load /tmp/bash_comp_test.o /some_path type kprobe map name ") + def test_prog_load_xxx_xxx_type_xxx_map_name(self, get_objfile, completion): + """Test that the map name is correctly extracted from the object file.""" + assert completion == get_objfile["map_name"] + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name id ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name id 1 ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_id_xxx(self, completion): + """Parameter "map" can be specified multiple times.""" + assert completion == "autoattach map offload_dev pinmaps xdpmeta_dev".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name id 1 map ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_id_xxx_map(self, completion): + """Parameter "map" can be specified multiple times.""" + assert completion == "idx name".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name id 1 map idx ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_id_xxx_map_idx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name id 1 map idx 1 ") + def 
test_prog_load_xxx_xxx_type_xxx_map_name_xxx_id_xxx_map_idx_xxx(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name id 1 map idx 1 name ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_id_xxx_map_idx_xxx_name(self, completion): + """Check that maps can be passed with different kind of references.""" + assert completion + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name id 1 map idx 1 id ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_id_xxx_map_idx_xxx_id(self, completion): + """Check that duplicate "id" is not an issue.""" + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name id 1 map idx 1 id 1 ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_id_xxx_map_idx_xxx_id_xxx(self, completion): + """Parameter "map" can be specified multiple times.""" + assert completion == "autoattach map offload_dev pinmaps xdpmeta_dev".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name pinned ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name pinned /some_map ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_pinned_xxx(self, completion): + """Parameter "map" can be specified multiple times.""" + assert completion == "autoattach map offload_dev pinmaps xdpmeta_dev".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp offload_dev ") + def test_prog_load_xxx_xxx_type_xxx_offload_dev(self, ifnames, completion): + assert all(ifname in completion for ifname in ifnames) + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp offload_dev some_ifname ") + def 
test_prog_load_xxx_xxx_type_xxx_offload_dev_xxx(self, completion): + assert completion == "autoattach map pinmaps".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp pinmaps ") + def test_prog_load_xxx_xxx_type_xxx_pinmaps(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp pinmaps /some_dir ") + def test_prog_load_xxx_xxx_type_xxx_pinmaps_xxx(self, completion): + assert completion == "autoattach map offload_dev xdpmeta_dev".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp autoattach ") + def test_prog_load_xxx_xxx_type_xxx_autoattach(self, completion): + assert completion == "map offload_dev pinmaps xdpmeta_dev".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name pinned /some_map offload_dev ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_pinned_xxx_offload_dev(self, ifnames, completion): + assert all(ifname in completion for ifname in ifnames) + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name pinned /some_map offload_dev some_ifname ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_pinned_xxx_offload_dev_xxx(self, completion): + """Parameter "map" can be specified multiple times.""" + assert completion == "autoattach map pinmaps".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name pinned /some_map xdpmeta_dev ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_pinned_xxx_xdpmeta_dev(self, ifnames, completion): + assert all(ifname in completion for ifname in ifnames) + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name pinned /some_map xdpmeta_dev some_ifname ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_pinned_xxx_xdpmeta_dev_xxx(self, completion): + """Parameter "map" can be specified multiple 
times.""" + assert completion == "autoattach map pinmaps".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name pinned /some_map offload_dev some_ifname pinmaps ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_pinned_xxx_offload_dev_xxx_pinmaps(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name pinned /some_map offload_dev some_ifname pinmaps /some_dir ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_pinned_xxx_offload_dev_xxx_pinmaps_xxx(self, completion): + """Parameter "map" can be specified multiple times.""" + assert completion == "autoattach map".split() + + @pytest.mark.complete("bpftool prog load some_objfile /some_path type xdp map name some_name pinned /some_map offload_dev some_ifname pinmaps /some_dir autoattach ") + def test_prog_load_xxx_xxx_type_xxx_map_name_xxx_pinned_xxx_offload_dev_xxx_pinmaps_xxx_autoattach(self, completion): + """Parameter "map" can be specified multiple times.""" + assert completion == "map".split() + + @pytest.mark.complete("bpftool prog attach ") + def test_prog_attach(self, completion): + assert completion == "id name pinned tag".split() + + @pytest.mark.complete("bpftool prog attach id 1 ") + def test_prog_attach_id_xxx(self, completion): + assert completion == self.prog_attach_types + + @pytest.mark.complete("bpftool prog attach id 1 sk_msg_verdict ") + def test_prog_attach_id_xxx_xxx(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool prog attach id 1 sk_msg_verdict id ") + def test_prog_attach_id_xxx_xxx_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool prog attach id 1 sk_msg_verdict id 1 ") + def test_prog_attach_id_xxx_xxx_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog attach id 1 sk_msg_verdict pinned ") + def 
test_prog_attach_id_xxx_xxx_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog attach id 1 sk_msg_verdict pinned /some_map ") + def test_prog_attach_id_xxx_xxx_pinned_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog detach id 1 ") + def test_prog_detach_id_xxx(self, completion): + assert completion == self.prog_attach_types + + @pytest.mark.complete("bpftool prog detach id 1 sk_msg_verdict ") + def test_prog_detach_id_xxx_xxx(self, completion): + assert completion == "id name pinned".split() + + @pytest.mark.complete("bpftool prog detach id 1 sk_msg_verdict id ") + def test_prog_detach_id_xxx_xxx_id(self, completion): + assert self.all_ints(completion) + + @pytest.mark.complete("bpftool prog detach id 1 sk_msg_verdict id 1 ") + def test_prog_detach_id_xxx_xxx_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog detach id 1 sk_msg_verdict pinned ") + def test_prog_detach_id_xxx_xxx_pinned(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog detach id 1 sk_msg_verdict pinned /some_map ") + def test_prog_detach_id_xxx_xxx_pinned_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog tracelog ") + def test_prog_tracelog(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog run ") + def test_prog_run(self, completion): + assert completion == "id name pinned tag".split() + + @pytest.mark.complete("bpftool prog run id 1 ") + def test_prog_run_id_xxx(self, completion): + assert completion == "ctx_in ctx_out ctx_size_out data_in data_out " \ + "data_size_out repeat".split() + + @pytest.mark.complete("bpftool prog run id 1 data_in ") + def test_prog_run_id_xxx_datain(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file ") + def test_prog_run_id_xxx_datain_xxx(self, completion): + 
assert completion == "ctx_in ctx_out ctx_size_out data_out " \ + "data_size_out repeat".split() + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file data_out ") + def test_prog_run_id_xxx_datain_xxx_dataout(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file data_out /some_file ") + def test_prog_run_id_xxx_datain_xxx_dataout_xxx(self, completion): + assert completion == "ctx_in ctx_out ctx_size_out " \ + "data_size_out repeat".split() + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file data_out /some_file data_size_out ") + def test_prog_run_id_xxx_datain_xxx_dataout_xxx_datasizeout(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file data_out /some_file data_size_out 64 ") + def test_prog_run_id_xxx_datain_xxx_dataout_xxx_datasizeout_xxx(self, completion): + assert completion == "ctx_in ctx_out ctx_size_out repeat".split() + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file ctx_in ") + def test_prog_run_id_xxx_datain_xxx_ctxin(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file ctx_in /some_file ") + def test_prog_run_id_xxx_datain_xxx_ctxin_xxx(self, completion): + assert completion == "ctx_out ctx_size_out data_out " \ + "data_size_out repeat".split() + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file ctx_in /some_file ctx_out ") + def test_prog_run_id_xxx_datain_xxx_ctxin_xxx_ctxout(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file ctx_in /some_file ctx_out /some_file ") + def test_prog_run_id_xxx_datain_xxx_ctxin_xxx_ctxout_xxx(self, completion): + assert completion == "ctx_size_out data_out " \ + "data_size_out repeat".split() + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file ctx_in /some_file ctx_out 
/some_file ctx_size_out ") + def test_prog_run_id_xxx_datain_xxx_ctxin_xxx_ctxout_xxx_ctxsizeout(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file ctx_in /some_file ctx_out /some_file ctx_size_out 64 ") + def test_prog_run_id_xxx_datain_xxx_ctxin_xxx_ctxout_xxx_ctxsizeout_xxx(self, completion): + assert completion == "data_out data_size_out repeat".split() + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file repeat ") + def test_prog_run_id_xxx_datain_xxx_repeat(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file repeat 100 ") + def test_prog_run_id_xxx_datain_xxx_repeat_xxx(self, completion): + assert completion == "ctx_in ctx_out ctx_size_out data_out " \ + "data_size_out".split() + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file data_out /some_file data_size_out 64 ctx_in ") + def test_prog_run_id_xxx_datain_xxx_dataout_xxx_datasizeout_xxx_ctxin(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file data_out /some_file data_size_out 64 ctx_in /some_file ctx_out /some_file ") + def test_prog_run_id_xxx_datain_xxx_dataout_xxx_datasizeout_xxx_ctxin_xxx_ctxout_xxx(self, completion): + assert completion == "ctx_size_out repeat".split() + + @pytest.mark.complete("bpftool prog run id 1 data_in /some_file data_out /some_file data_size_out 64 ctx_in /some_file ctx_out /some_file ctx_size_out 64 repeat 100 ") + def test_prog_run_id_xxx_datain_xxx_dataout_xxx_datasizeout_xxx_ctxin_xxx_ctxout_xxx_ctxsizeout_xxx_repeat_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog profile ") + def test_prog_profile(self, completion): + assert completion == "id name pinned tag".split() + + @pytest.mark.complete("bpftool prog profile id 1 ") + def test_prog_profile_id_xxx(self, completion): + assert completion == "cycles dtlb_misses 
duration instructions " \ + "itlb_misses l1d_loads llc_misses".split() + + @pytest.mark.complete("bpftool prog profile id 1 cycles ") + def test_prog_profile_id_xxx_cycles(self, completion): + assert completion == "dtlb_misses instructions " \ + "itlb_misses l1d_loads llc_misses".split() + + @pytest.mark.complete("bpftool prog profile id 1 duration ") + def test_prog_profile_id_xxx_duration(self, completion): + assert not completion + + @pytest.mark.complete("bpftool prog profile id 1 duration 15 ") + def test_prog_profile_id_xxx_duration_xxx(self, completion): + assert completion == "cycles dtlb_misses instructions " \ + "itlb_misses l1d_loads llc_misses".split() + + @pytest.mark.complete("bpftool prog profile id 1 duration 15 cycles ") + def test_prog_profile_id_xxx_duration_xxx_cycles(self, completion): + assert completion == "dtlb_misses instructions " \ + "itlb_misses l1d_loads llc_misses".split() + + @pytest.mark.complete("bpftool prog profile id 1 duration 15 cycles instructions l1d_loads llc_misses itlb_misses dtlb_misses ") + def test_prog_profile_id_xxx_duration_xxx_cycles_instructions_l1dloads_llcmisses_itlbmisses_dtlbmisses(self, completion): + assert not completion + + # bpftool struct_ops + + @pytest.mark.complete("bpftool struct_ops ") + def test_structops(self, completion): + assert completion == "dump help list register show unregister".split() + + @pytest.mark.complete("bpftool struct_ops help ") + def test_structops_help(self, completion): + assert not completion + + @pytest.mark.complete("bpftool struct_ops list ") + def test_structops_list(self, completion): + assert completion == "id name".split() + + @pytest.mark.complete("bpftool struct_ops show ") + def test_structops_show(self, completion): + assert completion == "id name".split() + + @pytest.mark.complete("bpftool struct_ops show id ") + def test_structops_show_id(self, get_struct_ops, completion): + assert self.all_ints(completion) + struct_ops_id = get_struct_ops["map_id"] + if 
struct_ops_id is not None: + assert struct_ops_id in completion + + @pytest.mark.complete("bpftool struct_ops show id 1 ") + def test_structops_show_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool struct_ops show name ") + def test_structops_name(self, get_struct_ops, completion): + struct_ops_name = get_struct_ops["map_name"] + if struct_ops_name is not None: + assert struct_ops_name in completion + + @pytest.mark.complete("bpftool struct_ops show name some_name ") + def test_structops_name_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool struct_ops dump ") + def test_structops_dump(self, completion): + assert completion == "id name".split() + + @pytest.mark.complete("bpftool struct_ops dump id ") + def test_structops_dump_id(self, get_struct_ops, completion): + assert self.all_ints(completion) + struct_ops_id = get_struct_ops["map_id"] + if struct_ops_id is not None: + assert struct_ops_id in completion + + @pytest.mark.complete("bpftool struct_ops dump id 1 ") + def test_structops_dump_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool struct_ops dump name ") + def test_structops_dump_name(self, get_struct_ops, completion): + struct_ops_name = get_struct_ops["map_name"] + if struct_ops_name is not None: + assert struct_ops_name in completion + + @pytest.mark.complete("bpftool struct_ops dump name some_name ") + def test_structops_dump_name_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool struct_ops register ") + def test_structops_register(self, completion): + assert self.all_paths(completion) + + @pytest.mark.complete("bpftool struct_ops register some_objfile ") + def test_structops_register_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool struct_ops unregister ") + def test_structops_unregister(self, completion): + assert completion == "id name".split() + + @pytest.mark.complete("bpftool 
struct_ops unregister id ") + def test_structops_unregister_id(self, get_struct_ops, completion): + assert self.all_ints(completion) + struct_ops_id = get_struct_ops["map_id"] + if struct_ops_id is not None: + assert struct_ops_id in completion + + @pytest.mark.complete("bpftool struct_ops unregister id 1 ") + def test_structops_unregister_id_xxx(self, completion): + assert not completion + + @pytest.mark.complete("bpftool struct_ops unregister name ") + def test_structops_unregister_name(self, get_struct_ops, completion): + struct_ops_name = get_struct_ops["map_name"] + if struct_ops_name is not None: + assert struct_ops_name in completion + + @pytest.mark.complete("bpftool struct_ops unregister name some_name ") + def test_structops_unregister_name_xxx(self, completion): + assert not completion diff --git a/src/Makefile b/src/Makefile index 048d2214..d48d22ff 100644 --- a/src/Makefile +++ b/src/Makefile @@ -42,7 +42,7 @@ $(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT) $(LIBBPF_HDRS_DI $(LIBBPF): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_OUTPUT) $(Q)$(MAKE) -C $(BPF_DIR) OBJDIR=$(patsubst %/,%,$(LIBBPF_OUTPUT)) \ - PREFIX=$(LIBBPF_DESTDIR:/=) $(LIBBPF) install_headers + DESTDIR="" PREFIX=$(LIBBPF_DESTDIR:/=) $(LIBBPF) install_headers $(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_DIR) $(call QUIET_INSTALL, $@) @@ -50,7 +50,7 @@ $(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_ $(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT) $(Q)$(MAKE) -C $(BPF_DIR) OBJDIR=$(patsubst %/,%,$(LIBBPF_BOOTSTRAP_OUTPUT)) \ - PREFIX=$(LIBBPF_BOOTSTRAP_DESTDIR:/=) \ + DESTDIR="" PREFIX=$(LIBBPF_BOOTSTRAP_DESTDIR:/=) \ ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" $@ install_headers $(LIBBPF_BOOTSTRAP_INTERNAL_HDRS): $(LIBBPF_BOOTSTRAP_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_BOOTSTRAP_HDRS_DIR) @@ -87,6 +87,10 @@ ifneq 
($(EXTRA_LDFLAGS),) LDFLAGS += $(EXTRA_LDFLAGS) endif +HOST_CFLAGS := $(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),\ + $(subst $(CLANG_CROSS_FLAGS),,$(CFLAGS))) +HOST_LDFLAGS := $(LDFLAGS) + INSTALL ?= install RM ?= rm -f @@ -172,12 +176,9 @@ ifeq ($(filter -DHAVE_LLVM_SUPPORT -DHAVE_LIBBFD_SUPPORT,$(CFLAGS)),) SRCS := $(filter-out jit_disasm.c,$(SRCS)) endif -HOST_CFLAGS = $(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),\ - $(subst $(CLANG_CROSS_FLAGS),,$(CFLAGS))) - BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool -BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o disasm.o) +BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o) $(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP) OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o @@ -225,14 +226,11 @@ endif CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS) -$(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/src/kernel/bpf/disasm.c - $(QUIET_CC)$(HOSTCC) $(HOST_CFLAGS) -c -MMD $< -o $@ - $(OUTPUT)disasm.o: $(srctree)/src/kernel/bpf/disasm.c $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@ $(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP) - $(QUIET_LINK)$(HOSTCC) $(HOST_CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@ + $(QUIET_LINK)$(HOSTCC) $(HOST_CFLAGS) $(HOST_LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@ $(OUTPUT)bpftool: $(OBJS) $(LIBBPF) $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@ diff --git a/src/Makefile.feature b/src/Makefile.feature index d091cdac..131c67e4 100644 --- a/src/Makefile.feature +++ b/src/Makefile.feature @@ -27,7 +27,7 @@ endif ### feature-clang-bpf-co-re CLANG_BPF_CO_RE_PROBE_CMD = \ - printf '%s\n' 'struct s { int i; } __attribute__((preserve_access_index)); struct s foo;' | \ + printf '%s\n' 'struct s { int i; } __attribute__((preserve_access_index)); struct s foo = {};' | \ $(CLANG) -g -target bpf -S -o - -x c - 
$(QUIET_STDERR) | grep -q BTF_KIND_VAR ifneq ($(findstring clang-bpf-co-re,$(FEATURE_TESTS)),) diff --git a/src/btf_dumper.c b/src/btf_dumper.c index 1b7f6971..527fe867 100644 --- a/src/btf_dumper.c +++ b/src/btf_dumper.c @@ -127,7 +127,7 @@ static void btf_dumper_ptr(const struct btf_dumper *d, print_ptr_value: if (d->is_plain_text) - jsonw_printf(d->jw, "%p", (void *)value); + jsonw_printf(d->jw, "\"%p\"", (void *)value); else jsonw_printf(d->jw, "%lu", value); } diff --git a/src/cgroup.c b/src/cgroup.c index ac846b08..af6898c0 100644 --- a/src/cgroup.c +++ b/src/cgroup.c @@ -28,13 +28,15 @@ " cgroup_device | cgroup_inet4_bind |\n" \ " cgroup_inet6_bind | cgroup_inet4_post_bind |\n" \ " cgroup_inet6_post_bind | cgroup_inet4_connect |\n" \ - " cgroup_inet6_connect | cgroup_inet4_getpeername |\n" \ - " cgroup_inet6_getpeername | cgroup_inet4_getsockname |\n" \ - " cgroup_inet6_getsockname | cgroup_udp4_sendmsg |\n" \ - " cgroup_udp6_sendmsg | cgroup_udp4_recvmsg |\n" \ - " cgroup_udp6_recvmsg | cgroup_sysctl |\n" \ - " cgroup_getsockopt | cgroup_setsockopt |\n" \ - " cgroup_inet_sock_release }" + " cgroup_inet6_connect | cgroup_unix_connect |\n" \ + " cgroup_inet4_getpeername | cgroup_inet6_getpeername |\n" \ + " cgroup_unix_getpeername | cgroup_inet4_getsockname |\n" \ + " cgroup_inet6_getsockname | cgroup_unix_getsockname |\n" \ + " cgroup_udp4_sendmsg | cgroup_udp6_sendmsg |\n" \ + " cgroup_unix_sendmsg | cgroup_udp4_recvmsg |\n" \ + " cgroup_udp6_recvmsg | cgroup_unix_recvmsg |\n" \ + " cgroup_sysctl | cgroup_getsockopt |\n" \ + " cgroup_setsockopt | cgroup_inet_sock_release }" static unsigned int query_flags; static struct btf *btf_vmlinux; diff --git a/src/feature.c b/src/feature.c index edda4fc2..c754a428 100644 --- a/src/feature.c +++ b/src/feature.c @@ -426,10 +426,6 @@ static void probe_kernel_image_config(const char *define_prefix) { "CONFIG_BPF_STREAM_PARSER", }, /* xt_bpf module for passing BPF programs to netfilter */ { 
"CONFIG_NETFILTER_XT_MATCH_BPF", }, - /* bpfilter back-end for iptables */ - { "CONFIG_BPFILTER", }, - /* bpftilter module with "user mode helper" */ - { "CONFIG_BPFILTER_UMH", }, /* test_bpf module for BPF tests */ { "CONFIG_TEST_BPF", }, @@ -668,7 +664,8 @@ probe_helper_ifindex(enum bpf_func_id id, enum bpf_prog_type prog_type, probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf), ifindex); - res = !grep(buf, "invalid func ") && !grep(buf, "unknown func "); + res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ") && + !grep(buf, "program of this type cannot use helper "); switch (get_vendor_id(ifindex)) { case 0x19ee: /* Netronome specific */ diff --git a/src/gen.c b/src/gen.c index 04c47745..b3979ddc 100644 --- a/src/gen.c +++ b/src/gen.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -54,11 +55,27 @@ static bool str_has_suffix(const char *str, const char *suffix) return true; } +static const struct btf_type * +resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id) +{ + const struct btf_type *t; + + t = skip_mods_and_typedefs(btf, id, NULL); + if (!btf_is_ptr(t)) + return NULL; + + t = skip_mods_and_typedefs(btf, t->type, res_id); + + return btf_is_func_proto(t) ? t : NULL; +} + static void get_obj_name(char *name, const char *file) { - /* Using basename() GNU version which doesn't modify arg. */ - strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1); - name[MAX_OBJ_NAME_LEN - 1] = '\0'; + char file_copy[PATH_MAX]; + + /* Using basename() POSIX version to be more portable. 
*/ + strncpy(file_copy, file, PATH_MAX - 1)[PATH_MAX - 1] = '\0'; + strncpy(name, basename(file_copy), MAX_OBJ_NAME_LEN - 1)[MAX_OBJ_NAME_LEN - 1] = '\0'; if (str_has_suffix(name, ".o")) name[strlen(name) - 2] = '\0'; sanitize_identifier(name); @@ -103,6 +120,12 @@ static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz) static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" }; int i, n; + /* recognize hard coded LLVM section name */ + if (strcmp(sec_name, ".addr_space.1") == 0) { + /* this is the name to use in skeleton */ + snprintf(buf, buf_sz, "arena"); + return true; + } for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) { const char *pfx = pfxs[i]; @@ -231,8 +254,15 @@ static const struct btf_type *find_type_for_map(struct btf *btf, const char *map return NULL; } -static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz) +static bool is_mmapable_map(const struct bpf_map *map, char *buf, size_t sz) { + size_t tmp_sz; + + if (bpf_map__type(map) == BPF_MAP_TYPE_ARENA && bpf_map__initial_value(map, &tmp_sz)) { + snprintf(buf, sz, "arena"); + return true; + } + if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE)) return false; @@ -257,7 +287,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name) bpf_object__for_each_map(map, obj) { /* only generate definitions for memory-mapped internal maps */ - if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident))) + if (!is_mmapable_map(map, map_ident, sizeof(map_ident))) continue; sec = find_type_for_map(btf, map_ident); @@ -310,7 +340,7 @@ static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name bpf_object__for_each_map(map, obj) { /* only generate definitions for memory-mapped internal maps */ - if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident))) + if (!is_mmapable_map(map, map_ident, sizeof(map_ident))) continue; sec = find_type_for_map(btf, map_ident); @@ -356,7 +386,7 
@@ static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name */ needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var); if (needs_typeof) - printf("typeof("); + printf("__typeof__("); err = btf_dump__emit_type_decl(d, var_type_id, &opts); if (err) @@ -487,7 +517,7 @@ static void codegen_asserts(struct bpf_object *obj, const char *obj_name) ", obj_name); bpf_object__for_each_map(map, obj) { - if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident))) + if (!is_mmapable_map(map, map_ident, sizeof(map_ident))) continue; sec = find_type_for_map(btf, map_ident); @@ -703,22 +733,27 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h const void *mmap_data = NULL; size_t mmap_size = 0; - if (!is_internal_mmapable_map(map, ident, sizeof(ident))) + if (!is_mmapable_map(map, ident, sizeof(ident))) continue; codegen("\ \n\ - skel->%1$s = skel_prep_map_data((void *)\"\\ \n\ - ", ident); + { \n\ + static const char data[] __attribute__((__aligned__(8))) = \"\\\n\ + "); mmap_data = bpf_map__initial_value(map, &mmap_size); print_hex(mmap_data, mmap_size); codegen("\ \n\ - \", %1$zd, %2$zd); \n\ - if (!skel->%3$s) \n\ - goto cleanup; \n\ - skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\ - ", bpf_map_mmap_sz(map), mmap_size, ident); + \"; \n\ + \n\ + skel->%1$s = skel_prep_map_data((void *)data, %2$zd,\n\ + sizeof(data) - 1);\n\ + if (!skel->%1$s) \n\ + goto cleanup; \n\ + skel->maps.%1$s.initial_value = (__u64) (long) skel->%1$s;\n\ + } \n\ + ", ident, bpf_map_mmap_sz(map)); } codegen("\ \n\ @@ -733,36 +768,34 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h { \n\ struct bpf_load_and_run_opts opts = {}; \n\ int err; \n\ - \n\ - opts.ctx = (struct bpf_loader_ctx *)skel; \n\ - opts.data_sz = %2$d; \n\ - opts.data = (void *)\"\\ \n\ + static const char opts_data[] __attribute__((__aligned__(8))) = \"\\\n\ ", - obj_name, opts.data_sz); + obj_name); 
print_hex(opts.data, opts.data_sz); codegen("\ \n\ \"; \n\ + static const char opts_insn[] __attribute__((__aligned__(8))) = \"\\\n\ "); - - codegen("\ - \n\ - opts.insns_sz = %d; \n\ - opts.insns = (void *)\"\\ \n\ - ", - opts.insns_sz); print_hex(opts.insns, opts.insns_sz); codegen("\ \n\ \"; \n\ + \n\ + opts.ctx = (struct bpf_loader_ctx *)skel; \n\ + opts.data_sz = sizeof(opts_data) - 1; \n\ + opts.data = (void *)opts_data; \n\ + opts.insns_sz = sizeof(opts_insn) - 1; \n\ + opts.insns = (void *)opts_insn; \n\ + \n\ err = bpf_load_and_run(&opts); \n\ if (err < 0) \n\ return err; \n\ - ", obj_name); + "); bpf_object__for_each_map(map, obj) { const char *mmap_flags; - if (!is_internal_mmapable_map(map, ident, sizeof(ident))) + if (!is_mmapable_map(map, ident, sizeof(ident))) continue; if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG) @@ -851,7 +884,7 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped) ", i, bpf_map__name(map), i, ident); /* memory-mapped internal maps */ - if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) { + if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) { printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n", i, ident); } @@ -903,6 +936,208 @@ codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_li } } +static int walk_st_ops_shadow_vars(struct btf *btf, const char *ident, + const struct btf_type *map_type, __u32 map_type_id) +{ + LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, .indent_level = 3); + const struct btf_type *member_type; + __u32 offset, next_offset = 0; + const struct btf_member *m; + struct btf_dump *d = NULL; + const char *member_name; + __u32 member_type_id; + int i, err = 0, n; + int size; + + d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL); + if (!d) + return -errno; + + n = btf_vlen(map_type); + for (i = 0, m = btf_members(map_type); i < n; i++, m++) { + member_type = skip_mods_and_typedefs(btf, m->type, &member_type_id); + member_name = 
btf__name_by_offset(btf, m->name_off); + + offset = m->offset / 8; + if (next_offset < offset) + printf("\t\t\tchar __padding_%d[%d];\n", i, offset - next_offset); + + switch (btf_kind(member_type)) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: + /* scalar type */ + printf("\t\t\t"); + opts.field_name = member_name; + err = btf_dump__emit_type_decl(d, member_type_id, &opts); + if (err) { + p_err("Failed to emit type declaration for %s: %d", member_name, err); + goto out; + } + printf(";\n"); + + size = btf__resolve_size(btf, member_type_id); + if (size < 0) { + p_err("Failed to resolve size of %s: %d\n", member_name, size); + err = size; + goto out; + } + + next_offset = offset + size; + break; + + case BTF_KIND_PTR: + if (resolve_func_ptr(btf, m->type, NULL)) { + /* Function pointer */ + printf("\t\t\tstruct bpf_program *%s;\n", member_name); + + next_offset = offset + sizeof(void *); + break; + } + /* All pointer types are unsupported except for + * function pointers. + */ + fallthrough; + + default: + /* Unsupported types + * + * Types other than scalar types and function + * pointers are currently not supported in order to + * prevent conflicts in the generated code caused + * by multiple definitions. For instance, if the + * struct type FOO is used in a struct_ops map, + * bpftool has to generate definitions for FOO, + * which may result in conflicts if FOO is defined + * in different skeleton files. 
+ */ + size = btf__resolve_size(btf, member_type_id); + if (size < 0) { + p_err("Failed to resolve size of %s: %d\n", member_name, size); + err = size; + goto out; + } + printf("\t\t\tchar __unsupported_%d[%d];\n", i, size); + + next_offset = offset + size; + break; + } + } + + /* Cannot fail since it must be a struct type */ + size = btf__resolve_size(btf, map_type_id); + if (next_offset < (__u32)size) + printf("\t\t\tchar __padding_end[%d];\n", size - next_offset); + +out: + btf_dump__free(d); + + return err; +} + +/* Generate the pointer of the shadow type for a struct_ops map. + * + * This function adds a pointer of the shadow type for a struct_ops map. + * The members of a struct_ops map can be exported through a pointer to a + * shadow type. The user can access these members through the pointer. + * + * A shadow type includes not all members, only members of some types. + * They are scalar types and function pointers. The function pointers are + * translated to the pointer of the struct bpf_program. The scalar types + * are translated to the original type without any modifiers. + * + * Unsupported types will be translated to a char array to occupy the same + * space as the original field, being renamed as __unsupported_*. The user + * should treat these fields as opaque data. 
+ */ +static int gen_st_ops_shadow_type(const char *obj_name, struct btf *btf, const char *ident, + const struct bpf_map *map) +{ + const struct btf_type *map_type; + const char *type_name; + __u32 map_type_id; + int err; + + map_type_id = bpf_map__btf_value_type_id(map); + if (map_type_id == 0) + return -EINVAL; + map_type = btf__type_by_id(btf, map_type_id); + if (!map_type) + return -EINVAL; + + type_name = btf__name_by_offset(btf, map_type->name_off); + + printf("\t\tstruct %s__%s__%s {\n", obj_name, ident, type_name); + + err = walk_st_ops_shadow_vars(btf, ident, map_type, map_type_id); + if (err) + return err; + + printf("\t\t} *%s;\n", ident); + + return 0; +} + +static int gen_st_ops_shadow(const char *obj_name, struct btf *btf, struct bpf_object *obj) +{ + int err, st_ops_cnt = 0; + struct bpf_map *map; + char ident[256]; + + if (!btf) + return 0; + + /* Generate the pointers to shadow types of + * struct_ops maps. + */ + bpf_object__for_each_map(map, obj) { + if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS) + continue; + if (!get_map_ident(map, ident, sizeof(ident))) + continue; + + if (st_ops_cnt == 0) /* first struct_ops map */ + printf("\tstruct {\n"); + st_ops_cnt++; + + err = gen_st_ops_shadow_type(obj_name, btf, ident, map); + if (err) + return err; + } + + if (st_ops_cnt) + printf("\t} struct_ops;\n"); + + return 0; +} + +/* Generate the code to initialize the pointers of shadow types. */ +static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj) +{ + struct bpf_map *map; + char ident[256]; + + if (!btf) + return; + + /* Initialize the pointers to shadow types of + * struct_ops maps. 
+ */ + bpf_object__for_each_map(map, obj) { + if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS) + continue; + if (!get_map_ident(map, ident, sizeof(ident))) + continue; + codegen("\ + \n\ + obj->struct_ops.%1$s = (__typeof__(obj->struct_ops.%1$s))\n\ + bpf_map__initial_value(obj->maps.%1$s, NULL);\n\ + \n\ + ", ident); + } +} + static int do_skeleton(int argc, char **argv) { char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")]; @@ -1046,6 +1281,11 @@ static int do_skeleton(int argc, char **argv) printf("\t} maps;\n"); } + btf = bpf_object__btf(obj); + err = gen_st_ops_shadow(obj_name, btf, obj); + if (err) + goto out; + if (prog_cnt) { printf("\tstruct {\n"); bpf_object__for_each_program(prog, obj) { @@ -1069,7 +1309,6 @@ static int do_skeleton(int argc, char **argv) printf("\t} links;\n"); } - btf = bpf_object__btf(obj); if (btf) { err = codegen_datasecs(obj, obj_name); if (err) @@ -1127,6 +1366,12 @@ static int do_skeleton(int argc, char **argv) if (err) \n\ goto err_out; \n\ \n\ + ", obj_name); + + gen_st_ops_shadow_init(btf, obj); + + codegen("\ + \n\ return obj; \n\ err_out: \n\ %1$s__destroy(obj); \n\ @@ -1209,7 +1454,7 @@ static int do_skeleton(int argc, char **argv) codegen("\ \n\ \n\ - s->data = %2$s__elf_bytes(&s->data_sz); \n\ + s->data = %1$s__elf_bytes(&s->data_sz); \n\ \n\ obj->skeleton = s; \n\ return 0; \n\ @@ -1218,12 +1463,12 @@ static int do_skeleton(int argc, char **argv) return err; \n\ } \n\ \n\ - static inline const void *%2$s__elf_bytes(size_t *sz) \n\ + static inline const void *%1$s__elf_bytes(size_t *sz) \n\ { \n\ - *sz = %1$d; \n\ - return (const void *)\"\\ \n\ - " - , file_sz, obj_name); + static const char data[] __attribute__((__aligned__(8))) = \"\\\n\ + ", + obj_name + ); /* embed contents of BPF object file */ print_hex(obj_data, file_sz); @@ -1231,6 +1476,9 @@ static int do_skeleton(int argc, char **argv) codegen("\ \n\ \"; \n\ + \n\ + *sz = sizeof(data) - 1; \n\ + return (const void *)data; \n\ } \n\ \n\ #ifdef 
__cplusplus \n\ @@ -1383,7 +1631,7 @@ static int do_subskeleton(int argc, char **argv) /* Also count all maps that have a name */ map_cnt++; - if (!is_internal_mmapable_map(map, ident, sizeof(ident))) + if (!is_mmapable_map(map, ident, sizeof(ident))) continue; map_type_id = bpf_map__btf_value_type_id(map); @@ -1433,6 +1681,10 @@ static int do_subskeleton(int argc, char **argv) printf("\t} maps;\n"); } + err = gen_st_ops_shadow(obj_name, btf, obj); + if (err) + goto out; + if (prog_cnt) { printf("\tstruct {\n"); bpf_object__for_each_program(prog, obj) { @@ -1501,7 +1753,7 @@ static int do_subskeleton(int argc, char **argv) /* walk through each symbol and emit the runtime representation */ bpf_object__for_each_map(map, obj) { - if (!is_internal_mmapable_map(map, ident, sizeof(ident))) + if (!is_mmapable_map(map, ident, sizeof(ident))) continue; map_type_id = bpf_map__btf_value_type_id(map); @@ -1544,6 +1796,12 @@ static int do_subskeleton(int argc, char **argv) if (err) \n\ goto err; \n\ \n\ + "); + + gen_st_ops_shadow_init(btf, obj); + + codegen("\ + \n\ return obj; \n\ err: \n\ %1$s__destroy(obj); \n\ diff --git a/src/kernel/bpf/disasm.c b/src/kernel/bpf/disasm.c index 49940c26..bd2e2dd0 100644 --- a/src/kernel/bpf/disasm.c +++ b/src/kernel/bpf/disasm.c @@ -166,6 +166,12 @@ static bool is_movsx(const struct bpf_insn *insn) (insn->off == 8 || insn->off == 16 || insn->off == 32); } +static bool is_addr_space_cast(const struct bpf_insn *insn) +{ + return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && + insn->off == BPF_ADDR_SPACE_CAST; +} + void print_bpf_insn(const struct bpf_insn_cbs *cbs, const struct bpf_insn *insn, bool allow_ptr_leaks) @@ -184,6 +190,10 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs, insn->code, class == BPF_ALU ? 'w' : 'r', insn->dst_reg, class == BPF_ALU ? 
'w' : 'r', insn->dst_reg); + } else if (is_addr_space_cast(insn)) { + verbose(cbs->private_data, "(%02x) r%d = addr_space_cast(r%d, %d, %d)\n", + insn->code, insn->dst_reg, + insn->src_reg, ((u32)insn->imm) >> 16, (u16)insn->imm); } else if (BPF_SRC(insn->code) == BPF_X) { verbose(cbs->private_data, "(%02x) %c%d %s %s%c%d\n", insn->code, class == BPF_ALU ? 'w' : 'r', @@ -322,6 +332,10 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs, } else if (insn->code == (BPF_JMP | BPF_JA)) { verbose(cbs->private_data, "(%02x) goto pc%+d\n", insn->code, insn->off); + } else if (insn->code == (BPF_JMP | BPF_JCOND) && + insn->src_reg == BPF_MAY_GOTO) { + verbose(cbs->private_data, "(%02x) may_goto pc%+d\n", + insn->code, insn->off); } else if (insn->code == (BPF_JMP32 | BPF_JA)) { verbose(cbs->private_data, "(%02x) gotol pc%+d\n", insn->code, insn->imm); diff --git a/src/link.c b/src/link.c index 4b1407b0..afde9d0c 100644 --- a/src/link.c +++ b/src/link.c @@ -249,18 +249,44 @@ static int get_prog_info(int prog_id, struct bpf_prog_info *info) return err; } -static int cmp_u64(const void *A, const void *B) +struct addr_cookie { + __u64 addr; + __u64 cookie; +}; + +static int cmp_addr_cookie(const void *A, const void *B) { - const __u64 *a = A, *b = B; + const struct addr_cookie *a = A, *b = B; + + if (a->addr == b->addr) + return 0; + return a->addr < b->addr ? 
-1 : 1; +} - return *a - *b; +static struct addr_cookie * +get_addr_cookie_array(__u64 *addrs, __u64 *cookies, __u32 count) +{ + struct addr_cookie *data; + __u32 i; + + data = calloc(count, sizeof(data[0])); + if (!data) { + p_err("mem alloc failed"); + return NULL; + } + for (i = 0; i < count; i++) { + data[i].addr = addrs[i]; + data[i].cookie = cookies[i]; + } + qsort(data, count, sizeof(data[0]), cmp_addr_cookie); + return data; } static void show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr) { + struct addr_cookie *data; __u32 i, j = 0; - __u64 *addrs; jsonw_bool_field(json_wtr, "retprobe", info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN); @@ -268,14 +294,20 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr) jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed); jsonw_name(json_wtr, "funcs"); jsonw_start_array(json_wtr); - addrs = u64_to_ptr(info->kprobe_multi.addrs); - qsort(addrs, info->kprobe_multi.count, sizeof(addrs[0]), cmp_u64); + data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs), + u64_to_ptr(info->kprobe_multi.cookies), + info->kprobe_multi.count); + if (!data) + return; /* Load it once for all. 
*/ if (!dd.sym_count) kernel_syms_load(&dd); + if (!dd.sym_count) + goto error; + for (i = 0; i < dd.sym_count; i++) { - if (dd.sym_mapping[i].address != addrs[j]) + if (dd.sym_mapping[i].address != data[j].addr) continue; jsonw_start_object(json_wtr); jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address); @@ -287,11 +319,45 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr) } else { jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module); } + jsonw_uint_field(json_wtr, "cookie", data[j].cookie); jsonw_end_object(json_wtr); if (j++ == info->kprobe_multi.count) break; } jsonw_end_array(json_wtr); +error: + free(data); +} + +static __u64 *u64_to_arr(__u64 val) +{ + return (__u64 *) u64_to_ptr(val); +} + +static void +show_uprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr) +{ + __u32 i; + + jsonw_bool_field(json_wtr, "retprobe", + info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN); + jsonw_string_field(json_wtr, "path", (char *) u64_to_ptr(info->uprobe_multi.path)); + jsonw_uint_field(json_wtr, "func_cnt", info->uprobe_multi.count); + jsonw_int_field(json_wtr, "pid", (int) info->uprobe_multi.pid); + jsonw_name(json_wtr, "funcs"); + jsonw_start_array(json_wtr); + + for (i = 0; i < info->uprobe_multi.count; i++) { + jsonw_start_object(json_wtr); + jsonw_uint_field(json_wtr, "offset", + u64_to_arr(info->uprobe_multi.offsets)[i]); + jsonw_uint_field(json_wtr, "ref_ctr_offset", + u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i]); + jsonw_uint_field(json_wtr, "cookie", + u64_to_arr(info->uprobe_multi.cookies)[i]); + jsonw_end_object(json_wtr); + } + jsonw_end_array(json_wtr); } static void @@ -303,6 +369,7 @@ show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr) u64_to_ptr(info->perf_event.kprobe.func_name)); jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset); jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed); + jsonw_uint_field(wtr, "cookie", 
info->perf_event.kprobe.cookie); } static void @@ -312,6 +379,7 @@ show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr) jsonw_string_field(wtr, "file", u64_to_ptr(info->perf_event.uprobe.file_name)); jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset); + jsonw_uint_field(wtr, "cookie", info->perf_event.uprobe.cookie); } static void @@ -319,6 +387,7 @@ show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr) { jsonw_string_field(wtr, "tracepoint", u64_to_ptr(info->perf_event.tracepoint.tp_name)); + jsonw_uint_field(wtr, "cookie", info->perf_event.tracepoint.cookie); } static char *perf_config_hw_cache_str(__u64 config) @@ -395,6 +464,8 @@ show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr) else jsonw_uint_field(wtr, "event_config", config); + jsonw_uint_field(wtr, "cookie", info->perf_event.event.cookie); + if (type == PERF_TYPE_HW_CACHE && perf_config) free((void *)perf_config); } @@ -451,6 +522,10 @@ static int show_link_close_json(int fd, struct bpf_link_info *info) show_link_ifindex_json(info->tcx.ifindex, json_wtr); show_link_attach_type_json(info->tcx.attach_type, json_wtr); break; + case BPF_LINK_TYPE_NETKIT: + show_link_ifindex_json(info->netkit.ifindex, json_wtr); + show_link_attach_type_json(info->netkit.attach_type, json_wtr); + break; case BPF_LINK_TYPE_XDP: show_link_ifindex_json(info->xdp.ifindex, json_wtr); break; @@ -461,6 +536,9 @@ static int show_link_close_json(int fd, struct bpf_link_info *info) case BPF_LINK_TYPE_KPROBE_MULTI: show_kprobe_multi_json(info, json_wtr); break; + case BPF_LINK_TYPE_UPROBE_MULTI: + show_uprobe_multi_json(info, json_wtr); + break; case BPF_LINK_TYPE_PERF_EVENT: switch (info->perf_event.type) { case BPF_PERF_EVENT_EVENT: @@ -632,8 +710,8 @@ void netfilter_dump_plain(const struct bpf_link_info *info) static void show_kprobe_multi_plain(struct bpf_link_info *info) { + struct addr_cookie *data; __u32 i, j = 0; - __u64 *addrs; if 
(!info->kprobe_multi.count) return; @@ -645,21 +723,24 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info) printf("func_cnt %u ", info->kprobe_multi.count); if (info->kprobe_multi.missed) printf("missed %llu ", info->kprobe_multi.missed); - addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs); - qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64); + data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs), + u64_to_ptr(info->kprobe_multi.cookies), + info->kprobe_multi.count); + if (!data) + return; /* Load it once for all. */ if (!dd.sym_count) kernel_syms_load(&dd); if (!dd.sym_count) - return; + goto error; - printf("\n\t%-16s %s", "addr", "func [module]"); + printf("\n\t%-16s %-16s %s", "addr", "cookie", "func [module]"); for (i = 0; i < dd.sym_count; i++) { - if (dd.sym_mapping[i].address != addrs[j]) + if (dd.sym_mapping[i].address != data[j].addr) continue; - printf("\n\t%016lx %s", - dd.sym_mapping[i].address, dd.sym_mapping[i].name); + printf("\n\t%016lx %-16llx %s", + dd.sym_mapping[i].address, data[j].cookie, dd.sym_mapping[i].name); if (dd.sym_mapping[i].module[0] != '\0') printf(" [%s] ", dd.sym_mapping[i].module); else @@ -668,6 +749,35 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info) if (j++ == info->kprobe_multi.count) break; } +error: + free(data); +} + +static void show_uprobe_multi_plain(struct bpf_link_info *info) +{ + __u32 i; + + if (!info->uprobe_multi.count) + return; + + if (info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN) + printf("\n\turetprobe.multi "); + else + printf("\n\tuprobe.multi "); + + printf("path %s ", (char *) u64_to_ptr(info->uprobe_multi.path)); + printf("func_cnt %u ", info->uprobe_multi.count); + + if (info->uprobe_multi.pid) + printf("pid %d ", info->uprobe_multi.pid); + + printf("\n\t%-16s %-16s %-16s", "offset", "ref_ctr_offset", "cookies"); + for (i = 0; i < info->uprobe_multi.count; i++) { + printf("\n\t0x%-16llx 0x%-16llx 0x%-16llx", + 
u64_to_arr(info->uprobe_multi.offsets)[i], + u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i], + u64_to_arr(info->uprobe_multi.cookies)[i]); + } } static void show_perf_event_kprobe_plain(struct bpf_link_info *info) @@ -689,6 +799,8 @@ static void show_perf_event_kprobe_plain(struct bpf_link_info *info) printf("+%#x", info->perf_event.kprobe.offset); if (info->perf_event.kprobe.missed) printf(" missed %llu", info->perf_event.kprobe.missed); + if (info->perf_event.kprobe.cookie) + printf(" cookie %llu", info->perf_event.kprobe.cookie); printf(" "); } @@ -705,6 +817,8 @@ static void show_perf_event_uprobe_plain(struct bpf_link_info *info) else printf("\n\tuprobe "); printf("%s+%#x ", buf, info->perf_event.uprobe.offset); + if (info->perf_event.uprobe.cookie) + printf("cookie %llu ", info->perf_event.uprobe.cookie); } static void show_perf_event_tracepoint_plain(struct bpf_link_info *info) @@ -716,6 +830,8 @@ static void show_perf_event_tracepoint_plain(struct bpf_link_info *info) return; printf("\n\ttracepoint %s ", buf); + if (info->perf_event.tracepoint.cookie) + printf("cookie %llu ", info->perf_event.tracepoint.cookie); } static void show_perf_event_event_plain(struct bpf_link_info *info) @@ -737,6 +853,9 @@ static void show_perf_event_event_plain(struct bpf_link_info *info) else printf("%llu ", config); + if (info->perf_event.event.cookie) + printf("cookie %llu ", info->perf_event.event.cookie); + if (type == PERF_TYPE_HW_CACHE && perf_config) free((void *)perf_config); } @@ -791,6 +910,11 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info) show_link_ifindex_plain(info->tcx.ifindex); show_link_attach_type_plain(info->tcx.attach_type); break; + case BPF_LINK_TYPE_NETKIT: + printf("\n\t"); + show_link_ifindex_plain(info->netkit.ifindex); + show_link_attach_type_plain(info->netkit.attach_type); + break; case BPF_LINK_TYPE_XDP: printf("\n\t"); show_link_ifindex_plain(info->xdp.ifindex); @@ -798,6 +922,9 @@ static int show_link_close_plain(int 
fd, struct bpf_link_info *info) case BPF_LINK_TYPE_KPROBE_MULTI: show_kprobe_multi_plain(info); break; + case BPF_LINK_TYPE_UPROBE_MULTI: + show_uprobe_multi_plain(info); + break; case BPF_LINK_TYPE_PERF_EVENT: switch (info->perf_event.type) { case BPF_PERF_EVENT_EVENT: @@ -837,8 +964,10 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info) static int do_show_link(int fd) { + __u64 *ref_ctr_offsets = NULL, *offsets = NULL, *cookies = NULL; struct bpf_link_info info; __u32 len = sizeof(info); + char path_buf[PATH_MAX]; __u64 *addrs = NULL; char buf[PATH_MAX]; int count; @@ -877,6 +1006,47 @@ static int do_show_link(int fd) return -ENOMEM; } info.kprobe_multi.addrs = ptr_to_u64(addrs); + cookies = calloc(count, sizeof(__u64)); + if (!cookies) { + p_err("mem alloc failed"); + free(addrs); + close(fd); + return -ENOMEM; + } + info.kprobe_multi.cookies = ptr_to_u64(cookies); + goto again; + } + } + if (info.type == BPF_LINK_TYPE_UPROBE_MULTI && + !info.uprobe_multi.offsets) { + count = info.uprobe_multi.count; + if (count) { + offsets = calloc(count, sizeof(__u64)); + if (!offsets) { + p_err("mem alloc failed"); + close(fd); + return -ENOMEM; + } + info.uprobe_multi.offsets = ptr_to_u64(offsets); + ref_ctr_offsets = calloc(count, sizeof(__u64)); + if (!ref_ctr_offsets) { + p_err("mem alloc failed"); + free(offsets); + close(fd); + return -ENOMEM; + } + info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets); + cookies = calloc(count, sizeof(__u64)); + if (!cookies) { + p_err("mem alloc failed"); + free(ref_ctr_offsets); + free(offsets); + close(fd); + return -ENOMEM; + } + info.uprobe_multi.cookies = ptr_to_u64(cookies); + info.uprobe_multi.path = ptr_to_u64(path_buf); + info.uprobe_multi.path_size = sizeof(path_buf); goto again; } } @@ -915,8 +1085,10 @@ static int do_show_link(int fd) else show_link_close_plain(fd, &info); - if (addrs) - free(addrs); + free(ref_ctr_offsets); + free(cookies); + free(offsets); + free(addrs); close(fd); return 
0; } diff --git a/src/map.c b/src/map.c index f98f7bbe..b89bd792 100644 --- a/src/map.c +++ b/src/map.c @@ -1463,7 +1463,7 @@ static int do_help(int argc, char **argv) " devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n" " cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n" " queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n" - " task_storage | bloom_filter | user_ringbuf | cgrp_storage }\n" + " task_storage | bloom_filter | user_ringbuf | cgrp_storage | arena }\n" " " HELP_SPEC_OPTIONS " |\n" " {-f|--bpffs} | {-n|--nomount} }\n" "", diff --git a/src/net.c b/src/net.c index 66a8ce8a..968714b4 100644 --- a/src/net.c +++ b/src/net.c @@ -79,6 +79,8 @@ static const char * const attach_type_strings[] = { static const char * const attach_loc_strings[] = { [BPF_TCX_INGRESS] = "tcx/ingress", [BPF_TCX_EGRESS] = "tcx/egress", + [BPF_NETKIT_PRIMARY] = "netkit/primary", + [BPF_NETKIT_PEER] = "netkit/peer", }; const size_t net_attach_type_size = ARRAY_SIZE(attach_type_strings); @@ -506,6 +508,9 @@ static void show_dev_tc_bpf(struct ip_devname_ifindex *dev) { __show_dev_tc_bpf(dev, BPF_TCX_INGRESS); __show_dev_tc_bpf(dev, BPF_TCX_EGRESS); + + __show_dev_tc_bpf(dev, BPF_NETKIT_PRIMARY); + __show_dev_tc_bpf(dev, BPF_NETKIT_PEER); } static int show_dev_tc_bpf_classic(int sock, unsigned int nl_pid, @@ -926,7 +931,7 @@ static int do_help(int argc, char **argv) " ATTACH_TYPE := { xdp | xdpgeneric | xdpdrv | xdpoffload }\n" " " HELP_SPEC_OPTIONS " }\n" "\n" - "Note: Only xdp, tcx, tc, flow_dissector and netfilter attachments\n" + "Note: Only xdp, tcx, tc, netkit, flow_dissector and netfilter attachments\n" " are currently supported.\n" " For progs attached to cgroups, use \"bpftool cgroup\"\n" " to dump program attachments. 
For program types\n" diff --git a/src/pids.c b/src/pids.c index 00c77edb..9b898571 100644 --- a/src/pids.c +++ b/src/pids.c @@ -101,7 +101,6 @@ int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type) char buf[4096 / sizeof(*e) * sizeof(*e)]; struct pid_iter_bpf *skel; int err, ret, fd = -1, i; - libbpf_print_fn_t default_print; *map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL); if (IS_ERR(*map)) { @@ -118,12 +117,18 @@ int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type) skel->rodata->obj_type = type; - /* we don't want output polluted with libbpf errors if bpf_iter is not - * supported - */ - default_print = libbpf_set_print(libbpf_print_none); - err = pid_iter_bpf__load(skel); - libbpf_set_print(default_print); + if (!verifier_logs) { + libbpf_print_fn_t default_print; + + /* Unless debug information is on, we don't want the output to + * be polluted with libbpf errors if bpf_iter is not supported. + */ + default_print = libbpf_set_print(libbpf_print_none); + err = pid_iter_bpf__load(skel); + libbpf_set_print(default_print); + } else { + err = pid_iter_bpf__load(skel); + } if (err) { /* too bad, kernel doesn't support BPF iterators yet */ err = 0; diff --git a/src/prog.c b/src/prog.c index 8443a149..9cb42a33 100644 --- a/src/prog.c +++ b/src/prog.c @@ -442,7 +442,7 @@ static void print_prog_header_json(struct bpf_prog_info *info, int fd) jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses); } -static void print_prog_json(struct bpf_prog_info *info, int fd) +static void print_prog_json(struct bpf_prog_info *info, int fd, bool orphaned) { char *memlock; @@ -461,6 +461,7 @@ static void print_prog_json(struct bpf_prog_info *info, int fd) jsonw_uint_field(json_wtr, "uid", info->created_by_uid); } + jsonw_bool_field(json_wtr, "orphaned", orphaned); jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len); if (info->jited_prog_len) { @@ -527,7 +528,7 @@ static void 
print_prog_header_plain(struct bpf_prog_info *info, int fd) printf("\n"); } -static void print_prog_plain(struct bpf_prog_info *info, int fd) +static void print_prog_plain(struct bpf_prog_info *info, int fd, bool orphaned) { char *memlock; @@ -554,6 +555,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd) printf(" memlock %sB", memlock); free(memlock); + if (orphaned) + printf(" orphaned"); + if (info->nr_map_ids) show_prog_maps(fd, info->nr_map_ids); @@ -581,15 +585,15 @@ static int show_prog(int fd) int err; err = bpf_prog_get_info_by_fd(fd, &info, &len); - if (err) { + if (err && err != -ENODEV) { p_err("can't get prog info: %s", strerror(errno)); return -1; } if (json_output) - print_prog_json(&info, fd); + print_prog_json(&info, fd, err == -ENODEV); else - print_prog_plain(&info, fd); + print_prog_plain(&info, fd, err == -ENODEV); return 0; } @@ -2294,7 +2298,7 @@ static int profile_open_perf_events(struct profiler_bpf *obj) int map_fd; profile_perf_events = calloc( - sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric); + obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int)); if (!profile_perf_events) { p_err("failed to allocate memory for perf_event array: %s", strerror(errno)); @@ -2475,9 +2479,10 @@ static int do_help(int argc, char **argv) " sk_reuseport | flow_dissector | cgroup/sysctl |\n" " cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n" " cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n" - " cgroup/getpeername4 | cgroup/getpeername6 |\n" - " cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n" - " cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n" + " cgroup/connect_unix | cgroup/getpeername4 | cgroup/getpeername6 |\n" + " cgroup/getpeername_unix | cgroup/getsockname4 | cgroup/getsockname6 |\n" + " cgroup/getsockname_unix | cgroup/sendmsg4 | cgroup/sendmsg6 |\n" + " cgroup/sendmsg_unix | cgroup/recvmsg4 | cgroup/recvmsg6 | cgroup/recvmsg_unix |\n" " cgroup/getsockopt | cgroup/setsockopt 
| cgroup/sock_release |\n" " struct_ops | fentry | fexit | freplace | sk_lookup }\n" " ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n" diff --git a/src/skeleton/pid_iter.bpf.c b/src/skeleton/pid_iter.bpf.c index 26004f0c..7bdbcac3 100644 --- a/src/skeleton/pid_iter.bpf.c +++ b/src/skeleton/pid_iter.bpf.c @@ -102,8 +102,8 @@ int iter(struct bpf_iter__task_file *ctx) BPF_LINK_TYPE_PERF_EVENT___local)) { struct bpf_link *link = (struct bpf_link *) file->private_data; - if (link->type == bpf_core_enum_value(enum bpf_link_type___local, - BPF_LINK_TYPE_PERF_EVENT___local)) { + if (BPF_CORE_READ(link, type) == bpf_core_enum_value(enum bpf_link_type___local, + BPF_LINK_TYPE_PERF_EVENT___local)) { e.has_bpf_cookie = true; e.bpf_cookie = get_bpf_cookie(link); } diff --git a/src/struct_ops.c b/src/struct_ops.c index 3ebc9fe9..d573f264 100644 --- a/src/struct_ops.c +++ b/src/struct_ops.c @@ -276,6 +276,9 @@ static struct res do_one_id(const char *id_str, work_func func, void *data, res.nr_maps++; + if (wtr) + jsonw_start_array(wtr); + if (func(fd, info, data, wtr)) res.nr_errs++; else if (!wtr && json_output) @@ -288,6 +291,9 @@ static struct res do_one_id(const char *id_str, work_func func, void *data, */ jsonw_null(json_wtr); + if (wtr) + jsonw_end_array(wtr); + done: free(info); close(fd);