diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 55848cb5..c21b4984 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -37,7 +37,7 @@ jobs: with: architecture: x86_64 runner: ubuntu-latest - kernels: '[ "amazonlinux2", "centos", "debian", "fedora", "kali1", "kali3", "kali4", "kali5-7", "kali8-9", "mainline", "rocky", "ubuntu-aws", "ubuntu-azure", "ubuntu-gcp", "ubuntu-gke", "ubuntu-oem", "ubuntu-oracle" ]' + kernels: '[ "amazonlinux2", "centos", "debian", "fedora", "mainline", "rocky", "ubuntu-aws", "ubuntu-azure", "ubuntu-gcp", "ubuntu-gke", "ubuntu-oem", "ubuntu-oracle" ]' secrets: inherit multikernel-tester-aarch64: name: Test (aarch64) @@ -47,7 +47,7 @@ jobs: with: architecture: aarch64 runner: ubuntu-latest - kernels: '[ "amazonlinux2", "debian", "fedora", "kali1", "kali3", "kali4", "kali5-7", "kali8-9", "mainline", "rocky", "ubuntu-aws", "ubuntu-azure", "ubuntu-gcp", "ubuntu-gke", "ubuntu-oracle" ]' + kernels: '[ "amazonlinux2", "debian", "fedora", "mainline", "rocky", "ubuntu-aws", "ubuntu-azure", "ubuntu-gcp", "ubuntu-gke", "ubuntu-oracle" ]' secrets: inherit local-build-x86_64: name: Platform (x86_64) diff --git a/.gitignore b/.gitignore index 32257dbe..b320c581 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,7 @@ testing/testrunner/testrunner testing/test_bins/bin/* testing/init/bin/* testing/kernels +testing/kernel_builder/kernels testing/virtme *.swp diff --git a/GPL/Events/Helpers.h b/GPL/Events/Helpers.h index 42874bf3..9045cd86 100644 --- a/GPL/Events/Helpers.h +++ b/GPL/Events/Helpers.h @@ -117,6 +117,9 @@ const volatile int consumer_pid = 0; #define DECL_FUNC_ARG_EXISTS(func, arg) const volatile bool exists__##func##__##arg##__ = false; #define FUNC_ARG_EXISTS(func, arg) exists__##func##__##arg##__ +#define DECL_FIELD_OFFSET(struct, field) const volatile int off__##struct##__##field##__ = 0; +#define FIELD_OFFSET(struct, field) off__##struct##__##field##__ + // From linux/err.h #define MAX_ERRNO 4095 diff --git a/GPL/Events/Process/Probe.bpf.c b/GPL/Events/Process/Probe.bpf.c index bdc5364a..557c12e8 100644 --- a/GPL/Events/Process/Probe.bpf.c +++ b/GPL/Events/Process/Probe.bpf.c @@ -17,6 +17,9 @@ #include "PathResolver.h" #include "Varlen.h" +/* tty_write */ +DECL_FIELD_OFFSET(iov_iter, __iov); + // Limits on large things we send up as variable length parameters. // // These should be kept _well_ under half the size of the event_buffer_map or @@ -334,7 +337,6 @@ static int output_tty_event(struct ebpf_tty_dev *slave, const void *base, size_t static int tty_write__enter(struct kiocb *iocb, struct iov_iter *from) { - if (is_consumer()) { goto out; } @@ -369,9 +371,15 @@ static int tty_write__enter(struct kiocb *iocb, struct iov_iter *from) goto out; } - u64 nr_segs = BPF_CORE_READ(from, nr_segs); - nr_segs = nr_segs > MAX_NR_SEGS ? MAX_NR_SEGS : nr_segs; - const struct iovec *iov = BPF_CORE_READ(from, iov); + const struct iovec *iov; + if (FIELD_OFFSET(iov_iter, __iov)) { + iov = (const struct iovec *)((char *)from + FIELD_OFFSET(iov_iter, __iov)); + } else { + iov = BPF_CORE_READ(from, iov); + } + + u64 nr_segs = BPF_CORE_READ(from, nr_segs); + nr_segs = nr_segs > MAX_NR_SEGS ? 
MAX_NR_SEGS : nr_segs;
 
 	if (nr_segs == 0) {
 		u64 count = BPF_CORE_READ(from, count);
@@ -379,7 +387,12 @@ static int tty_write__enter(struct kiocb *iocb, struct iov_iter *from)
 		goto out;
 	}
 
-	for (u8 seg = 0; seg < nr_segs; seg++) {
+	for (int seg = 0; seg < nr_segs; seg++) {
+		// NOTE(matt): this check needs to be here because the verifier
+		// detects an infinite loop otherwise.
+		if (seg >= MAX_NR_SEGS)
+			goto out;
+
 		struct iovec *cur_iov = (struct iovec *)&iov[seg];
 		const char *base = BPF_CORE_READ(cur_iov, iov_base);
 		size_t len = BPF_CORE_READ(cur_iov, iov_len);
diff --git a/Makefile b/Makefile
index 1bf0191b..063d0659 100644
--- a/Makefile
+++ b/Makefile
@@ -19,7 +19,7 @@ CONTAINER_PULL_TAG ?= 20221121-1315
 CONTAINER_LOCAL_TAG ?= ebpf-builder:${USER}-latest
 
 IMAGEPACK_REPOSITORY ?= ghcr.io/elastic/ebpf-imagepack
-IMAGEPACK_PULL_TAG ?= 20230310-1158
+IMAGEPACK_PULL_TAG ?= 20231006-0053
 
 ifdef BUILD_CONTAINER_IMAGE
 CONTAINER_IMAGE = ${CONTAINER_LOCAL_TAG}
diff --git a/contrib/libbpf/BPF-CHECKPOINT-COMMIT b/contrib/libbpf/BPF-CHECKPOINT-COMMIT
index 331725aa..9cef0b96 100644
--- a/contrib/libbpf/BPF-CHECKPOINT-COMMIT
+++ b/contrib/libbpf/BPF-CHECKPOINT-COMMIT
@@ -1 +1 @@
-54c3f1a81421f85e60ae2eaae7be3727a09916ee
+71b547f561247897a0a14f3082730156c0533fed
diff --git a/contrib/libbpf/CHECKPOINT-COMMIT b/contrib/libbpf/CHECKPOINT-COMMIT
index a1abd95e..4a7588f2 100644
--- a/contrib/libbpf/CHECKPOINT-COMMIT
+++ b/contrib/libbpf/CHECKPOINT-COMMIT
@@ -1 +1 @@
-7b43df6c6ec38c9097420902a1c8165c4b25bf70
+2ddade322925641ee2a75f13665c51f2e74d7791
diff --git a/contrib/libbpf/README.md b/contrib/libbpf/README.md
index 9861b165..46422940 100644
--- a/contrib/libbpf/README.md
+++ b/contrib/libbpf/README.md
@@ -173,7 +173,7 @@ bpf-next to Github sync
 =======================
 
 All the gory details of syncing can be found in `scripts/sync-kernel.sh`
-script.
+script. See [SYNC.md](SYNC.md) for instructions.
 
 Some header files in this repo (`include/linux/*.h`) are reduced versions of
 their counterpart files at
diff --git a/contrib/libbpf/SYNC.md b/contrib/libbpf/SYNC.md
new file mode 100644
index 00000000..14a44a78
--- /dev/null
+++ b/contrib/libbpf/SYNC.md
@@ -0,0 +1,281 @@
+
+
+
+
+
+Libbpf sync
+===========
+
+Libbpf *authoritative source code* is developed as part of the [bpf-next Linux source
+tree](https://kernel.googlesource.com/pub/scm/linux/kernel/git/bpf/bpf-next) under
+the `tools/lib/bpf` subdirectory and is periodically synced to Github.
+
+Most of the mundane mechanical things like bpf and bpf-next tree merges, Git
+history transformation, cherry-picking relevant commits, re-generating
+auto-generated headers, etc. are taken care of by the
+[sync-kernel.sh script](https://github.com/libbpf/libbpf/blob/master/scripts/sync-kernel.sh).
+But occasionally a human needs to do a few extra things to make everything work
+nicely.
+
+This document goes over the process of syncing libbpf sources from the Linux
+repo to this Github repository. Feel free to contribute fixes and additions if
+you run into new problems not outlined here.
+
+Setup expectations
+------------------
+
+The sync script has particular expectations of the upstream Linux repo setup.
+It expects that the current HEAD of that repo points to bpf-next's master
+branch and that there is a separate local branch pointing to the bpf tree's
+master branch. This is important, as the script will automatically merge their
+histories for the purpose of the libbpf sync.
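+
+If your clone doesn't have this arrangement yet, something along these lines
+should produce it (a minimal sketch, assuming a fresh Linux checkout; the
+`bpf`/`bpf-next` remote names and the `bpf-master` branch name match the
+session shown below):
+
+```
+$ cd ~/linux
+$ git remote add bpf https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
+$ git remote add bpf-next https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
+$ git fetch bpf && git fetch bpf-next
+$ git checkout -B bpf-master bpf/master
+$ git checkout -B master bpf-next/master
+```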
+
+Below, we assume that the Linux repo is located at `~/linux`, its current HEAD
+is at the latest `bpf-next/master`, and libbpf's Github repo is located at
+`~/libbpf`, checked out to the latest commit on the `master` branch. It
+doesn't matter where you run the `sync-kernel.sh` script from, but we'll be
+running it from inside `~/libbpf`.
+
+```
+$ cd ~/linux && git remote -v | grep -E '^(bpf|bpf-next)'
+bpf	https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git (fetch)
+bpf	ssh://git@gitolite.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git (push)
+bpf-next	https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git (fetch)
+bpf-next	ssh://git@gitolite.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git (push)
+$ git branch -vv | grep -E '^\*? (master|bpf-master)'
+* bpf-master	2d311f480b52 [bpf/master] riscv, bpf: Fix patch_text implicit declaration
+  master	c8ee37bde402 [bpf-next/master] libbpf: Fix bpf_xdp_query() in old kernels
+$ git checkout bpf-master && git pull && git checkout master && git pull
+...
+$ git log --oneline -n1
+c8ee37bde402 (HEAD -> master, bpf-next/master) libbpf: Fix bpf_xdp_query() in old kernels
+$ cd ~/libbpf && git checkout master && git pull
+Your branch is up to date with 'libbpf/master'.
+Already up to date.
+```
+
+Running the sync script
+-----------------------
+
+The first step is always to run the `sync-kernel.sh` script. It expects three
+arguments:
+
+```
+$ scripts/sync-kernel.sh 
+```
+
+Note that we'll store the script's entire output in `/tmp/libbpf-sync.txt` and
+put it into the PR summary later on. **Please store the script's output and
+include it in the PR summary for others to check for anything unexpected and
+suspicious.**
+
+```
+$ scripts/sync-kernel.sh ~/libbpf ~/linux bpf-master | tee /tmp/libbpf-sync.txt
+Dumping existing libbpf commit signatures...
+WORKDIR: /home/andriin/libbpf
+LINUX REPO: /home/andriin/linux
+LIBBPF REPO: /home/andriin/libbpf
+...
+```
+
+Most of the time this will go very uneventfully. One expected case when the
+sync script might require user intervention is if the `bpf` tree has some
+libbpf fixes, which is nowadays not a very frequent occurrence. But if that
+happens, the script will show you a diff between the expected state as of the
+latest bpf-next and the synced Github repo state, and will ask if these
+changes look good. Please use your best judgement to verify that the
+differences are indeed from expected `bpf` tree fixes. E.g., it might look
+like the following:
+
+```
+Comparing list of files...
+Comparing file contents...
+--- /home/andriin/linux/include/uapi/linux/netdev.h	2023-02-27 16:54:42.270583372 -0800
++++ /home/andriin/libbpf/include/uapi/linux/netdev.h	2023-02-27 16:54:34.615530796 -0800
+@@ -19,7 +19,7 @@
+ * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs if netdev supports AF_XDP
+ * in zero copy mode.
+ * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs if netdev supports XDP hw
+- * oflloading.
++ * offloading.
+ * @NETDEV_XDP_ACT_RX_SG: This feature informs if netdev implements non-linear
+ * XDP buffer support in the driver napi callback.
+ * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs if netdev implements
+/home/andriin/linux/include/uapi/linux/netdev.h and /home/andriin/libbpf/include/uapi/linux/netdev.h are different!
+Unfortunately, there are some inconsistencies, please double check.
+Does everything look good? [y/N]:
+```
+
+If it looks sensible and expected, type `y` and the script will proceed.
+
+If the sync is successful, your `~/linux` repo will be left in its original
+state on the original HEAD commit.
+The `~/libbpf` repo will now be on a new branch, named
+`libbpf-sync-` (e.g., `libbpf-sync-2023-02-28T00-53-40.072Z`).
+
+Push this branch into your fork of the `libbpf/libbpf` Github repo and create
+a PR:
+
+```
+$ git push --set-upstream origin libbpf-sync-2023-02-28T00-53-40.072Z
+Enumerating objects: 130, done.
+Counting objects: 100% (115/115), done.
+Delta compression using up to 80 threads
+Compressing objects: 100% (28/28), done.
+Writing objects: 100% (32/32), 5.57 KiB | 1.86 MiB/s, done.
+Total 32 (delta 21), reused 0 (delta 0), pack-reused 0
+remote: Resolving deltas: 100% (21/21), completed with 9 local objects.
+remote:
+remote: Create a pull request for 'libbpf-sync-2023-02-28T00-53-40.072Z' on GitHub by visiting:
+remote:      https://github.com/anakryiko/libbpf/pull/new/libbpf-sync-2023-02-28T00-53-40.072Z
+remote:
+To github.com:anakryiko/libbpf.git
+ * [new branch]            libbpf-sync-2023-02-28T00-53-40.072Z -> libbpf-sync-2023-02-28T00-53-40.072Z
+Branch 'libbpf-sync-2023-02-28T00-53-40.072Z' set up to track remote branch 'libbpf-sync-2023-02-28T00-53-40.072Z' from 'origin'.
+```
+
+**Please adjust the PR name to have a proper-looking timestamp. Libbpf
+maintainers will be very thankful for that!**
+
+By default, Github will turn the above branch name into a PR with the subject
+"Libbpf sync 2023 02 28 t00 53 40.072 z". Please fix this to a proper
+timestamp, e.g.: "Libbpf sync 2023-02-28T00:53:40.072Z". Thank you!
+
+**Please don't forget to paste the contents of /tmp/libbpf-sync.txt into the
+PR summary!**
+
+Once the PR is created, libbpf CI will run a bunch of tests to check that
+everything is good. In simple cases that would be all you'd need to do. In more
+complicated cases some extra adjustments might be necessary.
+
+**Please keep naming and style consistent.** Prefix CI-related fixes with
+`ci: `. If you had to modify the sync script, use the `sync: ` prefix. Also
+make sure that each such commit has `Signed-off-by: Your Full Name `,
+just like you'd do for a Linux upstream patch. Libbpf closely follows kernel
+conventions and styling, so please help maintain that.
+
+Including new sources
+---------------------
+
+If entirely new source files (typically `*.c`) were added to the library in the
+kernel repository, it may be necessary to add these to the build system
+manually (you may notice linker errors otherwise), because the script cannot
+handle such changes automatically. To that end, edit `src/Makefile` as
+necessary. Commit
+[c2495832ced4](https://github.com/libbpf/libbpf/commit/c2495832ced4239bcd376b9954db38a6addd89ca)
+is an example of how to go about doing that.
+
+Similarly, if new public API header files were added, the `Makefile` will need
+to be adjusted as well.
+
+Updating allow/deny lists
+-------------------------
+
+Libbpf CI intentionally runs a subset of the latest BPF selftests on old
+kernels (currently 4.9 and 5.5). From time to time, tests that previously ran
+successfully on old kernels stop doing so, typically due to reliance on some
+freshly added kernel feature.
+It might look something like this in [CI logs](https://github.com/libbpf/libbpf/actions/runs/4206303272/jobs/7299609578#step:4:2733):
+
+```
+  All error logs:
+  serial_test_xdp_info:FAIL:get_xdp_none errno=2
+  #283     xdp_info:FAIL
+  Summary: 49/166 PASSED, 5 SKIPPED, 1 FAILED
+```
+
+In such a case, we can either work with upstream to fix the test to be
+compatible with old kernels, or we'll have to add the test to a denylist (or
+remove it from an allowlist, as was
+[done](https://github.com/libbpf/libbpf/commit/ea284299025bf85b85b4923191de6463cd43ccd6)
+for the case above).
+
+```
+$ find . -name '*LIST*'
+./ci/vmtest/configs/ALLOWLIST-4.9.0
+./ci/vmtest/configs/DENYLIST-5.5.0
+./ci/vmtest/configs/DENYLIST-latest.s390x
+./ci/vmtest/configs/DENYLIST-latest
+./ci/vmtest/configs/ALLOWLIST-5.5.0
+```
+
+Please determine which tests need to be added to or removed from which list,
+and then add that as a separate commit. **Please keep using the same branch
+name, so that the same PR can be updated.** There is no need to open new PRs
+for each such fix.
+
+Regenerating vmlinux.h header
+-----------------------------
+
+To compile the latest BPF selftests against old kernels, we check in a
+pre-generated
+[vmlinux.h](https://github.com/libbpf/libbpf/blob/master/.github/actions/build-selftests/vmlinux.h)
+header file, located at `.github/actions/build-selftests/vmlinux.h`, which
+contains type definitions from the latest upstream kernel. When, after a
+libbpf sync, upstream BPF selftests require new kernel types, we need to
+regenerate `vmlinux.h` and check it in as well.
+
+This will look something like this in [CI logs](https://github.com/libbpf/libbpf/actions/runs/4198939244/jobs/7283214243#step:4:1903):
+
+```
+  In file included from progs/test_spin_lock_fail.c:5:
+  /home/runner/work/libbpf/libbpf/.kernel/tools/testing/selftests/bpf/bpf_experimental.h:73:53: error: declaration of 'struct bpf_rb_root' will not be visible outside of this function [-Werror,-Wvisibility]
+  extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
+                                               ^
+  /home/runner/work/libbpf/libbpf/.kernel/tools/testing/selftests/bpf/bpf_experimental.h:81:35: error: declaration of 'struct bpf_rb_root' will not be visible outside of this function [-Werror,-Wvisibility]
+  extern void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
+                             ^
+  /home/runner/work/libbpf/libbpf/.kernel/tools/testing/selftests/bpf/bpf_experimental.h:90:52: error: declaration of 'struct bpf_rb_root' will not be visible outside of this function [-Werror,-Wvisibility]
+  extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
+                                              ^
+  3 errors generated.
+  make: *** [Makefile:572: /home/runner/work/libbpf/libbpf/.kernel/tools/testing/selftests/bpf/test_spin_lock_fail.bpf.o] Error 1
+  make: *** Waiting for unfinished jobs....
+  Error: Process completed with exit code 2.
+```
+
+You'll need to build the latest upstream kernel from the `bpf-next` tree,
+using BPF selftest configs. Concatenate the arch-agnostic and arch-specific
+configs, build the kernel, then use bpftool to dump `vmlinux.h`:
+
+```
+$ cd ~/linux
+$ cat tools/testing/selftests/bpf/config \
+      tools/testing/selftests/bpf/config.x86_64 > .config
+$ make -j$(nproc) olddefconfig all
+...
+$ bpftool btf dump file ~/linux/vmlinux format c > ~/libbpf/.github/actions/build-selftests/vmlinux.h
+$ cd ~/libbpf && git add . && git commit -s
+```
+
+Check in the generated `vmlinux.h`; don't forget to use the `ci: ` commit
+prefix and add it on top of the sync commits.
+Push to Github and let libbpf CI do the checking for you. See
+[this commit](https://github.com/libbpf/libbpf/commit/34212c94a64df8eeb1dd5d064630a65e1dfd4c20)
+for reference.
+
+Troubleshooting
+---------------
+
+If something goes wrong and the sync script exits early or is terminated early
+by the user, you might end up with the `~/linux` repo on a temporary
+sync-related branch. Don't worry, though: the sync script never destroys repo
+state; it follows a "copy-on-write" philosophy and creates new branches where
+necessary. So if anything goes wrong, it's very easy to restore the previous
+state and start fresh:
+
+```
+$ git branch | grep -E 'libbpf-.*Z'
+  libbpf-baseline-2023-02-28T00-43-35.146Z
+  libbpf-bpf-baseline-2023-02-28T00-43-35.146Z
+  libbpf-bpf-tip-2023-02-28T00-43-35.146Z
+  libbpf-squash-base-2023-02-28T00-43-35.146Z
+* libbpf-squash-tip-2023-02-28T00-43-35.146Z
+$ git cherry-pick --abort
+$ git checkout master && git branch | grep -E 'libbpf-.*Z' | xargs git branch -D
+Switched to branch 'master'
+Your branch is up to date with 'bpf-next/master'.
+Deleted branch libbpf-baseline-2023-02-28T00-43-35.146Z (was 951bce29c898).
+Deleted branch libbpf-bpf-baseline-2023-02-28T00-43-35.146Z (was 3a70e0d4c9d7).
+Deleted branch libbpf-bpf-tip-2023-02-28T00-43-35.146Z (was 2d311f480b52).
+Deleted branch libbpf-squash-base-2023-02-28T00-43-35.146Z (was 957f109ef883).
+Deleted branch libbpf-squash-tip-2023-02-28T00-43-35.146Z (was be66130d2339).
+Deleted branch libbpf-tip-2023-02-28T00-43-35.146Z (was 2d311f480b52).
+```
+
+You might need to do the same for your `~/libbpf` repo sometimes, depending on
+the stage at which the sync script was terminated.
diff --git a/contrib/libbpf/assets/libbpf-logo-compact-darkbg.png b/contrib/libbpf/assets/libbpf-logo-compact-darkbg.png
deleted file mode 100644
index aaef60eb..00000000
Binary files a/contrib/libbpf/assets/libbpf-logo-compact-darkbg.png and /dev/null differ
diff --git a/contrib/libbpf/assets/libbpf-logo-compact-mono.png b/contrib/libbpf/assets/libbpf-logo-compact-mono.png
deleted file mode 100644
index 51daed11..00000000
Binary files a/contrib/libbpf/assets/libbpf-logo-compact-mono.png and /dev/null differ
diff --git a/contrib/libbpf/assets/libbpf-logo-compact.png b/contrib/libbpf/assets/libbpf-logo-compact.png
deleted file mode 100644
index 99ae3aba..00000000
Binary files a/contrib/libbpf/assets/libbpf-logo-compact.png and /dev/null differ
diff --git a/contrib/libbpf/assets/libbpf-logo-sideways-darkbg.png b/contrib/libbpf/assets/libbpf-logo-sideways-darkbg.png
deleted file mode 100644
index 00d6d836..00000000
Binary files a/contrib/libbpf/assets/libbpf-logo-sideways-darkbg.png and /dev/null differ
diff --git a/contrib/libbpf/assets/libbpf-logo-sideways-mono.png b/contrib/libbpf/assets/libbpf-logo-sideways-mono.png
deleted file mode 100644
index 82f1ce55..00000000
Binary files a/contrib/libbpf/assets/libbpf-logo-sideways-mono.png and /dev/null differ
diff --git a/contrib/libbpf/assets/libbpf-logo-sideways.png b/contrib/libbpf/assets/libbpf-logo-sideways.png
deleted file mode 100644
index 48186c31..00000000
Binary files a/contrib/libbpf/assets/libbpf-logo-sideways.png and /dev/null differ
diff --git a/contrib/libbpf/assets/libbpf-logo-sparse-darkbg.png b/contrib/libbpf/assets/libbpf-logo-sparse-darkbg.png
deleted file mode 100644
index 5e574308..00000000
Binary files a/contrib/libbpf/assets/libbpf-logo-sparse-darkbg.png and /dev/null differ
diff --git a/contrib/libbpf/assets/libbpf-logo-sparse-mono.png b/contrib/libbpf/assets/libbpf-logo-sparse-mono.png
deleted
file mode 100644 index b02d432f..00000000 Binary files a/contrib/libbpf/assets/libbpf-logo-sparse-mono.png and /dev/null differ diff --git a/contrib/libbpf/assets/libbpf-logo-sparse.png b/contrib/libbpf/assets/libbpf-logo-sparse.png deleted file mode 100644 index a3065883..00000000 Binary files a/contrib/libbpf/assets/libbpf-logo-sparse.png and /dev/null differ diff --git a/contrib/libbpf/ci/diffs/0001-s390-define-RUNTIME_DISCARD_EXIT-to-fix-link-error-w.patch b/contrib/libbpf/ci/diffs/0001-s390-define-RUNTIME_DISCARD_EXIT-to-fix-link-error-w.patch new file mode 100644 index 00000000..2f8d84f2 --- /dev/null +++ b/contrib/libbpf/ci/diffs/0001-s390-define-RUNTIME_DISCARD_EXIT-to-fix-link-error-w.patch @@ -0,0 +1,70 @@ +From 6fba14e2ed9d159f76b23fa5c16f3ea99acbc003 Mon Sep 17 00:00:00 2001 +From: Masahiro Yamada +Date: Thu, 5 Jan 2023 12:13:06 +0900 +Subject: [PATCH] s390: define RUNTIME_DISCARD_EXIT to fix link error with GNU + ld < 2.36 + +Nathan Chancellor reports that the s390 vmlinux fails to link with +GNU ld < 2.36 since commit 99cb0d917ffa ("arch: fix broken BuildID +for arm64 and riscv"). + +It happens for defconfig, or more specifically for CONFIG_EXPOLINE=y. + + $ s390x-linux-gnu-ld --version | head -n1 + GNU ld (GNU Binutils for Debian) 2.35.2 + $ make -s ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- allnoconfig + $ ./scripts/config -e CONFIG_EXPOLINE + $ make -s ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- olddefconfig + $ make -s ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- + `.exit.text' referenced in section `.s390_return_reg' of drivers/base/dd.o: defined in discarded section `.exit.text' of drivers/base/dd.o + make[1]: *** [scripts/Makefile.vmlinux:34: vmlinux] Error 1 + make: *** [Makefile:1252: vmlinux] Error 2 + +arch/s390/kernel/vmlinux.lds.S wants to keep EXIT_TEXT: + + .exit.text : { + EXIT_TEXT + } + +But, at the same time, EXIT_TEXT is thrown away by DISCARD because +s390 does not define RUNTIME_DISCARD_EXIT. + +I still do not understand why the latter wins after 99cb0d917ffa, +but defining RUNTIME_DISCARD_EXIT seems correct because the comment +line in arch/s390/kernel/vmlinux.lds.S says: + + /* + * .exit.text is discarded at runtime, not link time, + * to deal with references from __bug_table + */ + +Nathan also found that binutils commit 21401fc7bf67 ("Duplicate output +sections in scripts") cured this issue, so we cannot reproduce it with +binutils 2.36+, but it is better to not rely on it. + +Fixes: 99cb0d917ffa ("arch: fix broken BuildID for arm64 and riscv") +Link: https://lore.kernel.org/all/Y7Jal56f6UBh1abE@dev-arch.thelio-3990X/ +Reported-by: Nathan Chancellor +Signed-off-by: Masahiro Yamada +Link: https://lore.kernel.org/r/20230105031306.1455409-1-masahiroy@kernel.org +Signed-off-by: Heiko Carstens +--- + arch/s390/kernel/vmlinux.lds.S | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S +index 5ea3830af0cc..6e101e6f499d 100644 +--- a/arch/s390/kernel/vmlinux.lds.S ++++ b/arch/s390/kernel/vmlinux.lds.S +@@ -17,6 +17,8 @@ + /* Handle ro_after_init data on our own. 
*/ + #define RO_AFTER_INIT_DATA + ++#define RUNTIME_DISCARD_EXIT ++ + #define EMITS_PT_NOTE + + #include +-- +2.30.2 + diff --git a/contrib/libbpf/ci/diffs/0001-selftests-bpf-Select-CONFIG_FUNCTION_ERROR_INJECTION.patch b/contrib/libbpf/ci/diffs/0001-selftests-bpf-Select-CONFIG_FUNCTION_ERROR_INJECTION.patch new file mode 100644 index 00000000..3ee42c6d --- /dev/null +++ b/contrib/libbpf/ci/diffs/0001-selftests-bpf-Select-CONFIG_FUNCTION_ERROR_INJECTION.patch @@ -0,0 +1,46 @@ +From a8dfde09c90109e3a98af54847e91bde7dc2d5c2 Mon Sep 17 00:00:00 2001 +From: Song Liu +Date: Tue, 13 Dec 2022 14:05:00 -0800 +Subject: [PATCH] selftests/bpf: Select CONFIG_FUNCTION_ERROR_INJECTION +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +BPF selftests require CONFIG_FUNCTION_ERROR_INJECTION to work. However, +CONFIG_FUNCTION_ERROR_INJECTION is no longer 'y' by default after recent +changes. As a result, we are seeing errors like the following from BPF CI: + + bpf_testmod_test_read() is not modifiable + __x64_sys_setdomainname is not sleepable + __x64_sys_getpgid is not sleepable + +Fix this by explicitly selecting CONFIG_FUNCTION_ERROR_INJECTION in the +selftest config. + +Fixes: a4412fdd49dc ("error-injection: Add prompt for function error injection") +Reported-by: Daniel Müller +Signed-off-by: Song Liu +Signed-off-by: Andrii Nakryiko +Signed-off-by: Daniel Borkmann +Acked-by: Daniel Müller +Link: https://lore.kernel.org/bpf/20221213220500.3427947-1-song@kernel.org +Signed-off-by: Daniel Müller +--- + tools/testing/selftests/bpf/config | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config +index 612f69..63cd4a 100644 +--- a/tools/testing/selftests/bpf/config ++++ b/tools/testing/selftests/bpf/config +@@ -16,6 +16,7 @@ CONFIG_CRYPTO_USER_API_HASH=y + CONFIG_DYNAMIC_FTRACE=y + CONFIG_FPROBE=y + CONFIG_FTRACE_SYSCALLS=y ++CONFIG_FUNCTION_ERROR_INJECTION=y + CONFIG_FUNCTION_TRACER=y + CONFIG_GENEVE=y + CONFIG_IKCONFIG=y +-- +2.30.2 + diff --git a/contrib/libbpf/ci/diffs/0001-veth-take-into-account-peer-device-for-NETDEV_XDP_AC.patch b/contrib/libbpf/ci/diffs/0001-veth-take-into-account-peer-device-for-NETDEV_XDP_AC.patch new file mode 100644 index 00000000..b97dba0a --- /dev/null +++ b/contrib/libbpf/ci/diffs/0001-veth-take-into-account-peer-device-for-NETDEV_XDP_AC.patch @@ -0,0 +1,83 @@ +From 8267fc71abb2dc47338570e56dd3473a58313fce Mon Sep 17 00:00:00 2001 +From: Lorenzo Bianconi +Date: Mon, 17 Apr 2023 23:53:22 +0200 +Subject: [PATCH] veth: take into account peer device for + NETDEV_XDP_ACT_NDO_XMIT xdp_features flag + +For veth pairs, NETDEV_XDP_ACT_NDO_XMIT is supported by the current +device if the peer one is running a XDP program or if it has GRO enabled. +Fix the xdp_features flags reporting considering peer device and not +current one for NETDEV_XDP_ACT_NDO_XMIT. 
+ +Fixes: fccca038f300 ("veth: take into account device reconfiguration for xdp_features flag") +Signed-off-by: Lorenzo Bianconi +Link: https://lore.kernel.org/r/4f1ca6f6f6b42ae125bfdb5c7782217c83968b2e.1681767806.git.lorenzo@kernel.org +Signed-off-by: Alexei Starovoitov +--- + drivers/net/veth.c | 17 +++++++++++------ + 1 file changed, 11 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/veth.c b/drivers/net/veth.c +index e1b38fbf1dd9..4b3c6647edc6 100644 +--- a/drivers/net/veth.c ++++ b/drivers/net/veth.c +@@ -1262,11 +1262,12 @@ static void veth_set_xdp_features(struct net_device *dev) + + peer = rtnl_dereference(priv->peer); + if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) { ++ struct veth_priv *priv_peer = netdev_priv(peer); + xdp_features_t val = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG; + +- if (priv->_xdp_prog || veth_gro_requested(dev)) ++ if (priv_peer->_xdp_prog || veth_gro_requested(peer)) + val |= NETDEV_XDP_ACT_NDO_XMIT | + NETDEV_XDP_ACT_NDO_XMIT_SG; + xdp_set_features_flag(dev, val); +@@ -1504,19 +1505,23 @@ static int veth_set_features(struct net_device *dev, + { + netdev_features_t changed = features ^ dev->features; + struct veth_priv *priv = netdev_priv(dev); ++ struct net_device *peer; + int err; + + if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog) + return 0; + ++ peer = rtnl_dereference(priv->peer); + if (features & NETIF_F_GRO) { + err = veth_napi_enable(dev); + if (err) + return err; + +- xdp_features_set_redirect_target(dev, true); ++ if (peer) ++ xdp_features_set_redirect_target(peer, true); + } else { +- xdp_features_clear_redirect_target(dev); ++ if (peer) ++ xdp_features_clear_redirect_target(peer); + veth_napi_del(dev); + } + return 0; +@@ -1598,13 +1603,13 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, + peer->max_mtu = max_mtu; + } + +- xdp_features_set_redirect_target(dev, true); ++ xdp_features_set_redirect_target(peer, true); + } + + if (old_prog) { + if (!prog) { +- if (!veth_gro_requested(dev)) +- xdp_features_clear_redirect_target(dev); ++ if (peer && !veth_gro_requested(dev)) ++ xdp_features_clear_redirect_target(peer); + + if (dev->flags & IFF_UP) + veth_disable_xdp(dev); +-- +2.34.1 + diff --git a/contrib/libbpf/ci/vmtest/configs/ALLOWLIST-5.5.0 b/contrib/libbpf/ci/vmtest/configs/ALLOWLIST-5.5.0 index 87f72f92..d673590e 100644 --- a/contrib/libbpf/ci/vmtest/configs/ALLOWLIST-5.5.0 +++ b/contrib/libbpf/ci/vmtest/configs/ALLOWLIST-5.5.0 @@ -16,7 +16,6 @@ global_data global_data_init global_func_args hashmap -l4lb_all legacy_printk linked_funcs linked_maps @@ -50,6 +49,5 @@ tcp_rtt tp_attach_query usdt/urand_pid_attach xdp -xdp_info xdp_noinline xdp_perf diff --git a/contrib/libbpf/ci/vmtest/configs/DENYLIST-latest b/contrib/libbpf/ci/vmtest/configs/DENYLIST-latest index e69de29b..ebebe9fb 100644 --- a/contrib/libbpf/ci/vmtest/configs/DENYLIST-latest +++ b/contrib/libbpf/ci/vmtest/configs/DENYLIST-latest @@ -0,0 +1 @@ +decap_sanity # weird failure with decap_sanity_ns netns already existing, TBD diff --git a/contrib/libbpf/docs/index.rst b/contrib/libbpf/docs/index.rst index f9b3b252..7545a204 100644 --- a/contrib/libbpf/docs/index.rst +++ b/contrib/libbpf/docs/index.rst @@ -2,23 +2,32 @@ .. _libbpf: +====== libbpf ====== +If you are looking to develop BPF applications using the libbpf library, this +directory contains important documentation that you should read. 
+
+To get started, it is recommended to begin with the :doc:`libbpf Overview
+` document, which provides a high-level understanding of the
+libbpf APIs and their usage. This will give you a solid foundation to start
+exploring and utilizing the various features of libbpf to develop your BPF
+applications.
+
 .. toctree::
    :maxdepth: 1
 
+   libbpf_overview
    API Documentation 
    program_types
    libbpf_naming_convention
    libbpf_build
 
-This is documentation for libbpf, a userspace library for loading and
-interacting with bpf programs.
-All general BPF questions, including kernel functionality, libbpf APIs and
-their application, should be sent to bpf@vger.kernel.org mailing list.
-You can `subscribe `_ to the
-mailing list search its `archive `_.
-Please search the archive before asking new questions. It very well might
-be that this was already addressed or answered before.
+All general BPF questions, including kernel functionality, libbpf APIs and their
+application, should be sent to the bpf@vger.kernel.org mailing list. You can
+`subscribe `_ to the mailing list and
+search its `archive `_. Please search the archive
+before asking new questions. It may be that this was already addressed or
+answered before.
diff --git a/contrib/libbpf/docs/libbpf_naming_convention.rst b/contrib/libbpf/docs/libbpf_naming_convention.rst
index c5ac97f3..b5b41b61 100644
--- a/contrib/libbpf/docs/libbpf_naming_convention.rst
+++ b/contrib/libbpf/docs/libbpf_naming_convention.rst
@@ -83,8 +83,8 @@ This prevents from accidentally exporting a symbol, that is not supposed
 to be a part of ABI what, in turn, improves both libbpf developer- and
 user-experiences.
 
-ABI versionning
----------------
+ABI versioning
+--------------
 
 To make future ABI extensions possible libbpf ABI is versioned.
 Versioning is implemented by ``libbpf.map`` version script that is
@@ -148,7 +148,7 @@ API documentation convention
 The libbpf API is documented via comments above definitions in
 header files. These comments can be rendered by doxygen and sphinx
 for well organized html output. This section describes the
-convention in which these comments should be formated.
+convention in which these comments should be formatted.
 
 Here is an example from btf.h:
diff --git a/contrib/libbpf/docs/libbpf_overview.rst b/contrib/libbpf/docs/libbpf_overview.rst
new file mode 100644
index 00000000..f36a2d4f
--- /dev/null
+++ b/contrib/libbpf/docs/libbpf_overview.rst
@@ -0,0 +1,228 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+libbpf Overview
+===============
+
+libbpf is a C-based library containing a BPF loader that takes compiled BPF
+object files and prepares and loads them into the Linux kernel. libbpf takes
+on the heavy lifting of loading, verifying, and attaching BPF programs to
+various kernel hooks, allowing BPF application developers to focus only on BPF
+program correctness and performance.
+
+The following are the high-level features supported by libbpf:
+
+* Provides high-level and low-level APIs for user space programs to interact
+  with BPF programs. The low-level APIs wrap all the bpf system call
+  functionality, which is useful when users need more fine-grained control
+  over the interactions between user space and BPF programs.
+* Provides overall support for the BPF object skeleton generated by bpftool.
+  The skeleton file simplifies the process for the user space programs to access
+  global variables and work with BPF programs.
+* Provides BPF-side APIs, including BPF helper definitions, BPF maps support,
+  and tracing helpers, allowing developers to simplify BPF code writing.
+* Supports the BPF CO-RE mechanism, enabling BPF developers to write portable
+  BPF programs that can be compiled once and run across different kernel
+  versions.
+
+This document will delve into the above concepts in detail, providing a deeper
+understanding of the capabilities and advantages of libbpf and how it can help
+you develop BPF applications efficiently.
+
+BPF App Lifecycle and libbpf APIs
+==================================
+
+A BPF application consists of one or more BPF programs (either cooperating or
+completely independent), BPF maps, and global variables. The global
+variables are shared between all BPF programs, which allows them to cooperate on
+a common set of data. libbpf provides APIs that user space programs can use to
+manipulate the BPF programs by triggering different phases of a BPF application
+lifecycle.
+
+The following section provides a brief overview of each phase in the BPF
+lifecycle:
+
+* **Open phase**: In this phase, libbpf parses the BPF
+  object file and discovers BPF maps, BPF programs, and global variables. After
+  a BPF app is opened, user space apps can make additional adjustments
+  (setting BPF program types, if necessary; pre-setting initial values for
+  global variables, etc.) before all the entities are created and loaded.
+
+* **Load phase**: In the load phase, libbpf creates BPF
+  maps, resolves various relocations, and verifies and loads BPF programs into
+  the kernel. At this point, libbpf validates all the parts of a BPF application
+  and loads the BPF program into the kernel, but no BPF program has yet been
+  executed. After the load phase, it’s possible to set up the initial BPF map
+  state without racing with the BPF program code execution.
+
+* **Attachment phase**: In this phase, libbpf
+  attaches BPF programs to various BPF hook points (e.g., tracepoints, kprobes,
+  cgroup hooks, network packet processing pipeline, etc.). During this
+  phase, BPF programs perform useful work such as processing
+  packets, or updating BPF maps and global variables that can be read from user
+  space.
+
+* **Tear down phase**: In the tear down phase,
+  libbpf detaches BPF programs and unloads them from the kernel. BPF maps are
+  destroyed, and all the resources used by the BPF app are freed.
+
+BPF Object Skeleton File
+========================
+
+The BPF skeleton is an alternative interface to libbpf APIs for working with
+BPF objects. Skeleton code abstracts away generic libbpf APIs to significantly
+simplify code for manipulating BPF programs from user space. Skeleton code
+includes a bytecode representation of the BPF object file, simplifying the
+process of distributing your BPF code. With BPF bytecode embedded, there are no
+extra files to deploy along with your application binary.
+
+You can generate the skeleton header file ``(.skel.h)`` for a specific object
+file by passing the BPF object to bpftool, as shown below.
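+
+For example, generating the skeleton for a compiled BPF object file is a
+single bpftool invocation (a minimal sketch; ``app.bpf.o`` and ``app.skel.h``
+are placeholder file names)::
+
+    $ bpftool gen skeleton app.bpf.o > app.skel.h
+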
+The generated BPF skeleton provides the following custom functions that
+correspond to the BPF lifecycle, each of them prefixed with the specific
+object name:
+
+* ``__open()`` – creates and opens BPF application (```` stands for
+  the specific bpf object name)
+* ``__load()`` – instantiates, loads, and verifies BPF application parts
+* ``__attach()`` – attaches all auto-attachable BPF programs (it’s
+  optional, you can have more control by using libbpf APIs directly)
+* ``__destroy()`` – detaches all BPF programs and
+  frees up all used resources
+
+Using the skeleton code is the recommended way to work with BPF programs. Keep
+in mind that the BPF skeleton provides access to the underlying BPF object, so
+whatever was possible to do with generic libbpf APIs is still possible even
+when the BPF skeleton is used. It's an additive convenience feature, with no
+syscalls, and no cumbersome code.
+
+Other Advantages of Using Skeleton File
+---------------------------------------
+
+* BPF skeleton provides an interface for user space programs to work with BPF
+  global variables. The skeleton code memory maps global variables as a struct
+  into user space. The struct interface allows user space programs to initialize
+  BPF programs before the BPF load phase and fetch and update data from user
+  space afterward.
+
+* The ``skel.h`` file reflects the object file structure by listing out the
+  available maps, programs, etc. BPF skeleton provides direct access to all the
+  BPF maps and BPF programs as struct fields. This eliminates the need for
+  string-based lookups with ``bpf_object_find_map_by_name()`` and
+  ``bpf_object_find_program_by_name()`` APIs, reducing errors due to BPF source
+  code and user-space code getting out of sync.
+
+* The embedded bytecode representation of the object file ensures that the
+  skeleton and the BPF object file are always in sync.
+
+BPF Helpers
+===========
+
+libbpf provides BPF-side APIs that BPF programs can use to interact with the
+system. The BPF helpers definition allows developers to use them in BPF code as
+any other plain C function. For example, there are helper functions to print
+debugging messages, get the time since the system was booted, interact with BPF
+maps, manipulate network packets, etc.
+
+For a complete description of what the helpers do, the arguments they take, and
+the return value, see the `bpf-helpers
+`_ man page.
+
+BPF CO-RE (Compile Once – Run Everywhere)
+=========================================
+
+BPF programs work in the kernel space and have access to kernel memory and data
+structures. One limitation that BPF applications come across is the lack of
+portability across different kernel versions and configurations. `BCC
+`_ is one of the solutions for BPF
+portability. However, it comes with runtime overhead and a large binary size
+from embedding the compiler with the application.
+
+libbpf steps up BPF program portability by supporting the BPF CO-RE concept.
+BPF CO-RE brings together BTF type information, libbpf, and the compiler to
+produce a single executable binary that you can run on multiple kernel versions
+and configurations.
+
+To make BPF programs portable, libbpf relies on the BTF type information of the
+running kernel. The kernel also exposes this self-describing authoritative BTF
+information through ``sysfs`` at ``/sys/kernel/btf/vmlinux``.
+
+You can generate the BTF information for the running kernel with the following
+command:
+
+::
+
+    $ bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h
+
+The command generates a ``vmlinux.h`` header file with all kernel types
+(:doc:`BTF types <../btf>`) that the running kernel uses. Including
+``vmlinux.h`` in your BPF program eliminates dependency on system-wide kernel
+headers.
+
+libbpf enables portability of BPF programs by looking at the BPF program’s
+recorded BTF type and relocation information and matching them to BTF
+information (vmlinux) provided by the running kernel. libbpf then resolves and
+matches all the types and fields, and updates necessary offsets and other
+relocatable data to ensure that the BPF program’s logic functions correctly
+for a specific kernel on the host. The BPF CO-RE concept thus eliminates the
+overhead associated with BPF development and allows developers to write
+portable BPF applications without modifications and runtime source code
+compilation on the target machine.
+
+The following code snippet shows how to read the parent field of a kernel
+``task_struct`` using BPF CO-RE and libbpf. The basic helper to read a field
+in a CO-RE relocatable manner is ``bpf_core_read(dst, sz, src)``, which will
+read ``sz`` bytes from the field referenced by ``src`` into the memory pointed
+to by ``dst``.
+
+.. code-block:: C
+    :emphasize-lines: 6
+
+    //...
+    struct task_struct *task = (void *)bpf_get_current_task();
+    struct task_struct *parent_task;
+    int err;
+
+    err = bpf_core_read(&parent_task, sizeof(void *), &task->parent);
+    if (err) {
+        /* handle error */
+    }
+
+    /* parent_task contains the value of task->parent pointer */
+
+In the code snippet, we first get a pointer to the current ``task_struct``
+using ``bpf_get_current_task()``. We then use ``bpf_core_read()`` to read the
+``parent`` field of the task struct into the ``parent_task`` variable.
+``bpf_core_read()`` is just like the ``bpf_probe_read_kernel()`` BPF helper,
+except it records information about the field that should be relocated on the
+target kernel. I.e., if the ``parent`` field gets shifted to a different
+offset within ``struct task_struct`` due to some new field added in front of
+it, libbpf will automatically adjust the actual offset to the proper value.
+
+Getting Started with libbpf
+===========================
+
+Check out the `libbpf-bootstrap `_
+repository with simple examples of using libbpf to build various BPF
+applications.
+
+See also `libbpf API documentation
+`_.
+
+libbpf and Rust
+===============
+
+If you are building BPF applications in Rust, it is recommended to use the
+`Libbpf-rs `_ library instead of using
+bindgen bindings directly to libbpf. Libbpf-rs wraps libbpf functionality in
+Rust-idiomatic interfaces and provides the libbpf-cargo plugin to handle BPF
+code compilation and skeleton generation. Using Libbpf-rs will make building
+the user space part of the BPF application easier. Note that the BPF programs
+themselves must still be written in plain C.
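+
+For example, with the libbpf-cargo plugin installed, BPF code compilation and
+skeleton generation can be driven from cargo (a sketch; the subcommands are
+the ones provided by libbpf-cargo)::
+
+    $ cargo install libbpf-cargo
+    $ cargo libbpf build    # compiles the BPF C code into an object file
+    $ cargo libbpf gen      # generates Rust skeleton code for it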
+ +Additional Documentation +======================== + +* `Program types and ELF Sections `_ +* `API naming convention `_ +* `Building libbpf `_ +* `API documentation Convention `_ diff --git a/contrib/libbpf/include/uapi/linux/bpf.h b/contrib/libbpf/include/uapi/linux/bpf.h index bc1a3d23..4b20a726 100644 --- a/contrib/libbpf/include/uapi/linux/bpf.h +++ b/contrib/libbpf/include/uapi/linux/bpf.h @@ -1033,6 +1033,7 @@ enum bpf_attach_type { BPF_PERF_EVENT, BPF_TRACE_KPROBE_MULTI, BPF_LSM_CGROUP, + BPF_STRUCT_OPS, __MAX_BPF_ATTACH_TYPE }; @@ -1108,7 +1109,7 @@ enum bpf_link_type { */ #define BPF_F_STRICT_ALIGNMENT (1U << 0) -/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the +/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will allow any alignment whatsoever. On platforms * with strict alignment requirements for loads ands stores (such * as sparc and mips) the verifier validates that all loads and @@ -1156,6 +1157,11 @@ enum bpf_link_type { */ #define BPF_F_XDP_HAS_FRAGS (1U << 5) +/* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded + * program becomes device-bound but can access XDP metadata. + */ +#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6) + /* link_create.kprobe_multi.flags used in LINK_CREATE command for * BPF_TRACE_KPROBE_MULTI attach type to create return probe. */ @@ -1261,6 +1267,9 @@ enum { /* Create a map that is suitable to be an inner map with dynamic max entries */ BPF_F_INNER_MAP = (1U << 12), + +/* Create a map that will be registered/unregesitered by the backed bpf_link */ + BPF_F_LINK = (1U << 13), }; /* Flags for BPF_PROG_QUERY. */ @@ -1398,6 +1407,11 @@ union bpf_attr { __aligned_u64 fd_array; /* array of FDs */ __aligned_u64 core_relos; __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ + /* output: actual total log contents size (including termintaing zero). + * It could be both larger than original log_size (if log was + * truncated), or smaller (if log buffer wasn't filled completely). + */ + __u32 log_true_size; }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -1483,6 +1497,11 @@ union bpf_attr { __u32 btf_size; __u32 btf_log_size; __u32 btf_log_level; + /* output: actual total log contents size (including termintaing zero). + * It could be both larger than original log_size (if log was + * truncated), or smaller (if log buffer wasn't filled completely). + */ + __u32 btf_log_true_size; }; struct { @@ -1502,7 +1521,10 @@ union bpf_attr { } task_fd_query; struct { /* struct used by BPF_LINK_CREATE command */ - __u32 prog_fd; /* eBPF program to attach */ + union { + __u32 prog_fd; /* eBPF program to attach */ + __u32 map_fd; /* struct_ops to attach */ + }; union { __u32 target_fd; /* object to attach to */ __u32 target_ifindex; /* target ifindex */ @@ -1543,12 +1565,23 @@ union bpf_attr { struct { /* struct used by BPF_LINK_UPDATE command */ __u32 link_fd; /* link fd */ - /* new program fd to update link with */ - __u32 new_prog_fd; + union { + /* new program fd to update link with */ + __u32 new_prog_fd; + /* new struct_ops map fd to update link with */ + __u32 new_map_fd; + }; __u32 flags; /* extra flags */ - /* expected link's program fd; is specified only if - * BPF_F_REPLACE flag is set in flags */ - __u32 old_prog_fd; + union { + /* expected link's program fd; is specified only if + * BPF_F_REPLACE flag is set in flags. + */ + __u32 old_prog_fd; + /* expected link's map fd; is specified only + * if BPF_F_REPLACE flag is set. 
+ */ + __u32 old_map_fd; + }; } link_update; struct { @@ -1642,17 +1675,17 @@ union bpf_attr { * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) - * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if + * to file *\/sys/kernel/tracing/trace* from TraceFS, if * available. It can take up to three additional **u64** * arguments (as an eBPF helpers, the total number of arguments is * limited to five). * * Each time the helper is called, it appends a line to the trace. - * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is - * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. + * Lines are discarded while *\/sys/kernel/tracing/trace* is + * open, use *\/sys/kernel/tracing/trace_pipe* to avoid this. * The format of the trace is customizable, and the exact output * one will get depends on the options set in - * *\/sys/kernel/debug/tracing/trace_options* (see also the + * *\/sys/kernel/tracing/trace_options* (see also the * *README* file under the same directory). However, it usually * defaults to something like: * @@ -2647,6 +2680,11 @@ union bpf_attr { * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the * L2 type as Ethernet. * + * * **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**, + * **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**: + * Indicate the new IP header version after decapsulating the outer + * IP header. Used when the inner and outer IP versions are different. + * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be @@ -2791,7 +2829,7 @@ union bpf_attr { * * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description - * For en eBPF program attached to a perf event, retrieve the + * For an eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in * the structure pointed by *buf* and of size *buf_size*. Enabled * and running times are also stored in the structure (see @@ -3124,6 +3162,11 @@ union bpf_attr { * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). + * **BPF_FIB_LOOKUP_SKIP_NEIGH** + * Skip the neighbour table lookup. *params*->dmac + * and *params*->smac will not be set as output. A common + * use case is to call **bpf_redirect_neigh**\ () after + * doing **bpf_fib_lookup**\ (). * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. @@ -4954,6 +4997,12 @@ union bpf_attr { * different maps if key/value layout matches across maps. * Every bpf_timer_set_callback() can have different callback_fn. * + * *flags* can be one of: + * + * **BPF_F_TIMER_ABS** + * Start the timer in absolute expire value instead of the + * default relative one. + * * Return * 0 on success. * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier @@ -5310,11 +5359,22 @@ union bpf_attr { * Description * Write *len* bytes from *src* into *dst*, starting from *offset* * into *dst*. - * *flags* is currently unused. + * + * *flags* must be 0 except for skb-type dynptrs. + * + * For skb-type dynptrs: + * * All data slices of the dynptr are automatically + * invalidated after **bpf_dynptr_write**\ (). This is + * because writing may pull the skb and change the + * underlying packet buffer. 
+ * + * * For *flags*, please see the flags accepted by + * **bpf_skb_store_bytes**\ (). * Return * 0 on success, -E2BIG if *offset* + *len* exceeds the length * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* - * is a read-only dynptr or if *flags* is not 0. + * is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs, + * other errors correspond to errors returned by **bpf_skb_store_bytes**\ (). * * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len) * Description @@ -5322,6 +5382,9 @@ union bpf_attr { * * *len* must be a statically known value. The returned data slice * is invalidated whenever the dynptr is invalidated. + * + * skb and xdp type dynptrs may not use bpf_dynptr_data. They should + * instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr. * Return * Pointer to the underlying dynptr data, NULL if the dynptr is * read-only, if the dynptr is invalid, or if the offset and length @@ -5807,6 +5870,8 @@ enum { BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6), + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = (1ULL << 7), + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = (1ULL << 8), }; enum { @@ -6342,6 +6407,9 @@ struct bpf_link_info { struct { __u32 ifindex; } xdp; + struct { + __u32 map_id; + } struct_ops; }; } __attribute__((aligned(8))); @@ -6738,6 +6806,7 @@ struct bpf_raw_tracepoint_args { enum { BPF_FIB_LOOKUP_DIRECT = (1U << 0), BPF_FIB_LOOKUP_OUTPUT = (1U << 1), + BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), }; enum { @@ -6905,6 +6974,21 @@ struct bpf_list_node { __u64 :64; } __attribute__((aligned(8))); +struct bpf_rb_root { + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + +struct bpf_rb_node { + __u64 :64; + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + +struct bpf_refcount { + __u32 :32; +} __attribute__((aligned(4))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. @@ -7054,4 +7138,21 @@ struct bpf_core_relo { enum bpf_core_relo_kind kind; }; +/* + * Flags to control bpf_timer_start() behaviour. + * - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is + * relative to current time. 
+ */
+enum {
+	BPF_F_TIMER_ABS = (1ULL << 0),
+};
+
+/* BPF numbers iterator state */
+struct bpf_iter_num {
+	/* opaque iterator state; having __u64 here allows to preserve correct
+	 * alignment requirements in vmlinux.h, generated from BTF
+	 */
+	__u64 __opaque[1];
+} __attribute__((aligned(8)));
+
 #endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/contrib/libbpf/include/uapi/linux/fcntl.h b/contrib/libbpf/include/uapi/linux/fcntl.h
index 2f86b2ad..e8c07da5 100644
--- a/contrib/libbpf/include/uapi/linux/fcntl.h
+++ b/contrib/libbpf/include/uapi/linux/fcntl.h
@@ -43,6 +43,7 @@
 #define F_SEAL_GROW	0x0004	/* prevent file from growing */
 #define F_SEAL_WRITE	0x0008	/* prevent writes */
 #define F_SEAL_FUTURE_WRITE	0x0010  /* prevent future writes while mapped */
+#define F_SEAL_EXEC	0x0020  /* prevent chmod modifying exec bits */
 /* (1U << 31) is reserved for signed error codes */
 
 /*
diff --git a/contrib/libbpf/include/uapi/linux/if_link.h b/contrib/libbpf/include/uapi/linux/if_link.h
index 901d98b8..39e659c8 100644
--- a/contrib/libbpf/include/uapi/linux/if_link.h
+++ b/contrib/libbpf/include/uapi/linux/if_link.h
@@ -605,6 +605,7 @@ enum {
 	IFLA_MACVLAN_MACADDR_COUNT,
 	IFLA_MACVLAN_BC_QUEUE_LEN,
 	IFLA_MACVLAN_BC_QUEUE_LEN_USED,
+	IFLA_MACVLAN_BC_CUTOFF,
 	__IFLA_MACVLAN_MAX,
 };
diff --git a/contrib/libbpf/include/uapi/linux/netdev.h b/contrib/libbpf/include/uapi/linux/netdev.h
new file mode 100644
index 00000000..639524b5
--- /dev/null
+++ b/contrib/libbpf/include/uapi/linux/netdev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/*	Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NETDEV_H
+#define _UAPI_LINUX_NETDEV_H
+
+#define NETDEV_FAMILY_NAME	"netdev"
+#define NETDEV_FAMILY_VERSION	1
+
+/**
+ * enum netdev_xdp_act
+ * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers
+ *   (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
+ * @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT
+ * @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements
+ *   ndo_xdp_xmit callback.
+ * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs if netdev supports AF_XDP
+ *   in zero copy mode.
+ * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs if netdev supports XDP hw
+ *   offloading.
+ * @NETDEV_XDP_ACT_RX_SG: This feature informs if netdev implements non-linear
+ *   XDP buffer support in the driver napi callback.
+ * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs if netdev implements
+ *   non-linear XDP buffer support in ndo_xdp_xmit callback.
+ */ +enum netdev_xdp_act { + NETDEV_XDP_ACT_BASIC = 1, + NETDEV_XDP_ACT_REDIRECT = 2, + NETDEV_XDP_ACT_NDO_XMIT = 4, + NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, + NETDEV_XDP_ACT_HW_OFFLOAD = 16, + NETDEV_XDP_ACT_RX_SG = 32, + NETDEV_XDP_ACT_NDO_XMIT_SG = 64, + + NETDEV_XDP_ACT_MASK = 127, +}; + +enum { + NETDEV_A_DEV_IFINDEX = 1, + NETDEV_A_DEV_PAD, + NETDEV_A_DEV_XDP_FEATURES, + + __NETDEV_A_DEV_MAX, + NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1) +}; + +enum { + NETDEV_CMD_DEV_GET = 1, + NETDEV_CMD_DEV_ADD_NTF, + NETDEV_CMD_DEV_DEL_NTF, + NETDEV_CMD_DEV_CHANGE_NTF, + + __NETDEV_CMD_MAX, + NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1) +}; + +#define NETDEV_MCGRP_MGMT "mgmt" + +#endif /* _UAPI_LINUX_NETDEV_H */ diff --git a/contrib/libbpf/include/uapi/linux/openat2.h b/contrib/libbpf/include/uapi/linux/openat2.h new file mode 100644 index 00000000..a5feb760 --- /dev/null +++ b/contrib/libbpf/include/uapi/linux/openat2.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_OPENAT2_H +#define _UAPI_LINUX_OPENAT2_H + +#include + +/* + * Arguments for how openat2(2) should open the target path. If only @flags and + * @mode are non-zero, then openat2(2) operates very similarly to openat(2). + * + * However, unlike openat(2), unknown or invalid bits in @flags result in + * -EINVAL rather than being silently ignored. @mode must be zero unless one of + * {O_CREAT, O_TMPFILE} are set. + * + * @flags: O_* flags. + * @mode: O_CREAT/O_TMPFILE file mode. + * @resolve: RESOLVE_* flags. + */ +struct open_how { + __u64 flags; + __u64 mode; + __u64 resolve; +}; + +/* how->resolve flags for openat2(2). */ +#define RESOLVE_NO_XDEV 0x01 /* Block mount-point crossings + (includes bind-mounts). */ +#define RESOLVE_NO_MAGICLINKS 0x02 /* Block traversal through procfs-style + "magic-links". */ +#define RESOLVE_NO_SYMLINKS 0x04 /* Block traversal through all symlinks + (implies OEXT_NO_MAGICLINKS) */ +#define RESOLVE_BENEATH 0x08 /* Block "lexical" trickery like + "..", symlinks, and absolute + paths which escape the dirfd. */ +#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".." + be scoped inside the dirfd + (similar to chroot(2)). */ +#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be + completed through cached lookup. May + return -EAGAIN if that's not + possible. */ + +#endif /* _UAPI_LINUX_OPENAT2_H */ diff --git a/contrib/libbpf/include/uapi/linux/perf_event.h b/contrib/libbpf/include/uapi/linux/perf_event.h index ccb7f5da..37675437 100644 --- a/contrib/libbpf/include/uapi/linux/perf_event.h +++ b/contrib/libbpf/include/uapi/linux/perf_event.h @@ -374,6 +374,7 @@ enum perf_event_read_format { #define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */ #define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */ #define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */ +#define PERF_ATTR_SIZE_VER8 136 /* add: config3 */ /* * Hardware event_id to monitor via a performance monitoring event: @@ -515,6 +516,8 @@ struct perf_event_attr { * truncated accordingly on 32 bit architectures. 
*/ __u64 sig_data; + + __u64 config3; /* extension of config2 */ }; /* diff --git a/contrib/libbpf/scripts/sync-kernel.sh b/contrib/libbpf/scripts/sync-kernel.sh index eb09ef13..55ffcd15 100755 --- a/contrib/libbpf/scripts/sync-kernel.sh +++ b/contrib/libbpf/scripts/sync-kernel.sh @@ -43,8 +43,10 @@ PATH_MAP=( \ [tools/include/uapi/linux/bpf.h]=include/uapi/linux/bpf.h \ [tools/include/uapi/linux/btf.h]=include/uapi/linux/btf.h \ [tools/include/uapi/linux/fcntl.h]=include/uapi/linux/fcntl.h \ + [tools/include/uapi/linux/openat2.h]=include/uapi/linux/openat2.h \ [tools/include/uapi/linux/if_link.h]=include/uapi/linux/if_link.h \ [tools/include/uapi/linux/if_xdp.h]=include/uapi/linux/if_xdp.h \ + [tools/include/uapi/linux/netdev.h]=include/uapi/linux/netdev.h \ [tools/include/uapi/linux/netlink.h]=include/uapi/linux/netlink.h \ [tools/include/uapi/linux/pkt_cls.h]=include/uapi/linux/pkt_cls.h \ [tools/include/uapi/linux/pkt_sched.h]=include/uapi/linux/pkt_sched.h \ @@ -260,7 +262,7 @@ if ((${COMMIT_CNT} <= 0)); then fi # Exclude baseline commit and generate nice cover letter with summary -git format-patch ${SQUASH_BASE_TAG}..${SQUASH_TIP_TAG} --cover-letter -o ${TMP_DIR}/patches +git format-patch --no-signature ${SQUASH_BASE_TAG}..${SQUASH_TIP_TAG} --cover-letter -o ${TMP_DIR}/patches # Now is time to re-apply libbpf-related linux patches to libbpf repo cd_to ${LIBBPF_REPO} diff --git a/contrib/libbpf/src/Makefile b/contrib/libbpf/src/Makefile index cdca7646..09abe8cf 100644 --- a/contrib/libbpf/src/Makefile +++ b/contrib/libbpf/src/Makefile @@ -9,8 +9,8 @@ else endif LIBBPF_MAJOR_VERSION := 1 -LIBBPF_MINOR_VERSION := 1 -LIBBPF_PATCH_VERSION := 0 +LIBBPF_MINOR_VERSION := 2 +LIBBPF_PATCH_VERSION := 2 LIBBPF_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).$(LIBBPF_PATCH_VERSION) LIBBPF_MAJMIN_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).0 LIBBPF_MAP_VERSION := $(shell grep -oE '^LIBBPF_([0-9.]+)' libbpf.map | sort -rV | head -n1 | cut -d'_' -f2) @@ -52,7 +52,7 @@ STATIC_OBJDIR := $(OBJDIR)/staticobjs OBJS := bpf.o btf.o libbpf.o libbpf_errno.o netlink.o \ nlattr.o str_error.o libbpf_probes.o bpf_prog_linfo.o \ btf_dump.o hashmap.o ringbuf.o strset.o linker.o gen_loader.o \ - relo_core.o usdt.o + relo_core.o usdt.o zip.o SHARED_OBJS := $(addprefix $(SHARED_OBJDIR)/,$(OBJS)) STATIC_OBJS := $(addprefix $(STATIC_OBJDIR)/,$(OBJS)) diff --git a/contrib/libbpf/src/bpf.c b/contrib/libbpf/src/bpf.c index 9aff98f4..128ac723 100644 --- a/contrib/libbpf/src/bpf.c +++ b/contrib/libbpf/src/bpf.c @@ -230,9 +230,9 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt, int bpf_prog_load(enum bpf_prog_type prog_type, const char *prog_name, const char *license, const struct bpf_insn *insns, size_t insn_cnt, - const struct bpf_prog_load_opts *opts) + struct bpf_prog_load_opts *opts) { - const size_t attr_sz = offsetofend(union bpf_attr, fd_array); + const size_t attr_sz = offsetofend(union bpf_attr, log_true_size); void *finfo = NULL, *linfo = NULL; const char *func_info, *line_info; __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd; @@ -290,10 +290,6 @@ int bpf_prog_load(enum bpf_prog_type prog_type, if (!!log_buf != !!log_size) return libbpf_err(-EINVAL); - if (log_level > (4 | 2 | 1)) - return libbpf_err(-EINVAL); - if (log_level && !log_buf) - return libbpf_err(-EINVAL); func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0); func_info = OPTS_GET(opts, func_info, NULL); @@ -316,6 +312,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type, } fd = 
sys_bpf_prog_load(&attr, attr_sz, attempts); + OPTS_SET(opts, log_true_size, attr.log_true_size); if (fd >= 0) return fd; @@ -356,6 +353,7 @@ } fd = sys_bpf_prog_load(&attr, attr_sz, attempts); + OPTS_SET(opts, log_true_size, attr.log_true_size); if (fd >= 0) goto done; } @@ -370,6 +368,7 @@ attr.log_level = 1; fd = sys_bpf_prog_load(&attr, attr_sz, attempts); + OPTS_SET(opts, log_true_size, attr.log_true_size); } done: /* free() doesn't affect errno, so we don't need to restore it */ @@ -794,11 +793,17 @@ int bpf_link_update(int link_fd, int new_prog_fd, if (!OPTS_VALID(opts, bpf_link_update_opts)) return libbpf_err(-EINVAL); + if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0)) + return libbpf_err(-EINVAL); + memset(&attr, 0, attr_sz); attr.link_update.link_fd = link_fd; attr.link_update.new_prog_fd = new_prog_fd; attr.link_update.flags = OPTS_GET(opts, flags, 0); - attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); + if (OPTS_GET(opts, old_prog_fd, 0)) + attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); + else if (OPTS_GET(opts, old_map_fd, 0)) + attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0); ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz); return libbpf_err_errno(ret); @@ -1044,6 +1049,26 @@ int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len) return libbpf_err_errno(err); } +int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len) +{ + return bpf_obj_get_info_by_fd(prog_fd, info, info_len); +} + +int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len) +{ + return bpf_obj_get_info_by_fd(map_fd, info, info_len); +} + +int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len) +{ + return bpf_obj_get_info_by_fd(btf_fd, info, info_len); +} + +int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len) +{ + return bpf_obj_get_info_by_fd(link_fd, info, info_len); +} + int bpf_raw_tracepoint_open(const char *name, int prog_fd) { const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint); @@ -1058,9 +1083,9 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd) return libbpf_err_errno(fd); } -int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts) +int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts) { - const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level); + const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size); union bpf_attr attr; char *log_buf; size_t log_size; @@ -1103,6 +1128,8 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_loa attr.btf_log_level = 1; fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz); } + + OPTS_SET(opts, log_true_size, attr.btf_log_true_size); return libbpf_err_errno(fd); } diff --git a/contrib/libbpf/src/bpf.h b/contrib/libbpf/src/bpf.h index 7468978d..a2c09138 100644 --- a/contrib/libbpf/src/bpf.h +++ b/contrib/libbpf/src/bpf.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* - * common eBPF ELF operations. + * Common BPF ELF operations. * * Copyright (C) 2013-2015 Alexei Starovoitov * Copyright (C) 2015 Wang Nan @@ -96,13 +96,20 @@ struct bpf_prog_load_opts { __u32 log_level; __u32 log_size; char *log_buf; + /* output: actual total log contents size (including terminating zero).
+ * It can be either larger than the original log_size (if the log was + * truncated), or smaller (if the log buffer wasn't filled completely). + * If the kernel doesn't support this feature, log_true_size is left unchanged. + */ + __u32 log_true_size; + size_t :0; }; -#define bpf_prog_load_opts__last_field log_buf +#define bpf_prog_load_opts__last_field log_true_size LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type, const char *prog_name, const char *license, const struct bpf_insn *insns, size_t insn_cnt, - const struct bpf_prog_load_opts *opts); + struct bpf_prog_load_opts *opts); /* Flags to direct loading requirements */ #define MAPS_RELAX_COMPAT 0x01 @@ -117,11 +124,18 @@ struct bpf_btf_load_opts { char *log_buf; __u32 log_level; __u32 log_size; + /* output: actual total log contents size (including terminating zero). + * It can be either larger than the original log_size (if the log was + * truncated), or smaller (if the log buffer wasn't filled completely). + * If the kernel doesn't support this feature, log_true_size is left unchanged. + */ + __u32 log_true_size; + size_t :0; }; -#define bpf_btf_load_opts__last_field log_size +#define bpf_btf_load_opts__last_field log_true_size LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size, - const struct bpf_btf_load_opts *opts); + struct bpf_btf_load_opts *opts); LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags); @@ -336,8 +350,9 @@ struct bpf_link_update_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 flags; /* extra flags */ __u32 old_prog_fd; /* expected old program FD */ + __u32 old_map_fd; /* expected old map FD */ }; -#define bpf_link_update_opts__last_field old_prog_fd +#define bpf_link_update_opts__last_field old_map_fd LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd, const struct bpf_link_update_opts *opts); @@ -387,6 +402,74 @@ LIBBPF_API int bpf_link_get_fd_by_id_opts(__u32 id, const struct bpf_get_fd_by_id_opts *opts); LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len); +/** + * @brief **bpf_prog_get_info_by_fd()** obtains information about the BPF + * program corresponding to *prog_fd*. + * + * Populates up to *info_len* bytes of *info* and updates *info_len* with the + * actual number of bytes written to *info*. + * + * @param prog_fd BPF program file descriptor + * @param info pointer to **struct bpf_prog_info** that will be populated with + * BPF program information + * @param info_len pointer to the size of *info*; on success updated with the + * number of bytes written to *info* + * @return 0, on success; negative error code, otherwise (errno is also set to + * the error code) + */ +LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len); + +/** + * @brief **bpf_map_get_info_by_fd()** obtains information about the BPF + * map corresponding to *map_fd*. + * + * Populates up to *info_len* bytes of *info* and updates *info_len* with the + * actual number of bytes written to *info*.
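This output-only field is also why bpf_prog_load() and bpf_btf_load() shed the const from their opts parameter above. A hedged sketch of the intended retry pattern follows; insns/insn_cnt stand in for a program prepared elsewhere, and on kernels without the feature the field simply stays 0, so no retry happens:

#include <stdlib.h>
#include <bpf/bpf.h>

static int load_with_full_log(const struct bpf_insn *insns, size_t insn_cnt)
{
	char small[256];
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = small,
		.log_size = sizeof(small),
		.log_level = 1,
	);
	int fd;

	fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "sketch", "GPL",
			   insns, insn_cnt, &opts);
	if (fd < 0 && opts.log_true_size > sizeof(small)) {
		/* retry once with an exactly-sized buffer */
		char *big = malloc(opts.log_true_size);

		if (!big)
			return fd;
		opts.log_buf = big;
		opts.log_size = opts.log_true_size;
		fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "sketch", "GPL",
				   insns, insn_cnt, &opts);
		/* big now holds the complete verifier log; print or persist it here */
		free(big);
	}
	return fd;
}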
+ * + * @param map_fd BPF map file descriptor + * @param info pointer to **struct bpf_map_info** that will be populated with + * BPF map information + * @param info_len pointer to the size of *info*; on success updated with the + * number of bytes written to *info* + * @return 0, on success; negative error code, otherwise (errno is also set to + * the error code) + */ +LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len); + +/** + * @brief **bpf_btf_get_info_by_fd()** obtains information about the + * BTF object corresponding to *btf_fd*. + * + * Populates up to *info_len* bytes of *info* and updates *info_len* with the + * actual number of bytes written to *info*. + * + * @param btf_fd BTF object file descriptor + * @param info pointer to **struct bpf_btf_info** that will be populated with + * BTF object information + * @param info_len pointer to the size of *info*; on success updated with the + * number of bytes written to *info* + * @return 0, on success; negative error code, otherwise (errno is also set to + * the error code) + */ +LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len); + +/** + * @brief **bpf_link_get_info_by_fd()** obtains information about the BPF + * link corresponding to *link_fd*. + * + * Populates up to *info_len* bytes of *info* and updates *info_len* with the + * actual number of bytes written to *info*. + * + * @param link_fd BPF link file descriptor + * @param info pointer to **struct bpf_link_info** that will be populated with + * BPF link information + * @param info_len pointer to the size of *info*; on success updated with the + * number of bytes written to *info* + * @return 0, on success; negative error code, otherwise (errno is also set to + * the error code) + */ +LIBBPF_API int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len); + struct bpf_prog_query_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 query_flags; diff --git a/contrib/libbpf/src/bpf_core_read.h b/contrib/libbpf/src/bpf_core_read.h index 496e6a8e..1ac57bb7 100644 --- a/contrib/libbpf/src/bpf_core_read.h +++ b/contrib/libbpf/src/bpf_core_read.h @@ -364,7 +364,7 @@ enum bpf_enum_value_kind { /* Non-CO-RE variant of BPF_CORE_READ_INTO() */ #define BPF_PROBE_READ_INTO(dst, src, a, ...) ({ \ - ___core_read(bpf_probe_read, bpf_probe_read, \ + ___core_read(bpf_probe_read_kernel, bpf_probe_read_kernel, \ dst, (src), a, ##__VA_ARGS__) \ }) @@ -400,7 +400,7 @@ enum bpf_enum_value_kind { /* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */ #define BPF_PROBE_READ_STR_INTO(dst, src, a, ...)
({ \ - ___core_read(bpf_probe_read_str, bpf_probe_read, \ + ___core_read(bpf_probe_read_kernel_str, bpf_probe_read_kernel, \ dst, (src), a, ##__VA_ARGS__) \ }) diff --git a/contrib/libbpf/src/bpf_gen_internal.h b/contrib/libbpf/src/bpf_gen_internal.h index 22330893..fdf44403 100644 --- a/contrib/libbpf/src/bpf_gen_internal.h +++ b/contrib/libbpf/src/bpf_gen_internal.h @@ -11,6 +11,7 @@ struct ksym_relo_desc { int insn_idx; bool is_weak; bool is_typeless; + bool is_ld64; }; struct ksym_desc { @@ -24,6 +25,7 @@ struct ksym_desc { bool typeless; }; int insn; + bool is_ld64; }; struct bpf_gen { @@ -65,7 +67,7 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx); void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type); void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, - bool is_typeless, int kind, int insn_idx); + bool is_typeless, bool is_ld64, int kind, int insn_idx); void bpf_gen__record_relo_core(struct bpf_gen *gen, const struct bpf_core_relo *core_relo); void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int key, int inner_map_idx); diff --git a/contrib/libbpf/src/bpf_helper_defs.h b/contrib/libbpf/src/bpf_helper_defs.h index 9bcaca11..8eae417e 100644 --- a/contrib/libbpf/src/bpf_helper_defs.h +++ b/contrib/libbpf/src/bpf_helper_defs.h @@ -118,17 +118,17 @@ static __u64 (*bpf_ktime_get_ns)(void) = (void *) 5; * * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) - * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if + * to file *\/sys/kernel/tracing/trace* from TraceFS, if * available. It can take up to three additional **u64** * arguments (as an eBPF helper, the total number of arguments is * limited to five). * * Each time the helper is called, it appends a line to the trace. - * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is - * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. + * Lines are discarded while *\/sys/kernel/tracing/trace* is - * open, use *\/sys/kernel/tracing/trace_pipe* to avoid this. * The format of the trace is customizable, and the exact output * one will get depends on the options set in - * *\/sys/kernel/debug/tracing/trace_options* (see also the + * *\/sys/kernel/tracing/trace_options* (see also the * *README* file under the same directory). However, it usually * defaults to something like: * @@ -1277,6 +1277,11 @@ static long (*bpf_setsockopt)(void *bpf_socket, int level, int optname, void *op * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the * L2 type as Ethernet. * + * * **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**, + * **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**: + * Indicate the new IP header version after decapsulating the outer + * IP header. Used when the inner and outer IP versions are different. + * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be @@ -1445,7 +1450,7 @@ static long (*bpf_perf_event_read_value)(void *map, __u64 flags, struct bpf_perf /* * bpf_perf_prog_read_value * - * For en eBPF program attached to a perf event, retrieve the + * For an eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in * the structure pointed by *buf* and of size *buf_size*.
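A usage note for the BPF_PROBE_READ_INTO()/BPF_PROBE_READ_STR_INTO() change above: these non-CO-RE macros now chain bpf_probe_read_kernel() and bpf_probe_read_kernel_str() instead of the deprecated bpf_probe_read(). A hedged sketch of typical use, assuming struct layouts come from a matching vmlinux.h (these variants apply no CO-RE relocation, so offsets are fixed at compile time):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_exit")
int BPF_KPROBE(trace_exit)
{
	struct task_struct *task = (void *)bpf_get_current_task();
	char comm[16];
	int ppid = 0;

	/* Chained probe reads: task->real_parent->tgid and task->comm,
	 * each step going through bpf_probe_read_kernel{,_str}().
	 */
	BPF_PROBE_READ_INTO(&ppid, task, real_parent, tgid);
	BPF_PROBE_READ_STR_INTO(&comm, task, comm);

	bpf_printk("exit: comm=%s ppid=%d", comm, ppid);
	return 0;
}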
Enabled * and running times are also stored in the structure (see @@ -1830,6 +1835,11 @@ static long (*bpf_skb_load_bytes_relative)(const void *skb, __u32 offset, void * * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). + * **BPF_FIB_LOOKUP_SKIP_NEIGH** + * Skip the neighbour table lookup. *params*->dmac + * and *params*->smac will not be set as output. A common + * use case is to call **bpf_redirect_neigh**\ () after + * doing **bpf_fib_lookup**\ (). * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** for tc cls_act programs. @@ -4018,6 +4028,12 @@ static long (*bpf_timer_set_callback)(struct bpf_timer *timer, void *callback_fn * different maps if key/value layout matches across maps. * Every bpf_timer_set_callback() can have different callback_fn. * + * *flags* can be one of: + * + * **BPF_F_TIMER_ABS** + * Start the timer in absolute expire value instead of the + * default relative one. + * * * Returns * 0 on success. @@ -4498,12 +4514,23 @@ static long (*bpf_dynptr_read)(void *dst, __u32 len, const struct bpf_dynptr *sr * * Write *len* bytes from *src* into *dst*, starting from *offset* * into *dst*. - * *flags* is currently unused. + * + * *flags* must be 0 except for skb-type dynptrs. + * + * For skb-type dynptrs: + * * All data slices of the dynptr are automatically + * invalidated after **bpf_dynptr_write**\ (). This is + * because writing may pull the skb and change the + * underlying packet buffer. + * + * * For *flags*, please see the flags accepted by + * **bpf_skb_store_bytes**\ (). * * Returns * 0 on success, -E2BIG if *offset* + *len* exceeds the length * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* - * is a read-only dynptr or if *flags* is not 0. + * is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs, + * other errors correspond to errors returned by **bpf_skb_store_bytes**\ (). */ static long (*bpf_dynptr_write)(const struct bpf_dynptr *dst, __u32 offset, void *src, __u32 len, __u64 flags) = (void *) 202; @@ -4515,6 +4542,9 @@ static long (*bpf_dynptr_write)(const struct bpf_dynptr *dst, __u32 offset, void * *len* must be a statically known value. The returned data slice * is invalidated whenever the dynptr is invalidated. * + * skb and xdp type dynptrs may not use bpf_dynptr_data. They should + * instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr. + * * Returns * Pointer to the underlying dynptr data, NULL if the dynptr is * read-only, if the dynptr is invalid, or if the offset and length diff --git a/contrib/libbpf/src/bpf_helpers.h b/contrib/libbpf/src/bpf_helpers.h index d37c4fe2..929a3bac 100644 --- a/contrib/libbpf/src/bpf_helpers.h +++ b/contrib/libbpf/src/bpf_helpers.h @@ -109,7 +109,7 @@ * This is a variable-specific variant of more global barrier().
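barrier_var(), defined just below, is the usual tool for keeping a bounds check in exactly the shape the verifier needs; the new single "+r" read-write constraint is equivalent to the old "=r"/"0" output-input pair, just simpler. An illustrative sketch (the socket-filter context and all names are ours):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define BUF_SZ 256

char buf[BUF_SZ];

SEC("socket")
int bounded_read(struct __sk_buff *skb)
{
	__u32 off = skb->len;

	/* The compiler treats 'off' as clobbered here, so the comparison
	 * below survives optimization as written and the verifier sees
	 * the bounds check it needs before the array access.
	 */
	barrier_var(off);
	if (off >= BUF_SZ)
		return 0;
	return buf[off];
}

char LICENSE[] SEC("license") = "GPL";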
*/ #ifndef barrier_var -#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var)) +#define barrier_var(var) asm volatile("" : "+r"(var)) #endif /* @@ -174,8 +174,13 @@ enum libbpf_tristate { #define __kconfig __attribute__((section(".kconfig"))) #define __ksym __attribute__((section(".ksyms"))) +#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted"))) #define __kptr __attribute__((btf_type_tag("kptr"))) -#define __kptr_ref __attribute__((btf_type_tag("kptr_ref"))) + +#define bpf_ksym_exists(sym) ({ \ + _Static_assert(!__builtin_constant_p(!!sym), #sym " should be marked as __weak"); \ + !!sym; \ +}) #ifndef ___bpf_concat #define ___bpf_concat(a, b) a ## b @@ -286,4 +291,107 @@ enum libbpf_tristate { /* Helper macro to print out debug messages */ #define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args) +struct bpf_iter_num; + +extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym; +extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym; +extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym; + +#ifndef bpf_for_each +/* bpf_for_each(iter_type, cur_elem, args...) provides a generic construct for + * using BPF open-coded iterators without having to write mundane explicit + * low-level loop logic. Instead, it provides a for()-like generic construct + * that can be used pretty naturally. E.g., for some hypothetical cgroup + * iterator, you'd write: + * + * struct cgroup *cg, *parent_cg = <...>; + * + * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) { + * bpf_printk("Child cgroup id = %d", cg->cgroup_id); + * if (cg->cgroup_id == 123) + * break; + * } + * + * I.e., it looks almost like a high-level for-each loop in other languages, + * supports continue/break, and is verifiable by BPF verifier. + * + * For iterating integers, the difference between bpf_for_each(num, i, N, M) + * and bpf_for(i, N, M) is that bpf_for() provides additional proof to + * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int + * *`, not just `int`. So for integers bpf_for() is more convenient. + * + * Note: this macro relies on C99 feature of allowing to declare variables + * inside for() loop, bound to for() loop lifetime. It also utilizes GCC + * extension: __attribute__((cleanup())), supported by both GCC and + * Clang. + */ +#define bpf_for_each(type, cur, args...) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \ + cleanup(bpf_iter_##type##_destroy))), \ + /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_##type##_new(&___it, ##args), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_##type##_destroy, (void *)0); \ + /* iteration and termination check */ \ + (((cur) = bpf_iter_##type##_next(&___it))); \ +) +#endif /* bpf_for_each */ + +#ifndef bpf_for +/* bpf_for(i, start, end) implements a for()-like looping construct that sets + * provided integer variable *i* to values starting from *start* through, + * but not including, *end*. It also proves to BPF verifier that *i* belongs + * to range [start, end), so this can be used for accessing arrays without + * extra checks.
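As a concrete illustration of the range proof just described, a minimal bpf_for() user; it assumes a kernel new enough (6.4+) to expose the bpf_iter_num_* kfuncs declared above:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

int vals[64];
long total;

SEC("tc")
int sum_vals(struct __sk_buff *skb)
{
	int i;

	/* verifier knows i is within [0, 64), so vals[i] is provably
	 * in bounds with no manual clamping
	 */
	bpf_for(i, 0, 64)
		total += vals[i];
	return 0;
}

char LICENSE[] SEC("license") = "GPL";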
+ * + * Note: *start* and *end* are assumed to be expressions with no side effects + * and whose values do not change throughout bpf_for() loop execution. They do + * not have to be statically known or constant, though. + * + * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() + * loop bound variables and cleanup attribute, supported by GCC and Clang. + */ +#define bpf_for(i, start, end) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ + cleanup(bpf_iter_num_destroy))), \ + /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_num_new(&___it, (start), (end)), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_num_destroy, (void *)0); \ + ({ \ + /* iteration step */ \ + int *___t = bpf_iter_num_next(&___it); \ + /* termination and bounds check */ \ + (___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \ + }); \ +) +#endif /* bpf_for */ + +#ifndef bpf_repeat +/* bpf_repeat(N) performs N iterations without exposing iteration number + * + * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() + * loop bound variables and cleanup attribute, supported by GCC and Clang. + */ +#define bpf_repeat(N) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ + cleanup(bpf_iter_num_destroy))), \ + /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_num_new(&___it, 0, (N)), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_num_destroy, (void *)0); \ + bpf_iter_num_next(&___it); \ + /* nothing here */ \ +) +#endif /* bpf_repeat */ + #endif diff --git a/contrib/libbpf/src/bpf_tracing.h b/contrib/libbpf/src/bpf_tracing.h index 2972dc25..6fb3d0f9 100644 --- a/contrib/libbpf/src/bpf_tracing.h +++ b/contrib/libbpf/src/bpf_tracing.h @@ -32,6 +32,9 @@ #elif defined(__TARGET_ARCH_arc) #define bpf_target_arc #define bpf_target_defined +#elif defined(__TARGET_ARCH_loongarch) + #define bpf_target_loongarch + #define bpf_target_defined #else /* Fall back to what the compiler says */ @@ -62,6 +65,9 @@ #elif defined(__arc__) #define bpf_target_arc #define bpf_target_defined +#elif defined(__loongarch__) + #define bpf_target_loongarch + #define bpf_target_defined #endif /* no compiler target */ #endif @@ -72,6 +78,10 @@ #if defined(bpf_target_x86) +/* + * https://en.wikipedia.org/wiki/X86_calling_conventions#System_V_AMD64_ABI + */ + #if defined(__KERNEL__) || defined(__VMLINUX_H__) #define __PT_PARM1_REG di @@ -79,25 +89,40 @@ #define __PT_PARM3_REG dx #define __PT_PARM4_REG cx #define __PT_PARM5_REG r8 +#define __PT_PARM6_REG r9 +/* + * Syscall uses r10 for PARM4. See arch/x86/entry/entry_64.S:entry_SYSCALL_64 + * comments in Linux sources. And refer to syscall(2) manpage. 
+ */ +#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG +#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG +#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG +#define __PT_PARM4_SYSCALL_REG r10 +#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG +#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG + #define __PT_RET_REG sp #define __PT_FP_REG bp #define __PT_RC_REG ax #define __PT_SP_REG sp #define __PT_IP_REG ip -/* syscall uses r10 for PARM4 */ -#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10) -#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10) #else #ifdef __i386__ +/* i386 kernel is built with -mregparm=3 */ #define __PT_PARM1_REG eax #define __PT_PARM2_REG edx #define __PT_PARM3_REG ecx -/* i386 kernel is built with -mregparm=3 */ -#define __PT_PARM4_REG __unsupported__ -#define __PT_PARM5_REG __unsupported__ +/* i386 syscall ABI is very different, refer to syscall(2) manpage */ +#define __PT_PARM1_SYSCALL_REG ebx +#define __PT_PARM2_SYSCALL_REG ecx +#define __PT_PARM3_SYSCALL_REG edx +#define __PT_PARM4_SYSCALL_REG esi +#define __PT_PARM5_SYSCALL_REG edi +#define __PT_PARM6_SYSCALL_REG ebp + #define __PT_RET_REG esp #define __PT_FP_REG ebp #define __PT_RC_REG eax @@ -111,14 +136,20 @@ #define __PT_PARM3_REG rdx #define __PT_PARM4_REG rcx #define __PT_PARM5_REG r8 +#define __PT_PARM6_REG r9 + +#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG +#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG +#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG +#define __PT_PARM4_SYSCALL_REG r10 +#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG +#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG + #define __PT_RET_REG rsp #define __PT_FP_REG rbp #define __PT_RC_REG rax #define __PT_SP_REG rsp #define __PT_IP_REG rip -/* syscall uses r10 for PARM4 */ -#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10) -#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10) #endif /* __i386__ */ @@ -126,6 +157,10 @@ #elif defined(bpf_target_s390) +/* + * https://github.com/IBM/s390x-abi/releases/download/v1.6/lzsabi_s390x.pdf + */ + struct pt_regs___s390 { unsigned long orig_gpr2; }; @@ -137,21 +172,42 @@ struct pt_regs___s390 { #define __PT_PARM3_REG gprs[4] #define __PT_PARM4_REG gprs[5] #define __PT_PARM5_REG gprs[6] -#define __PT_RET_REG grps[14] + +#define __PT_PARM1_SYSCALL_REG orig_gpr2 +#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG +#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG +#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG +#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG +#define __PT_PARM6_SYSCALL_REG gprs[7] +#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x) +#define PT_REGS_PARM1_CORE_SYSCALL(x) \ + BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG) + +#define __PT_RET_REG gprs[14] #define __PT_FP_REG gprs[11] /* Works only with CONFIG_FRAME_POINTER */ #define __PT_RC_REG gprs[2] #define __PT_SP_REG gprs[15] #define __PT_IP_REG psw.addr -#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x) -#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2) #elif defined(bpf_target_arm) +/* + * https://github.com/ARM-software/abi-aa/blob/main/aapcs32/aapcs32.rst#machine-registers + */ + #define __PT_PARM1_REG uregs[0] #define __PT_PARM2_REG uregs[1] #define __PT_PARM3_REG uregs[2] #define __PT_PARM4_REG uregs[3] -#define __PT_PARM5_REG uregs[4] + +#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG +#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG +#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG +#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG +#define __PT_PARM5_SYSCALL_REG uregs[4] 
+#define __PT_PARM6_SYSCALL_REG uregs[5] +#define __PT_PARM7_SYSCALL_REG uregs[6] + #define __PT_RET_REG uregs[14] #define __PT_FP_REG uregs[11] /* Works only with CONFIG_FRAME_POINTER */ #define __PT_RC_REG uregs[0] @@ -160,6 +216,10 @@ struct pt_regs___s390 { #elif defined(bpf_target_arm64) +/* + * https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst#machine-registers + */ + struct pt_regs___arm64 { unsigned long orig_x0; }; @@ -171,21 +231,49 @@ struct pt_regs___arm64 { #define __PT_PARM3_REG regs[2] #define __PT_PARM4_REG regs[3] #define __PT_PARM5_REG regs[4] +#define __PT_PARM6_REG regs[5] +#define __PT_PARM7_REG regs[6] +#define __PT_PARM8_REG regs[7] + +#define __PT_PARM1_SYSCALL_REG orig_x0 +#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG +#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG +#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG +#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG +#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG +#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x) +#define PT_REGS_PARM1_CORE_SYSCALL(x) \ + BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG) + #define __PT_RET_REG regs[30] #define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */ #define __PT_RC_REG regs[0] #define __PT_SP_REG sp #define __PT_IP_REG pc -#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x) -#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0) #elif defined(bpf_target_mips) +/* + * N64 ABI is assumed right now. + * https://en.wikipedia.org/wiki/MIPS_architecture#Calling_conventions + */ + #define __PT_PARM1_REG regs[4] #define __PT_PARM2_REG regs[5] #define __PT_PARM3_REG regs[6] #define __PT_PARM4_REG regs[7] #define __PT_PARM5_REG regs[8] +#define __PT_PARM6_REG regs[9] +#define __PT_PARM7_REG regs[10] +#define __PT_PARM8_REG regs[11] + +#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG +#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG +#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG +#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG +#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG /* only N32/N64 */ +#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG /* only N32/N64 */ + #define __PT_RET_REG regs[31] #define __PT_FP_REG regs[30] /* Works only with CONFIG_FRAME_POINTER */ #define __PT_RC_REG regs[2] @@ -194,26 +282,58 @@ struct pt_regs___arm64 { #elif defined(bpf_target_powerpc) +/* + * http://refspecs.linux-foundation.org/elf/elfspec_ppc.pdf (page 3-14, + * section "Function Calling Sequence") + */ + #define __PT_PARM1_REG gpr[3] #define __PT_PARM2_REG gpr[4] #define __PT_PARM3_REG gpr[5] #define __PT_PARM4_REG gpr[6] #define __PT_PARM5_REG gpr[7] +#define __PT_PARM6_REG gpr[8] +#define __PT_PARM7_REG gpr[9] +#define __PT_PARM8_REG gpr[10] + +/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */ +#define PT_REGS_SYSCALL_REGS(ctx) ctx +#define __PT_PARM1_SYSCALL_REG orig_gpr3 +#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG +#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG +#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG +#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG +#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG +#if !defined(__arch64__) +#define __PT_PARM7_SYSCALL_REG __PT_PARM7_REG /* only powerpc (not powerpc64) */ +#endif + #define __PT_RET_REG regs[31] #define __PT_FP_REG __unsupported__ #define __PT_RC_REG gpr[3] #define __PT_SP_REG sp #define __PT_IP_REG nip -/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. 
*/ -#define PT_REGS_SYSCALL_REGS(ctx) ctx #elif defined(bpf_target_sparc) +/* + * https://en.wikipedia.org/wiki/Calling_convention#SPARC + */ + #define __PT_PARM1_REG u_regs[UREG_I0] #define __PT_PARM2_REG u_regs[UREG_I1] #define __PT_PARM3_REG u_regs[UREG_I2] #define __PT_PARM4_REG u_regs[UREG_I3] #define __PT_PARM5_REG u_regs[UREG_I4] +#define __PT_PARM6_REG u_regs[UREG_I5] + +#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG +#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG +#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG +#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG +#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG +#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG + #define __PT_RET_REG u_regs[UREG_I7] #define __PT_FP_REG __unsupported__ #define __PT_RC_REG u_regs[UREG_I0] @@ -227,22 +347,42 @@ struct pt_regs___arm64 { #elif defined(bpf_target_riscv) +/* + * https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions + */ + #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x)) #define __PT_PARM1_REG a0 #define __PT_PARM2_REG a1 #define __PT_PARM3_REG a2 #define __PT_PARM4_REG a3 #define __PT_PARM5_REG a4 +#define __PT_PARM6_REG a5 +#define __PT_PARM7_REG a6 +#define __PT_PARM8_REG a7 + +/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */ +#define PT_REGS_SYSCALL_REGS(ctx) ctx +#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG +#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG +#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG +#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG +#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG +#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG + #define __PT_RET_REG ra #define __PT_FP_REG s0 #define __PT_RC_REG a0 #define __PT_SP_REG sp #define __PT_IP_REG pc -/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */ -#define PT_REGS_SYSCALL_REGS(ctx) ctx #elif defined(bpf_target_arc) +/* + * Section "Function Calling Sequence" (page 24): + * https://raw.githubusercontent.com/wiki/foss-for-synopsys-dwc-arc-processors/toolchain/files/ARCv2_ABI.pdf + */ + /* arc provides struct user_pt_regs instead of struct pt_regs to userspace */ #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x)) #define __PT_PARM1_REG scratch.r0 @@ -250,13 +390,57 @@ struct pt_regs___arm64 { #define __PT_PARM3_REG scratch.r2 #define __PT_PARM4_REG scratch.r3 #define __PT_PARM5_REG scratch.r4 +#define __PT_PARM6_REG scratch.r5 +#define __PT_PARM7_REG scratch.r6 +#define __PT_PARM8_REG scratch.r7 + +/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */ +#define PT_REGS_SYSCALL_REGS(ctx) ctx +#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG +#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG +#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG +#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG +#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG +#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG + #define __PT_RET_REG scratch.blink -#define __PT_FP_REG __unsupported__ +#define __PT_FP_REG scratch.fp #define __PT_RC_REG scratch.r0 #define __PT_SP_REG scratch.sp #define __PT_IP_REG scratch.ret -/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. 
*/ + +#elif defined(bpf_target_loongarch) + +/* + * https://docs.kernel.org/loongarch/introduction.html + * https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html + */ + +/* loongarch provides struct user_pt_regs instead of struct pt_regs to userspace */ +#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x)) +#define __PT_PARM1_REG regs[4] +#define __PT_PARM2_REG regs[5] +#define __PT_PARM3_REG regs[6] +#define __PT_PARM4_REG regs[7] +#define __PT_PARM5_REG regs[8] +#define __PT_PARM6_REG regs[9] +#define __PT_PARM7_REG regs[10] +#define __PT_PARM8_REG regs[11] + +/* loongarch does not select ARCH_HAS_SYSCALL_WRAPPER. */ #define PT_REGS_SYSCALL_REGS(ctx) ctx +#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG +#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG +#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG +#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG +#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG +#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG + +#define __PT_RET_REG regs[1] +#define __PT_FP_REG regs[22] +#define __PT_RC_REG regs[4] +#define __PT_SP_REG regs[3] +#define __PT_IP_REG csr_era #endif @@ -264,16 +448,49 @@ struct pt_regs; -/* allow some architecutres to override `struct pt_regs` */ +/* allow some architectures to override `struct pt_regs` */ #ifndef __PT_REGS_CAST #define __PT_REGS_CAST(x) (x) #endif +/* + * Different architectures support a different number of arguments passed + * through registers. i386 supports just 3, some arches support up to 8. + */ +#ifndef __PT_PARM4_REG +#define __PT_PARM4_REG __unsupported__ +#endif +#ifndef __PT_PARM5_REG +#define __PT_PARM5_REG __unsupported__ +#endif +#ifndef __PT_PARM6_REG +#define __PT_PARM6_REG __unsupported__ +#endif +#ifndef __PT_PARM7_REG +#define __PT_PARM7_REG __unsupported__ +#endif +#ifndef __PT_PARM8_REG +#define __PT_PARM8_REG __unsupported__ +#endif +/* + * Similarly, syscall-specific conventions might differ from the regular + * function call conventions within each architecture. All supported + * architectures pass either 6 or 7 syscall arguments in registers. + * + * See syscall(2) manpage for a succinct table with information on each arch.
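These per-arch tables are what make BPF_KSYSCALL (further down in this header) portable: libbpf expands the handler's arguments via PT_REGS_PARMn_SYSCALL(), or via the _CORE_ variants on kernels without ARCH_HAS_SYSCALL_WRAPPER, so the same source compiles against whichever register mapping applies. A hedged usage sketch:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("ksyscall/openat")
int BPF_KSYSCALL(probe_openat, int dfd, const char *pathname, int flags)
{
	char path[64];

	/* pathname arrives from the arch-appropriate syscall argument
	 * register; it is a user pointer, hence the _user_ read below
	 */
	bpf_probe_read_user_str(path, sizeof(path), pathname);
	bpf_printk("openat(%d, %s, 0x%x)", dfd, path, flags);
	return 0;
}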
+ */ +#ifndef __PT_PARM7_SYSCALL_REG +#define __PT_PARM7_SYSCALL_REG __unsupported__ +#endif + #define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG) #define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG) #define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG) #define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG) #define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG) +#define PT_REGS_PARM6(x) (__PT_REGS_CAST(x)->__PT_PARM6_REG) +#define PT_REGS_PARM7(x) (__PT_REGS_CAST(x)->__PT_PARM7_REG) +#define PT_REGS_PARM8(x) (__PT_REGS_CAST(x)->__PT_PARM8_REG) #define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG) #define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG) #define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG) @@ -285,6 +502,9 @@ struct pt_regs; #define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG) #define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG) #define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG) +#define PT_REGS_PARM6_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_REG) +#define PT_REGS_PARM7_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_REG) +#define PT_REGS_PARM8_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM8_REG) #define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG) #define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG) #define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG) @@ -311,24 +531,33 @@ struct pt_regs; #endif #ifndef PT_REGS_PARM1_SYSCALL -#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1(x) +#define PT_REGS_PARM1_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM1_SYSCALL_REG) +#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_SYSCALL_REG) +#endif +#ifndef PT_REGS_PARM2_SYSCALL +#define PT_REGS_PARM2_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM2_SYSCALL_REG) +#define PT_REGS_PARM2_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_SYSCALL_REG) +#endif +#ifndef PT_REGS_PARM3_SYSCALL +#define PT_REGS_PARM3_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM3_SYSCALL_REG) +#define PT_REGS_PARM3_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_SYSCALL_REG) #endif -#define PT_REGS_PARM2_SYSCALL(x) PT_REGS_PARM2(x) -#define PT_REGS_PARM3_SYSCALL(x) PT_REGS_PARM3(x) #ifndef PT_REGS_PARM4_SYSCALL -#define PT_REGS_PARM4_SYSCALL(x) PT_REGS_PARM4(x) +#define PT_REGS_PARM4_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM4_SYSCALL_REG) +#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_SYSCALL_REG) #endif -#define PT_REGS_PARM5_SYSCALL(x) PT_REGS_PARM5(x) - -#ifndef PT_REGS_PARM1_CORE_SYSCALL -#define PT_REGS_PARM1_CORE_SYSCALL(x) PT_REGS_PARM1_CORE(x) +#ifndef PT_REGS_PARM5_SYSCALL +#define PT_REGS_PARM5_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM5_SYSCALL_REG) +#define PT_REGS_PARM5_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_SYSCALL_REG) #endif -#define PT_REGS_PARM2_CORE_SYSCALL(x) PT_REGS_PARM2_CORE(x) -#define PT_REGS_PARM3_CORE_SYSCALL(x) PT_REGS_PARM3_CORE(x) -#ifndef PT_REGS_PARM4_CORE_SYSCALL -#define PT_REGS_PARM4_CORE_SYSCALL(x) PT_REGS_PARM4_CORE(x) +#ifndef PT_REGS_PARM6_SYSCALL +#define PT_REGS_PARM6_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM6_SYSCALL_REG) +#define PT_REGS_PARM6_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_SYSCALL_REG) +#endif +#ifndef PT_REGS_PARM7_SYSCALL +#define PT_REGS_PARM7_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM7_SYSCALL_REG) +#define PT_REGS_PARM7_CORE_SYSCALL(x) 
BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_SYSCALL_REG) #endif -#define PT_REGS_PARM5_CORE_SYSCALL(x) PT_REGS_PARM5_CORE(x) #else /* defined(bpf_target_defined) */ @@ -337,6 +566,9 @@ struct pt_regs; #define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM6(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM7(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM8(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) @@ -348,6 +580,9 @@ struct pt_regs; #define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM6_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM7_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM8_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) @@ -362,12 +597,16 @@ struct pt_regs; #define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM6_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM7_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM6_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM7_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #endif /* defined(bpf_target_defined) */ @@ -553,6 +792,9 @@ struct pt_regs; #define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx) #define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx) #define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx) +#define ___bpf_kprobe_args6(x, args...) ___bpf_kprobe_args5(args), (void *)PT_REGS_PARM6(ctx) +#define ___bpf_kprobe_args7(x, args...) ___bpf_kprobe_args6(args), (void *)PT_REGS_PARM7(ctx) +#define ___bpf_kprobe_args8(x, args...) ___bpf_kprobe_args7(args), (void *)PT_REGS_PARM8(ctx) #define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args) /* @@ -609,6 +851,8 @@ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) #define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs) #define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs) #define ___bpf_syscall_args5(x, args...) 
___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs) +#define ___bpf_syscall_args6(x, args...) ___bpf_syscall_args5(args), (void *)PT_REGS_PARM6_SYSCALL(regs) +#define ___bpf_syscall_args7(x, args...) ___bpf_syscall_args6(args), (void *)PT_REGS_PARM7_SYSCALL(regs) #define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args) /* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */ @@ -618,6 +862,8 @@ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) #define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs) #define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs) #define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs) +#define ___bpf_syswrap_args6(x, args...) ___bpf_syswrap_args5(args), (void *)PT_REGS_PARM6_CORE_SYSCALL(regs) +#define ___bpf_syswrap_args7(x, args...) ___bpf_syswrap_args6(args), (void *)PT_REGS_PARM7_CORE_SYSCALL(regs) #define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args) /* @@ -667,4 +913,11 @@ ____##name(struct pt_regs *ctx, ##args) #define BPF_KPROBE_SYSCALL BPF_KSYSCALL +/* BPF_UPROBE and BPF_URETPROBE are identical to BPF_KPROBE and BPF_KRETPROBE, + * but are named way less confusingly for SEC("uprobe") and SEC("uretprobe") + * use cases. + */ +#define BPF_UPROBE(name, args...) BPF_KPROBE(name, ##args) +#define BPF_URETPROBE(name, args...) BPF_KRETPROBE(name, ##args) + #endif diff --git a/contrib/libbpf/src/btf.c b/contrib/libbpf/src/btf.c index b0325000..0a2c0792 100644 --- a/contrib/libbpf/src/btf.c +++ b/contrib/libbpf/src/btf.c @@ -1000,11 +1000,9 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, } } - err = 0; - if (!btf_data) { pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path); - err = -ENOENT; + err = -ENODATA; goto done; } btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf); @@ -1350,9 +1348,9 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf) void *ptr; int err; - /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so + /* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so * let's start with a sane default - 4KiB here - and resize it only if - * bpf_obj_get_info_by_fd() needs a bigger buffer. + * bpf_btf_get_info_by_fd() needs a bigger buffer. 
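The comment above shows the new typed wrappers being adopted inside libbpf itself; for external callers the payoff is a prototype the compiler can type-check, with behavior identical to bpf_obj_get_info_by_fd(). A small userspace sketch using the program-flavored wrapper (fd acquisition elided):

#include <stdio.h>
#include <bpf/bpf.h>

/* Print basic facts about an already-opened BPF program fd. */
static int dump_prog_info(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	int err;

	err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
	if (err)
		return err;
	printf("id=%u type=%u name=%s\n", info.id, info.type, info.name);
	return 0;
}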
*/ last_size = 4096; ptr = malloc(last_size); @@ -1362,7 +1360,7 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf) memset(&btf_info, 0, sizeof(btf_info)); btf_info.btf = ptr_to_u64(ptr); btf_info.btf_size = last_size; - err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); + err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len); if (!err && btf_info.btf_size > last_size) { void *temp_ptr; @@ -1380,7 +1378,7 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf) btf_info.btf = ptr_to_u64(ptr); btf_info.btf_size = last_size; - err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); + err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len); } if (err || btf_info.btf_size > last_size) { diff --git a/contrib/libbpf/src/gen_loader.c b/contrib/libbpf/src/gen_loader.c index 23f5c467..83e8e3bf 100644 --- a/contrib/libbpf/src/gen_loader.c +++ b/contrib/libbpf/src/gen_loader.c @@ -560,7 +560,7 @@ static void emit_find_attach_target(struct bpf_gen *gen) } void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, - bool is_typeless, int kind, int insn_idx) + bool is_typeless, bool is_ld64, int kind, int insn_idx) { struct ksym_relo_desc *relo; @@ -574,6 +574,7 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, relo->name = name; relo->is_weak = is_weak; relo->is_typeless = is_typeless; + relo->is_ld64 = is_ld64; relo->kind = kind; relo->insn_idx = insn_idx; gen->relo_cnt++; @@ -586,9 +587,11 @@ static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_des int i; for (i = 0; i < gen->nr_ksyms; i++) { - if (!strcmp(gen->ksyms[i].name, relo->name)) { - gen->ksyms[i].ref++; - return &gen->ksyms[i]; + kdesc = &gen->ksyms[i]; + if (kdesc->kind == relo->kind && kdesc->is_ld64 == relo->is_ld64 && + !strcmp(kdesc->name, relo->name)) { + kdesc->ref++; + return kdesc; } } kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc)); @@ -603,6 +606,7 @@ static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_des kdesc->ref = 1; kdesc->off = 0; kdesc->insn = 0; + kdesc->is_ld64 = relo->is_ld64; return kdesc; } @@ -804,11 +808,13 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, return; /* try to copy from existing ldimm64 insn */ if (kdesc->ref > 1) { - move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4, - kdesc->insn + offsetof(struct bpf_insn, imm)); move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4, kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)); - /* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */ + move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4, + kdesc->insn + offsetof(struct bpf_insn, imm)); + /* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob + * If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn + */ emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3)); goto clear_src_reg; } @@ -831,7 +837,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm))); /* skip src_reg adjustment */ - emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3)); clear_src_reg: /* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */ reg_mask = src_reg_mask(); @@ -862,23 
+868,17 @@ static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn { int insn; - pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx); + pr_debug("gen: emit_relo (%d): %s at %d %s\n", + relo->kind, relo->name, relo->insn_idx, relo->is_ld64 ? "ld64" : "call"); insn = insns + sizeof(struct bpf_insn) * relo->insn_idx; emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn)); - switch (relo->kind) { - case BTF_KIND_VAR: + if (relo->is_ld64) { if (relo->is_typeless) emit_relo_ksym_typeless(gen, relo, insn); else emit_relo_ksym_btf(gen, relo, insn); - break; - case BTF_KIND_FUNC: + } else { emit_relo_kfunc_btf(gen, relo, insn); - break; - default: - pr_warn("Unknown relocation kind '%d'\n", relo->kind); - gen->error = -EDOM; - return; } } @@ -901,18 +901,20 @@ static void cleanup_core_relo(struct bpf_gen *gen) static void cleanup_relos(struct bpf_gen *gen, int insns) { + struct ksym_desc *kdesc; int i, insn; for (i = 0; i < gen->nr_ksyms; i++) { + kdesc = &gen->ksyms[i]; /* only close fds for typed ksyms and kfuncs */ - if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) { + if (kdesc->is_ld64 && !kdesc->typeless) { /* close fd recorded in insn[insn_idx + 1].imm */ - insn = gen->ksyms[i].insn; + insn = kdesc->insn; insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm); emit_sys_close_blob(gen, insn); - } else if (gen->ksyms[i].kind == BTF_KIND_FUNC) { - emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off)); - if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ) + } else if (!kdesc->is_ld64) { + emit_sys_close_blob(gen, blob_fd_array_off(gen, kdesc->off)); + if (kdesc->off < MAX_FD_ARRAY_SZ) gen->nr_fd_array--; } } diff --git a/contrib/libbpf/src/libbpf.c b/contrib/libbpf/src/libbpf.c index 79be9afc..dd6ca6be 100644 --- a/contrib/libbpf/src/libbpf.c +++ b/contrib/libbpf/src/libbpf.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -54,6 +53,7 @@ #include "libbpf_internal.h" #include "hashmap.h" #include "bpf_gen_internal.h" +#include "zip.h" #ifndef BPF_FS_MAGIC #define BPF_FS_MAGIC 0xcafe4a11 @@ -116,6 +116,7 @@ static const char * const attach_type_name[] = { [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate", [BPF_PERF_EVENT] = "perf_event", [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi", + [BPF_STRUCT_OPS] = "struct_ops", }; static const char * const link_type_name[] = { @@ -215,9 +216,10 @@ static libbpf_print_fn_t __libbpf_pr = __base_pr; libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn) { - libbpf_print_fn_t old_print_fn = __libbpf_pr; + libbpf_print_fn_t old_print_fn; + + old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED); - __libbpf_pr = fn; return old_print_fn; } @@ -226,8 +228,10 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...) 
{ va_list args; int old_errno; + libbpf_print_fn_t print_fn; - if (!__libbpf_pr) + print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED); + if (!print_fn) return; old_errno = errno; @@ -315,8 +319,8 @@ enum reloc_type { RELO_LD64, RELO_CALL, RELO_DATA, - RELO_EXTERN_VAR, - RELO_EXTERN_FUNC, + RELO_EXTERN_LD64, + RELO_EXTERN_CALL, RELO_SUBPROG_ADDR, RELO_CORE, }; @@ -329,6 +333,7 @@ struct reloc_desc { struct { int map_idx; int sym_off; + int ext_idx; }; }; }; @@ -467,6 +472,7 @@ struct bpf_struct_ops { #define KCONFIG_SEC ".kconfig" #define KSYMS_SEC ".ksyms" #define STRUCT_OPS_SEC ".struct_ops" +#define STRUCT_OPS_LINK_SEC ".struct_ops.link" enum libbpf_map_type { LIBBPF_MAP_UNSPEC, @@ -596,6 +602,7 @@ struct elf_state { Elf64_Ehdr *ehdr; Elf_Data *symbols; Elf_Data *st_ops_data; + Elf_Data *st_ops_link_data; size_t shstrndx; /* section index for section name strings */ size_t strtabidx; struct elf_sec_desc *secs; @@ -605,6 +612,7 @@ struct elf_state { int text_shndx; int symbols_shndx; int st_ops_shndx; + int st_ops_link_shndx; }; struct usdt_manager; @@ -799,7 +807,6 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, progs = obj->programs; nr_progs = obj->nr_programs; nr_syms = symbols->d_size / sizeof(Elf64_Sym); - sec_off = 0; for (i = 0; i < nr_syms; i++) { sym = elf_sym_by_idx(obj, i); @@ -870,42 +877,6 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, return 0; } -__u32 get_kernel_version(void) -{ - /* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release, - * but Ubuntu provides /proc/version_signature file, as described at - * https://ubuntu.com/kernel, with an example contents below, which we - * can use to get a proper LINUX_VERSION_CODE. - * - * Ubuntu 5.4.0-12.15-generic 5.4.8 - * - * In the above, 5.4.8 is what kernel is actually expecting, while - * uname() call will return 5.4.0 in info.release. 
- */ - const char *ubuntu_kver_file = "/proc/version_signature"; - __u32 major, minor, patch; - struct utsname info; - - if (faccessat(AT_FDCWD, ubuntu_kver_file, R_OK, AT_EACCESS) == 0) { - FILE *f; - - f = fopen(ubuntu_kver_file, "r"); - if (f) { - if (fscanf(f, "%*s %*s %d.%d.%d\n", &major, &minor, &patch) == 3) { - fclose(f); - return KERNEL_VERSION(major, minor, patch); - } - fclose(f); - } - /* something went wrong, fall back to uname() approach */ - } - - uname(&info); - if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3) - return 0; - return KERNEL_VERSION(major, minor, patch); -} - static const struct btf_member * find_member_by_offset(const struct btf_type *t, __u32 bit_offset) { @@ -1155,7 +1126,8 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj) return 0; } -static int bpf_object__init_struct_ops_maps(struct bpf_object *obj) +static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name, + int shndx, Elf_Data *data, __u32 map_flags) { const struct btf_type *type, *datasec; const struct btf_var_secinfo *vsi; @@ -1166,15 +1138,15 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj) struct bpf_map *map; __u32 i; - if (obj->efile.st_ops_shndx == -1) + if (shndx == -1) return 0; btf = obj->btf; - datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC, + datasec_id = btf__find_by_name_kind(btf, sec_name, BTF_KIND_DATASEC); if (datasec_id < 0) { pr_warn("struct_ops init: DATASEC %s not found\n", - STRUCT_OPS_SEC); + sec_name); return -EINVAL; } @@ -1187,7 +1159,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj) type_id = btf__resolve_type(obj->btf, vsi->type); if (type_id < 0) { pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n", - vsi->type, STRUCT_OPS_SEC); + vsi->type, sec_name); return -EINVAL; } @@ -1206,7 +1178,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj) if (IS_ERR(map)) return PTR_ERR(map); - map->sec_idx = obj->efile.st_ops_shndx; + map->sec_idx = shndx; map->sec_offset = vsi->offset; map->name = strdup(var_name); if (!map->name) @@ -1216,6 +1188,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj) map->def.key_size = sizeof(int); map->def.value_size = type->size; map->def.max_entries = 1; + map->def.map_flags = map_flags; map->st_ops = calloc(1, sizeof(*map->st_ops)); if (!map->st_ops) @@ -1228,14 +1201,14 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj) if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off) return -ENOMEM; - if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) { + if (vsi->offset + type->size > data->d_size) { pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n", - var_name, STRUCT_OPS_SEC); + var_name, sec_name); return -EINVAL; } memcpy(st_ops->data, - obj->efile.st_ops_data->d_buf + vsi->offset, + data->d_buf + vsi->offset, type->size); st_ops->tname = tname; st_ops->type = type; @@ -1248,6 +1221,19 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj) return 0; } +static int bpf_object_init_struct_ops(struct bpf_object *obj) +{ + int err; + + err = init_struct_ops_maps(obj, STRUCT_OPS_SEC, obj->efile.st_ops_shndx, + obj->efile.st_ops_data, 0); + err = err ?: init_struct_ops_maps(obj, STRUCT_OPS_LINK_SEC, + obj->efile.st_ops_link_shndx, + obj->efile.st_ops_link_data, + BPF_F_LINK); + return err; +} + static struct bpf_object *bpf_object__new(const char *path, const void *obj_buf, size_t obj_buf_sz, @@ -1284,6 +1270,7 @@ 
static struct bpf_object *bpf_object__new(const char *path, obj->efile.obj_buf_sz = obj_buf_sz; obj->efile.btf_maps_shndx = -1; obj->efile.st_ops_shndx = -1; + obj->efile.st_ops_link_shndx = -1; obj->kconfig_map_idx = -1; obj->kern_version = get_kernel_version(); @@ -1301,6 +1288,7 @@ static void bpf_object__elf_finish(struct bpf_object *obj) obj->efile.elf = NULL; obj->efile.symbols = NULL; obj->efile.st_ops_data = NULL; + obj->efile.st_ops_link_data = NULL; zfree(&obj->efile.secs); obj->efile.sec_cnt = 0; @@ -2652,10 +2640,10 @@ static int bpf_object__init_maps(struct bpf_object *obj, strict = !OPTS_GET(opts, relaxed_maps, false); pin_root_path = OPTS_GET(opts, pin_root_path, NULL); - err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path); + err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path); err = err ?: bpf_object__init_global_data_maps(obj); err = err ?: bpf_object__init_kconfig_map(obj); - err = err ?: bpf_object__init_struct_ops_maps(obj); + err = err ?: bpf_object_init_struct_ops(obj); return err; } @@ -2789,12 +2777,13 @@ static bool libbpf_needs_btf(const struct bpf_object *obj) { return obj->efile.btf_maps_shndx >= 0 || obj->efile.st_ops_shndx >= 0 || + obj->efile.st_ops_link_shndx >= 0 || obj->nr_extern > 0; } static bool kernel_needs_btf(const struct bpf_object *obj) { - return obj->efile.st_ops_shndx >= 0; + return obj->efile.st_ops_shndx >= 0 || obj->efile.st_ops_link_shndx >= 0; } static int bpf_object__init_btf(struct bpf_object *obj, @@ -3487,6 +3476,9 @@ static int bpf_object__elf_collect(struct bpf_object *obj) } else if (strcmp(name, STRUCT_OPS_SEC) == 0) { obj->efile.st_ops_data = data; obj->efile.st_ops_shndx = idx; + } else if (strcmp(name, STRUCT_OPS_LINK_SEC) == 0) { + obj->efile.st_ops_link_data = data; + obj->efile.st_ops_link_shndx = idx; } else { pr_info("elf: skipping unrecognized data section(%d) %s\n", idx, name); @@ -3501,6 +3493,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj) /* Only do relo for section with exec instructions */ if (!section_have_execinstr(obj, targ_sec_idx) && strcmp(name, ".rel" STRUCT_OPS_SEC) && + strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) && strcmp(name, ".rel" MAPS_ELF_SEC)) { pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n", idx, name, targ_sec_idx, @@ -4046,11 +4039,11 @@ static int bpf_program__record_reloc(struct bpf_program *prog, pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n", prog->name, i, ext->name, ext->sym_idx, insn_idx); if (insn->code == (BPF_JMP | BPF_CALL)) - reloc_desc->type = RELO_EXTERN_FUNC; + reloc_desc->type = RELO_EXTERN_CALL; else - reloc_desc->type = RELO_EXTERN_VAR; + reloc_desc->type = RELO_EXTERN_LD64; reloc_desc->insn_idx = insn_idx; - reloc_desc->sym_off = i; /* sym_off stores extern index */ + reloc_desc->ext_idx = i; return 0; } @@ -4382,7 +4375,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd) char *new_name; memset(&info, 0, len); - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_map_get_info_by_fd(fd, &info, &len); if (err && errno == EINVAL) err = bpf_get_map_info_from_fdinfo(fd, &info); if (err) @@ -4766,7 +4759,7 @@ static int probe_module_btf(void) * kernel's module BTF support coincides with support for * name/name_len fields in struct bpf_btf_info. 
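/* Editor's note: a minimal BPF-side sketch (not part of the diff) showing the
 * two datasec names recognized by bpf_object__elf_collect() above. struct
 * example_ops and its fn member are invented stand-ins; a real user would use
 * a concrete struct_ops type from vmlinux.h (e.g. tcp_congestion_ops), since
 * the type must exist in kernel BTF for the load to succeed.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

struct example_ops {            /* stand-in; must match a kernel struct_ops type */
	int (*fn)(void);
};

SEC("struct_ops/example_fn")
int BPF_PROG(example_fn)
{
	return 0;
}

SEC(".struct_ops")              /* classic struct_ops map, attached by map value update */
struct example_ops plain_ops = { .fn = (void *)example_fn };

SEC(".struct_ops.link")         /* new: map gets BPF_F_LINK; attaching creates a real bpf_link */
struct example_ops linked_ops = { .fn = (void *)example_fn };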
 */
-	err = bpf_obj_get_info_by_fd(fd, &info, &len);
+	err = bpf_btf_get_info_by_fd(fd, &info, &len);
 	close(fd);
 	return !err;
 }
@@ -4929,7 +4922,7 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
 	int err;
 
 	memset(&map_info, 0, map_info_len);
-	err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
+	err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
 	if (err && errno == EINVAL)
 		err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
 	if (err) {
@@ -5474,7 +5467,7 @@ static int load_module_btfs(struct bpf_object *obj)
 		info.name = ptr_to_u64(name);
 		info.name_len = sizeof(name);
 
-		err = bpf_obj_get_info_by_fd(fd, &info, &len);
+		err = bpf_btf_get_info_by_fd(fd, &info, &len);
 		if (err) {
 			err = -errno;
 			pr_warn("failed to get BTF object #%d info: %d\n", id, err);
@@ -5819,8 +5812,8 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
 }
 
 /* base map load ldimm64 special constant, used also for log fixup logic */
-#define MAP_LDIMM64_POISON_BASE 2001000000
-#define MAP_LDIMM64_POISON_PFX "200100"
+#define POISON_LDIMM64_MAP_BASE 2001000000
+#define POISON_LDIMM64_MAP_PFX "200100"
 
 static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
 			       int insn_idx, struct bpf_insn *insn,
@@ -5842,12 +5835,36 @@ static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
 		 * invalid func unknown#2001000123
 		 * where lower 123 is map index into obj->maps[] array
 		 */
-		insn->imm = MAP_LDIMM64_POISON_BASE + map_idx;
+		insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;
 
 		insn++;
 	}
 }
 
+/* unresolved kfunc call special constant, used also for log fixup logic */
+#define POISON_CALL_KFUNC_BASE 2002000000
+#define POISON_CALL_KFUNC_PFX "2002"
+
+static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
+			      int insn_idx, struct bpf_insn *insn,
+			      int ext_idx, const struct extern_desc *ext)
+{
+	pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n",
+		 prog->name, relo_idx, insn_idx, ext->name);
+
+	/* we turn kfunc call into invalid helper call with identifiable constant */
+	insn->code = BPF_JMP | BPF_CALL;
+	insn->dst_reg = 0;
+	insn->src_reg = 0;
+	insn->off = 0;
+	/* if this instruction is reachable (not a dead code),
+	 * verifier will complain with something like:
+	 * invalid func unknown#2002000123
+	 * where lower 123 is extern index into obj->externs[] array
+	 */
+	insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
+}
+
 /* Relocate data references within program code:
  *  - map references;
  *  - global variable references;
@@ -5892,8 +5909,8 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
 					relo->map_idx, map);
 			}
 			break;
-		case RELO_EXTERN_VAR:
-			ext = &obj->externs[relo->sym_off];
+		case RELO_EXTERN_LD64:
+			ext = &obj->externs[relo->ext_idx];
 			if (ext->type == EXT_KCFG) {
 				if (obj->gen_loader) {
 					insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
@@ -5914,15 +5931,15 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
 				}
 			}
 			break;
-		case RELO_EXTERN_FUNC:
-			ext = &obj->externs[relo->sym_off];
+		case RELO_EXTERN_CALL:
+			ext = &obj->externs[relo->ext_idx];
 			insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
 			if (ext->is_set) {
 				insn[0].imm = ext->ksym.kernel_btf_id;
 				insn[0].off = ext->ksym.btf_fd_idx;
-			} else { /* unresolved weak kfunc */
-				insn[0].imm = 0;
-				insn[0].off = 0;
+			} else { /* unresolved weak kfunc call */
+				poison_kfunc_call(prog, i, relo->insn_idx, insn,
						  relo->ext_idx, ext);
 			}
 			break;
 		case RELO_SUBPROG_ADDR:
@@ -6116,7 +6133,11 @@ static int append_subprog_relos(struct 
bpf_program *main_prog, struct bpf_progra if (main_prog == subprog) return 0; relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos)); - if (!relos) + /* if new count is zero, reallocarray can return a valid NULL result; + * in this case the previous pointer will be freed, so we *have to* + * reassign old pointer to the new value (even if it's NULL) + */ + if (!relos && new_cnt) return -ENOMEM; if (subprog->nr_reloc) memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc, @@ -6152,7 +6173,7 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog, continue; relo = find_prog_insn_relo(prog, insn_idx); - if (relo && relo->type == RELO_EXTERN_FUNC) + if (relo && relo->type == RELO_EXTERN_CALL) /* kfunc relocations will be handled later * in bpf_object__relocate_data() */ @@ -6647,7 +6668,7 @@ static int bpf_object__collect_relos(struct bpf_object *obj) return -LIBBPF_ERRNO__INTERNAL; } - if (idx == obj->efile.st_ops_shndx) + if (idx == obj->efile.st_ops_shndx || idx == obj->efile.st_ops_link_shndx) err = bpf_object__collect_st_ops_relos(obj, shdr, data); else if (idx == obj->efile.btf_maps_shndx) err = bpf_object__collect_map_relos(obj, shdr, data); @@ -7030,13 +7051,13 @@ static void fixup_log_missing_map_load(struct bpf_program *prog, char *buf, size_t buf_sz, size_t log_sz, char *line1, char *line2, char *line3) { - /* Expected log for failed and not properly guarded CO-RE relocation: + /* Expected log for failed and not properly guarded map reference: * line1 -> 123: (85) call unknown#2001000345 * line2 -> invalid func unknown#2001000345 * line3 -> * * "123" is the index of the instruction that was poisoned. - * "345" in "2001000345" are map index in obj->maps to fetch map name. + * "345" in "2001000345" is a map index in obj->maps to fetch map name. */ struct bpf_object *obj = prog->obj; const struct bpf_map *map; @@ -7046,7 +7067,7 @@ static void fixup_log_missing_map_load(struct bpf_program *prog, if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2) return; - map_idx -= MAP_LDIMM64_POISON_BASE; + map_idx -= POISON_LDIMM64_MAP_BASE; if (map_idx < 0 || map_idx >= obj->nr_maps) return; map = &obj->maps[map_idx]; @@ -7059,6 +7080,39 @@ static void fixup_log_missing_map_load(struct bpf_program *prog, patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); } +static void fixup_log_missing_kfunc_call(struct bpf_program *prog, + char *buf, size_t buf_sz, size_t log_sz, + char *line1, char *line2, char *line3) +{ + /* Expected log for failed and not properly guarded kfunc call: + * line1 -> 123: (85) call unknown#2002000345 + * line2 -> invalid func unknown#2002000345 + * line3 -> + * + * "123" is the index of the instruction that was poisoned. + * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name. 
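/* Editor's note: the zero-count reallocarray() caveat described in the new
 * comment above, restated as a self-contained sketch (not part of the diff);
 * grow_relos and its parameters are hypothetical names.
 */
static int grow_relos(struct reloc_desc **arr, size_t new_cnt)
{
	struct reloc_desc *tmp;

	tmp = libbpf_reallocarray(*arr, new_cnt, sizeof(**arr));
	if (!tmp && new_cnt)
		return -ENOMEM;	/* genuine allocation failure */
	/* new_cnt == 0: the old buffer was freed and NULL is a valid result,
	 * so the (possibly NULL) pointer must still be stored back to avoid
	 * a dangling pointer
	 */
	*arr = tmp;
	return 0;
}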
+ */ + struct bpf_object *obj = prog->obj; + const struct extern_desc *ext; + int insn_idx, ext_idx; + char patch[128]; + + if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2) + return; + + ext_idx -= POISON_CALL_KFUNC_BASE; + if (ext_idx < 0 || ext_idx >= obj->nr_extern) + return; + ext = &obj->externs[ext_idx]; + + snprintf(patch, sizeof(patch), + "%d: \n" + "kfunc '%s' is referenced but wasn't resolved\n", + insn_idx, ext->name); + + patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); +} + static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz) { /* look for familiar error patterns in last N lines of the log */ @@ -7078,23 +7132,33 @@ static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_s if (!cur_line) return; - /* failed CO-RE relocation case */ if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) { prev_line = find_prev_line(buf, cur_line); if (!prev_line) continue; + /* failed CO-RE relocation case */ fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz, prev_line, cur_line, next_line); return; - } else if (str_has_pfx(cur_line, "invalid func unknown#"MAP_LDIMM64_POISON_PFX)) { + } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) { prev_line = find_prev_line(buf, cur_line); if (!prev_line) continue; + /* reference to uncreated BPF map */ fixup_log_missing_map_load(prog, buf, buf_sz, log_sz, prev_line, cur_line, next_line); return; + } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) { + prev_line = find_prev_line(buf, cur_line); + if (!prev_line) + continue; + + /* reference to unresolved kfunc */ + fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz, + prev_line, cur_line, next_line); + return; } } } @@ -7106,19 +7170,22 @@ static int bpf_program_record_relos(struct bpf_program *prog) for (i = 0; i < prog->nr_reloc; i++) { struct reloc_desc *relo = &prog->reloc_desc[i]; - struct extern_desc *ext = &obj->externs[relo->sym_off]; + struct extern_desc *ext = &obj->externs[relo->ext_idx]; + int kind; switch (relo->type) { - case RELO_EXTERN_VAR: + case RELO_EXTERN_LD64: if (ext->type != EXT_KSYM) continue; + kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ? + BTF_KIND_VAR : BTF_KIND_FUNC; bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak, !ext->ksym.type_id, - BTF_KIND_VAR, relo->insn_idx); + true, kind, relo->insn_idx); break; - case RELO_EXTERN_FUNC: + case RELO_EXTERN_CALL: bpf_gen__record_extern(obj->gen_loader, ext->name, - ext->is_weak, false, BTF_KIND_FUNC, + ext->is_weak, false, false, BTF_KIND_FUNC, relo->insn_idx); break; case RELO_CORE: { @@ -7355,7 +7422,7 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj) if (!bpf_map__is_internal(m)) continue; if (!kernel_supports(obj, FEAT_ARRAY_MMAP)) - m->def.map_flags ^= BPF_F_MMAPABLE; + m->def.map_flags &= ~BPF_F_MMAPABLE; } return 0; @@ -7541,8 +7608,9 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id, kern_btf, kfunc_proto_id); if (ret <= 0) { - pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n", - ext->name, local_func_proto_id, kfunc_proto_id); + pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n", + ext->name, local_func_proto_id, + mod_btf ? 
mod_btf->name : "vmlinux", kfunc_proto_id); return -EINVAL; } @@ -7570,8 +7638,14 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, ext->is_set = true; ext->ksym.kernel_btf_id = kfunc_id; ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0; - pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n", - ext->name, kfunc_id); + /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data() + * populates FD into ld_imm64 insn when it's used to point to kfunc. + * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call. + * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64. + */ + ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0; + pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n", + ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id); return 0; } @@ -7714,6 +7788,37 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, return 0; } +static void bpf_map_prepare_vdata(const struct bpf_map *map) +{ + struct bpf_struct_ops *st_ops; + __u32 i; + + st_ops = map->st_ops; + for (i = 0; i < btf_vlen(st_ops->type); i++) { + struct bpf_program *prog = st_ops->progs[i]; + void *kern_data; + int prog_fd; + + if (!prog) + continue; + + prog_fd = bpf_program__fd(prog); + kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; + *(unsigned long *)kern_data = prog_fd; + } +} + +static int bpf_object_prepare_struct_ops(struct bpf_object *obj) +{ + int i; + + for (i = 0; i < obj->nr_maps; i++) + if (bpf_map__is_struct_ops(&obj->maps[i])) + bpf_map_prepare_vdata(&obj->maps[i]); + + return 0; +} + static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) { int err, i; @@ -7739,6 +7844,7 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); err = err ? : bpf_object__load_progs(obj, extra_log_level); err = err ? : bpf_object_init_prog_arrays(obj); + err = err ? : bpf_object_prepare_struct_ops(obj); if (obj->gen_loader) { /* reset FDs */ @@ -8399,7 +8505,8 @@ int bpf_program__set_insns(struct bpf_program *prog, return -EBUSY; insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns)); - if (!insns) { + /* NULL is a valid return from reallocarray if the new count is zero */ + if (!insns && new_insn_cnt) { pr_warn("prog '%s': failed to realloc prog code\n", prog->name); return -ENOMEM; } @@ -8429,12 +8536,31 @@ enum bpf_prog_type bpf_program__type(const struct bpf_program *prog) return prog->type; } +static size_t custom_sec_def_cnt; +static struct bpf_sec_def *custom_sec_defs; +static struct bpf_sec_def custom_fallback_def; +static bool has_custom_fallback_def; +static int last_custom_sec_def_handler_id; + int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) { if (prog->obj->loaded) return libbpf_err(-EBUSY); + /* if type is not changed, do nothing */ + if (prog->type == type) + return 0; + prog->type = type; + + /* If a program type was changed, we need to reset associated SEC() + * handler, as it will be invalid now. The only exception is a generic + * fallback handler, which by definition is program type-agnostic and + * is a catch-all custom handler, optionally set by the application, + * so should be able to handle any type of BPF program. 
+ */ + if (prog->sec_def != &custom_fallback_def) + prog->sec_def = NULL; return 0; } @@ -8605,16 +8731,10 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE), SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT), SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE), + SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE), SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE), }; -static size_t custom_sec_def_cnt; -static struct bpf_sec_def *custom_sec_defs; -static struct bpf_sec_def custom_fallback_def; -static bool has_custom_fallback_def; - -static int last_custom_sec_def_handler_id; - int libbpf_register_prog_handler(const char *sec, enum bpf_prog_type prog_type, enum bpf_attach_type exp_attach_type, @@ -8694,7 +8814,11 @@ int libbpf_unregister_prog_handler(int handler_id) /* try to shrink the array, but it's ok if we couldn't */ sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs)); - if (sec_defs) + /* if new count is zero, reallocarray can return a valid NULL result; + * in this case the previous pointer will be freed, so we *have to* + * reassign old pointer to the new value (even if it's NULL) + */ + if (sec_defs || custom_sec_def_cnt == 0) custom_sec_defs = sec_defs; return 0; @@ -8847,6 +8971,7 @@ const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t) } static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, + int sec_idx, size_t offset) { struct bpf_map *map; @@ -8856,7 +8981,8 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, map = &obj->maps[i]; if (!bpf_map__is_struct_ops(map)) continue; - if (map->sec_offset <= offset && + if (map->sec_idx == sec_idx && + map->sec_offset <= offset && offset - map->sec_offset < map->def.value_size) return map; } @@ -8898,7 +9024,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, } name = elf_sym_str(obj, sym->st_name) ?: ""; - map = find_struct_ops_map_by_offset(obj, rel->r_offset); + map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset); if (!map) { pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n", (size_t)rel->r_offset); @@ -8965,8 +9091,9 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, } /* struct_ops BPF prog can be re-used between multiple - * .struct_ops as long as it's the same struct_ops struct - * definition and the same function pointer field + * .struct_ops & .struct_ops.link as long as it's the + * same struct_ops struct definition and the same + * function pointer field */ if (prog->attach_btf_id != st_ops->type_id || prog->expected_attach_type != member_idx) { @@ -9066,9 +9193,9 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) int err; memset(&info, 0, info_len); - err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len); if (err) { - pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n", + pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n", attach_prog_fd, err); return err; } @@ -9760,6 +9887,7 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p char errmsg[STRERR_BUFSIZE]; struct bpf_link_perf *link; int prog_fd, link_fd = -1, err; + bool force_ioctl_attach; if (!OPTS_VALID(opts, bpf_perf_event_opts)) return libbpf_err_ptr(-EINVAL); @@ -9783,7 +9911,8 @@ struct bpf_link 
*bpf_program__attach_perf_event_opts(const struct bpf_program *p
 	link->link.dealloc = &bpf_link_perf_dealloc;
 	link->perf_event_fd = pfd;
 
-	if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
+	force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
+	if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
 		DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
 			.perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
 
@@ -9946,16 +10075,20 @@ static int append_to_file(const char *file, const char *fmt, ...)
 {
 	int fd, n, err = 0;
 	va_list ap;
+	char buf[1024];
+
+	va_start(ap, fmt);
+	n = vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	if (n < 0 || n >= sizeof(buf))
+		return -EINVAL;
 
 	fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
 	if (fd < 0)
 		return -errno;
 
-	va_start(ap, fmt);
-	n = vdprintf(fd, fmt, ap);
-	va_end(ap);
-
-	if (n < 0)
+	if (write(fd, buf, n) < 0)
 		err = -errno;
 
 	close(fd);
@@ -9994,9 +10127,16 @@ static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
 					 const char *kfunc_name, size_t offset)
 {
 	static int index = 0;
+	int i;
 
 	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
 		 __sync_fetch_and_add(&index, 1));
+
+	/* sanitize kfunc_name in the probe name */
+	for (i = 0; buf[i]; i++) {
+		if (!isalnum(buf[i]))
+			buf[i] = '_';
+	}
 }
 
 static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
@@ -10135,6 +10275,7 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
 				const struct bpf_kprobe_opts *opts)
 {
 	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
+	enum probe_attach_mode attach_mode;
 	char errmsg[STRERR_BUFSIZE];
 	char *legacy_probe = NULL;
 	struct bpf_link *link;
@@ -10145,11 +10286,32 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
 	if (!OPTS_VALID(opts, bpf_kprobe_opts))
 		return libbpf_err_ptr(-EINVAL);
 
+	attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
 	retprobe = OPTS_GET(opts, retprobe, false);
 	offset = OPTS_GET(opts, offset, 0);
 	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
 
 	legacy = determine_kprobe_perf_type() < 0;
+	switch (attach_mode) {
+	case PROBE_ATTACH_MODE_LEGACY:
+		legacy = true;
+		pe_opts.force_ioctl_attach = true;
+		break;
+	case PROBE_ATTACH_MODE_PERF:
+		if (legacy)
+			return libbpf_err_ptr(-ENOTSUP);
+		pe_opts.force_ioctl_attach = true;
+		break;
+	case PROBE_ATTACH_MODE_LINK:
+		if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
+			return libbpf_err_ptr(-ENOTSUP);
+		break;
+	case PROBE_ATTACH_MODE_DEFAULT:
+		break;
+	default:
+		return libbpf_err_ptr(-EINVAL);
+	}
+
 	if (!legacy) {
 		pfd = perf_event_open_probe(false /* uprobe */, retprobe,
 					    func_name, offset,
@@ -10560,32 +10722,19 @@ static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn)
 	return NULL;
 }
 
-/* Find offset of function name in object specified by path. "name" matches
- * symbol name or name@@LIB for library functions.
+/* Find offset of function name in the provided ELF object. "binary_path" is
+ * the path to the ELF binary represented by "elf", and is used only for
+ * error reporting. "name" matches symbol name or name@@LIB for library
+ * functions.
  */
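/* Editor's note: hedged usage sketch (not part of the diff) for the new
 * attach_mode option handled by the switch above; "prog" and the traced
 * kernel function name are assumptions for illustration.
 */
#include <bpf/libbpf.h>

static struct bpf_link *attach_legacy_kprobe(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.attach_mode = PROBE_ATTACH_MODE_LEGACY, /* force debugfs/tracefs path */
	);

	/* returns NULL with errno set on failure; ENOTSUP if the mode is unsupported */
	return bpf_program__attach_kprobe_opts(prog, "do_unlinkat", &opts);
}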
-static long elf_find_func_offset(const char *binary_path, const char *name)
+static long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name)
 {
-	int fd, i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
+	int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
 	bool is_shared_lib, is_name_qualified;
-	char errmsg[STRERR_BUFSIZE];
 	long ret = -ENOENT;
 	size_t name_len;
 	GElf_Ehdr ehdr;
-	Elf *elf;
 
-	fd = open(binary_path, O_RDONLY | O_CLOEXEC);
-	if (fd < 0) {
-		ret = -errno;
-		pr_warn("failed to open %s: %s\n", binary_path,
-			libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
-		return ret;
-	}
-	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
-	if (!elf) {
-		pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
-		close(fd);
-		return -LIBBPF_ERRNO__FORMAT;
-	}
 	if (!gelf_getehdr(elf, &ehdr)) {
 		pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
 		ret = -LIBBPF_ERRNO__FORMAT;
@@ -10598,7 +10747,7 @@ static long elf_find_func_offset(const char *binary_path, const char *name)
 	/* Does name specify "@@LIB"? */
 	is_name_qualified = strstr(name, "@@") != NULL;
 
-	/* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if
+	/* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if
 	 * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically
 	 * linked binary may not have SHT_DYNSYM, so absence of a section should not be
 	 * reported as a warning/error.
@@ -10711,11 +10860,101 @@ static long elf_find_func_offset(const char *binary_path, const char *name)
 		}
 	}
 out:
+	return ret;
+}
+
+/* Find offset of function name in ELF object specified by path. "name" matches
+ * symbol name or name@@LIB for library functions.
+ */
+static long elf_find_func_offset_from_file(const char *binary_path, const char *name)
+{
+	char errmsg[STRERR_BUFSIZE];
+	long ret = -ENOENT;
+	Elf *elf;
+	int fd;
+
+	fd = open(binary_path, O_RDONLY | O_CLOEXEC);
+	if (fd < 0) {
+		ret = -errno;
+		pr_warn("failed to open %s: %s\n", binary_path,
+			libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
+		return ret;
+	}
+	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+	if (!elf) {
+		pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
+		close(fd);
+		return -LIBBPF_ERRNO__FORMAT;
+	}
+
+	ret = elf_find_func_offset(elf, binary_path, name);
 	elf_end(elf);
 	close(fd);
 	return ret;
 }
 
+/* Find offset of function name in archive specified by path. Currently
+ * supported are .zip files that do not compress their contents, as used on
+ * Android in the form of APKs, for example. "file_name" is the name of the ELF
+ * file inside the archive. "func_name" matches symbol name or name@@LIB for
+ * library functions.
+ * + * An overview of the APK format specifically provided here: + * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents + */ +static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name, + const char *func_name) +{ + struct zip_archive *archive; + struct zip_entry entry; + long ret; + Elf *elf; + + archive = zip_archive_open(archive_path); + if (IS_ERR(archive)) { + ret = PTR_ERR(archive); + pr_warn("zip: failed to open %s: %ld\n", archive_path, ret); + return ret; + } + + ret = zip_archive_find_entry(archive, file_name, &entry); + if (ret) { + pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name, + archive_path, ret); + goto out; + } + pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path, + (unsigned long)entry.data_offset); + + if (entry.compression) { + pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name, + archive_path); + ret = -LIBBPF_ERRNO__FORMAT; + goto out; + } + + elf = elf_memory((void *)entry.data, entry.data_length); + if (!elf) { + pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path, + elf_errmsg(-1)); + ret = -LIBBPF_ERRNO__LIBELF; + goto out; + } + + ret = elf_find_func_offset(elf, file_name, func_name); + if (ret > 0) { + pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n", + func_name, file_name, archive_path, entry.data_offset, ret, + ret + entry.data_offset); + ret += entry.data_offset; + } + elf_end(elf); + +out: + zip_archive_close(archive); + return ret; +} + static const char *arch_specific_lib_paths(void) { /* @@ -10801,9 +11040,11 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, const char *binary_path, size_t func_offset, const struct bpf_uprobe_opts *opts) { - DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); + const char *archive_path = NULL, *archive_sep = NULL; char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL; - char full_binary_path[PATH_MAX]; + DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); + enum probe_attach_mode attach_mode; + char full_path[PATH_MAX]; struct bpf_link *link; size_t ref_ctr_off; int pfd, err; @@ -10813,6 +11054,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, if (!OPTS_VALID(opts, bpf_uprobe_opts)) return libbpf_err_ptr(-EINVAL); + attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT); retprobe = OPTS_GET(opts, retprobe, false); ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0); pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); @@ -10820,27 +11062,60 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, if (!binary_path) return libbpf_err_ptr(-EINVAL); - if (!strchr(binary_path, '/')) { - err = resolve_full_path(binary_path, full_binary_path, - sizeof(full_binary_path)); + /* Check if "binary_path" refers to an archive. 
*/ + archive_sep = strstr(binary_path, "!/"); + if (archive_sep) { + full_path[0] = '\0'; + libbpf_strlcpy(full_path, binary_path, + min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1))); + archive_path = full_path; + binary_path = archive_sep + 2; + } else if (!strchr(binary_path, '/')) { + err = resolve_full_path(binary_path, full_path, sizeof(full_path)); if (err) { pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", prog->name, binary_path, err); return libbpf_err_ptr(err); } - binary_path = full_binary_path; + binary_path = full_path; } func_name = OPTS_GET(opts, func_name, NULL); if (func_name) { long sym_off; - sym_off = elf_find_func_offset(binary_path, func_name); + if (archive_path) { + sym_off = elf_find_func_offset_from_archive(archive_path, binary_path, + func_name); + binary_path = archive_path; + } else { + sym_off = elf_find_func_offset_from_file(binary_path, func_name); + } if (sym_off < 0) return libbpf_err_ptr(sym_off); func_offset += sym_off; } legacy = determine_uprobe_perf_type() < 0; + switch (attach_mode) { + case PROBE_ATTACH_MODE_LEGACY: + legacy = true; + pe_opts.force_ioctl_attach = true; + break; + case PROBE_ATTACH_MODE_PERF: + if (legacy) + return libbpf_err_ptr(-ENOTSUP); + pe_opts.force_ioctl_attach = true; + break; + case PROBE_ATTACH_MODE_LINK: + if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK)) + return libbpf_err_ptr(-ENOTSUP); + break; + case PROBE_ATTACH_MODE_DEFAULT: + break; + default: + return libbpf_err_ptr(-EINVAL); + } + if (!legacy) { pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, func_offset, pid, ref_ctr_off); @@ -11014,6 +11289,7 @@ struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog, return libbpf_err_ptr(err); return link; } +#endif static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link) { @@ -11043,7 +11319,6 @@ static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_l free(name); return err; } -#endif static int determine_tracepoint_id(const char *tp_category, const char *tp_name) @@ -11460,22 +11735,30 @@ struct bpf_link *bpf_program__attach(const struct bpf_program *prog) return link; } +struct bpf_link_struct_ops { + struct bpf_link link; + int map_fd; +}; + static int bpf_link__detach_struct_ops(struct bpf_link *link) { + struct bpf_link_struct_ops *st_link; __u32 zero = 0; - if (bpf_map_delete_elem(link->fd, &zero)) - return -errno; + st_link = container_of(link, struct bpf_link_struct_ops, link); - return 0; + if (st_link->map_fd < 0) + /* w/o a real link */ + return bpf_map_delete_elem(link->fd, &zero); + + return close(link->fd); } struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map) { - struct bpf_struct_ops *st_ops; - struct bpf_link *link; - __u32 i, zero = 0; - int err; + struct bpf_link_struct_ops *link; + __u32 zero = 0; + int err, fd; if (!bpf_map__is_struct_ops(map) || map->fd == -1) return libbpf_err_ptr(-EINVAL); @@ -11484,31 +11767,72 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map) if (!link) return libbpf_err_ptr(-EINVAL); - st_ops = map->st_ops; - for (i = 0; i < btf_vlen(st_ops->type); i++) { - struct bpf_program *prog = st_ops->progs[i]; - void *kern_data; - int prog_fd; + /* kern_vdata should be prepared during the loading phase. */ + err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0); + /* It can be EBUSY if the map has been used to create or + * update a link before. 
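/* Editor's note: hedged usage sketch (not part of the diff) of the new
 * "archive!/member" path convention parsed above; the APK path, library and
 * symbol names are invented for illustration.
 */
static struct bpf_link *attach_apk_uprobe(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_uprobe_opts, opts, .func_name = "Java_com_example_fn");

	/* func_offset (0 here) is added on top of the resolved symbol offset */
	return bpf_program__attach_uprobe_opts(prog, -1 /* all pids */,
			"/data/app/com.example/base.apk!/lib/arm64-v8a/libapp.so",
			0, &opts);
}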
We don't allow updating the value of
+	 * a struct_ops once it is set. That ensures that the value
+	 * never changes. So, it is safe to skip EBUSY.
+	 */
+	if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
+		free(link);
+		return libbpf_err_ptr(err);
+	}
 
-		if (!prog)
-			continue;
+	link->link.detach = bpf_link__detach_struct_ops;
 
-		prog_fd = bpf_program__fd(prog);
-		kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
-		*(unsigned long *)kern_data = prog_fd;
+	if (!(map->def.map_flags & BPF_F_LINK)) {
+		/* w/o a real link */
+		link->link.fd = map->fd;
+		link->map_fd = -1;
+		return &link->link;
 	}
 
-	err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
-	if (err) {
-		err = -errno;
+	fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
+	if (fd < 0) {
 		free(link);
-		return libbpf_err_ptr(err);
+		return libbpf_err_ptr(fd);
 	}
 
-	link->detach = bpf_link__detach_struct_ops;
-	link->fd = map->fd;
+	link->link.fd = fd;
+	link->map_fd = map->fd;
 
-	return link;
+	return &link->link;
+}
+
+/*
+ * Swap the backing struct_ops map of a link with a new struct_ops map.
+ */
+int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
+{
+	struct bpf_link_struct_ops *st_ops_link;
+	__u32 zero = 0;
+	int err;
+
+	if (!bpf_map__is_struct_ops(map) || map->fd < 0)
+		return -EINVAL;
+
+	st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
+	/* Ensure the type of a link is correct */
+	if (st_ops_link->map_fd < 0)
+		return -EINVAL;
+
+	err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
+	/* It can be EBUSY if the map has been used to create or
+	 * update a link before. We don't allow updating the value of
+	 * a struct_ops once it is set. That ensures that the value
+	 * never changes. So, it is safe to skip EBUSY.
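/* Editor's note: hedged sketch (not part of the diff) of the intended
 * bpf_link__update_map() flow; map_a and map_b are assumed to be struct_ops
 * maps declared in SEC(".struct_ops.link") in an already-loaded object.
 */
static int swap_struct_ops(struct bpf_map *map_a, struct bpf_map *map_b)
{
	struct bpf_link *link;
	int err;

	link = bpf_map__attach_struct_ops(map_a);
	if (!link)
		return -errno;

	/* re-point the existing kernel link at a different struct_ops map */
	err = bpf_link__update_map(link, map_b);
	if (err) /* e.g. -EINVAL if link is not a real (BPF_F_LINK) struct_ops link */
		bpf_link__destroy(link);
	return err;
}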
+ */ + if (err && err != -EBUSY) + return err; + + err = bpf_link_update(link->fd, map->fd, NULL); + if (err < 0) + return err; + + st_ops_link->map_fd = map->fd; + + return 0; } typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr, @@ -11704,17 +12028,22 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, const size_t attr_sz = sizeof(struct perf_event_attr); struct perf_buffer_params p = {}; struct perf_event_attr attr; + __u32 sample_period; if (!OPTS_VALID(opts, perf_buffer_opts)) return libbpf_err_ptr(-EINVAL); + sample_period = OPTS_GET(opts, sample_period, 1); + if (!sample_period) + sample_period = 1; + memset(&attr, 0, attr_sz); attr.size = attr_sz; attr.config = PERF_COUNT_SW_BPF_OUTPUT; attr.type = PERF_TYPE_SOFTWARE; attr.sample_type = PERF_SAMPLE_RAW; - attr.sample_period = 1; - attr.wakeup_events = 1; + attr.sample_period = sample_period; + attr.wakeup_events = sample_period; p.attr = &attr; p.sample_cb = sample_cb; @@ -11767,7 +12096,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, /* best-effort sanity checks */ memset(&map, 0, sizeof(map)); map_info_len = sizeof(map); - err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len); + err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len); if (err) { err = -errno; /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return diff --git a/contrib/libbpf/src/libbpf.h b/contrib/libbpf/src/libbpf.h index eee883f0..0b736239 100644 --- a/contrib/libbpf/src/libbpf.h +++ b/contrib/libbpf/src/libbpf.h @@ -96,6 +96,14 @@ enum libbpf_print_level { typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level, const char *, va_list ap); +/** + * @brief **libbpf_set_print()** sets user-provided log callback function to + * be used for libbpf warnings and informational messages. + * @param fn The log print function. If NULL, libbpf won't print anything. + * @return Pointer to old print function. + * + * This function is thread-safe. + */ LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn); /* Hide internal to user */ @@ -174,6 +182,14 @@ struct bpf_object_open_opts { }; #define bpf_object_open_opts__last_field kernel_log_level +/** + * @brief **bpf_object__open()** creates a bpf_object by opening + * the BPF ELF object file pointed to by the passed path and loading it + * into memory. + * @param path BPF object file path. + * @return pointer to the new bpf_object; or NULL is returned on error, + * error code is stored in errno + */ LIBBPF_API struct bpf_object *bpf_object__open(const char *path); /** @@ -203,16 +219,46 @@ LIBBPF_API struct bpf_object * bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, const struct bpf_object_open_opts *opts); -/* Load/unload object into/from kernel */ +/** + * @brief **bpf_object__load()** loads BPF object into kernel. + * @param obj Pointer to a valid BPF object instance returned by + * **bpf_object__open*()** APIs + * @return 0, on success; negative error code, otherwise, error code is + * stored in errno + */ LIBBPF_API int bpf_object__load(struct bpf_object *obj); -LIBBPF_API void bpf_object__close(struct bpf_object *object); +/** + * @brief **bpf_object__close()** closes a BPF object and releases all + * resources. 
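/* Editor's note: hedged usage sketch (not part of the diff) for the new
 * sample_period option wired into perf_buffer__new() above; map_fd and the
 * two callbacks are assumptions. A period of 64 wakes userspace up only once
 * per 64 samples, trading latency for fewer wakeups.
 */
static void handle_event(void *ctx, int cpu, void *data, __u32 size);
static void handle_lost(void *ctx, int cpu, __u64 cnt);

static struct perf_buffer *open_pb(int map_fd)
{
	LIBBPF_OPTS(perf_buffer_opts, pb_opts, .sample_period = 64);

	/* returns NULL with errno set on failure */
	return perf_buffer__new(map_fd, 8 /* pages per CPU */,
				handle_event, handle_lost, NULL, &pb_opts);
}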
+ * @param obj Pointer to a valid BPF object + */ +LIBBPF_API void bpf_object__close(struct bpf_object *obj); -/* pin_maps and unpin_maps can both be called with a NULL path, in which case - * they will use the pin_path attribute of each map (and ignore all maps that - * don't have a pin_path set). +/** + * @brief **bpf_object__pin_maps()** pins each map contained within + * the BPF object at the passed directory. + * @param obj Pointer to a valid BPF object + * @param path A directory where maps should be pinned. + * @return 0, on success; negative error code, otherwise + * + * If `path` is NULL `bpf_map__pin` (which is being used on each map) + * will use the pin_path attribute of each map. In this case, maps that + * don't have a pin_path set will be ignored. */ LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path); + +/** + * @brief **bpf_object__unpin_maps()** unpins each map contained within + * the BPF object found in the passed directory. + * @param obj Pointer to a valid BPF object + * @param path A directory where pinned maps should be searched for. + * @return 0, on success; negative error code, otherwise + * + * If `path` is NULL `bpf_map__unpin` (which is being used on each map) + * will use the pin_path attribute of each map. In this case, maps that + * don't have a pin_path set will be ignored. + */ LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj, @@ -403,12 +449,15 @@ LIBBPF_API struct bpf_link * bpf_program__attach(const struct bpf_program *prog); struct bpf_perf_event_opts { - /* size of this struct, for forward/backward compatiblity */ + /* size of this struct, for forward/backward compatibility */ size_t sz; /* custom user-provided value fetchable through bpf_get_attach_cookie() */ __u64 bpf_cookie; + /* don't use BPF link when attach BPF program */ + bool force_ioctl_attach; + size_t :0; }; -#define bpf_perf_event_opts__last_field bpf_cookie +#define bpf_perf_event_opts__last_field force_ioctl_attach LIBBPF_API struct bpf_link * bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd); @@ -417,8 +466,25 @@ LIBBPF_API struct bpf_link * bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, const struct bpf_perf_event_opts *opts); +/** + * enum probe_attach_mode - the mode to attach kprobe/uprobe + * + * force libbpf to attach kprobe/uprobe in specific mode, -ENOTSUP will + * be returned if it is not supported by the kernel. 
+ */ +enum probe_attach_mode { + /* attach probe in latest supported mode by kernel */ + PROBE_ATTACH_MODE_DEFAULT = 0, + /* attach probe in legacy mode, using debugfs/tracefs */ + PROBE_ATTACH_MODE_LEGACY, + /* create perf event with perf_event_open() syscall */ + PROBE_ATTACH_MODE_PERF, + /* attach probe with BPF link */ + PROBE_ATTACH_MODE_LINK, +}; + struct bpf_kprobe_opts { - /* size of this struct, for forward/backward compatiblity */ + /* size of this struct, for forward/backward compatibility */ size_t sz; /* custom user-provided value fetchable through bpf_get_attach_cookie() */ __u64 bpf_cookie; @@ -426,9 +492,11 @@ struct bpf_kprobe_opts { size_t offset; /* kprobe is return probe */ bool retprobe; + /* kprobe attach mode */ + enum probe_attach_mode attach_mode; size_t :0; }; -#define bpf_kprobe_opts__last_field retprobe +#define bpf_kprobe_opts__last_field attach_mode LIBBPF_API struct bpf_link * bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe, @@ -462,7 +530,7 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, const struct bpf_kprobe_multi_opts *opts); struct bpf_ksyscall_opts { - /* size of this struct, for forward/backward compatiblity */ + /* size of this struct, for forward/backward compatibility */ size_t sz; /* custom user-provided value fetchable through bpf_get_attach_cookie() */ __u64 bpf_cookie; @@ -508,7 +576,7 @@ bpf_program__attach_ksyscall(const struct bpf_program *prog, const struct bpf_ksyscall_opts *opts); struct bpf_uprobe_opts { - /* size of this struct, for forward/backward compatiblity */ + /* size of this struct, for forward/backward compatibility */ size_t sz; /* offset of kernel reference counted USDT semaphore, added in * a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe") @@ -526,9 +594,11 @@ struct bpf_uprobe_opts { * binary_path. */ const char *func_name; + /* uprobe attach mode */ + enum probe_attach_mode attach_mode; size_t :0; }; -#define bpf_uprobe_opts__last_field func_name +#define bpf_uprobe_opts__last_field attach_mode /** * @brief **bpf_program__attach_uprobe()** attaches a BPF program @@ -602,7 +672,7 @@ bpf_program__attach_usdt(const struct bpf_program *prog, const struct bpf_usdt_opts *opts); struct bpf_tracepoint_opts { - /* size of this struct, for forward/backward compatiblity */ + /* size of this struct, for forward/backward compatibility */ size_t sz; /* custom user-provided value fetchable through bpf_get_attach_cookie() */ __u64 bpf_cookie; @@ -651,6 +721,7 @@ bpf_program__attach_freplace(const struct bpf_program *prog, struct bpf_map; LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map); +LIBBPF_API int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map); struct bpf_iter_attach_opts { size_t sz; /* size of this struct for forward/backward compatibility */ @@ -823,10 +894,57 @@ LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize * @return true, if the map is an internal map; false, otherwise */ LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); + +/** + * @brief **bpf_map__set_pin_path()** sets the path attribute that tells where the + * BPF map should be pinned. This does not actually create the 'pin'. 
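/* Editor's note: hedged sketch (not part of the diff) tying together the
 * pinning helpers documented here; the bpffs path is an example.
 */
static int pin_via_path_attr(struct bpf_map *map)
{
	int err;

	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map"); /* records intent only */
	if (err)
		return err;
	return bpf_map__pin(map, NULL); /* NULL falls back to the recorded pin_path */
}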
+ * @param map The bpf_map + * @param path The path + * @return 0, on success; negative error, otherwise + */ LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path); + +/** + * @brief **bpf_map__pin_path()** gets the path attribute that tells where the + * BPF map should be pinned. + * @param map The bpf_map + * @return The path string; which can be NULL + */ LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map); + +/** + * @brief **bpf_map__is_pinned()** tells the caller whether or not the + * passed map has been pinned via a 'pin' file. + * @param map The bpf_map + * @return true, if the map is pinned; false, otherwise + */ LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map); + +/** + * @brief **bpf_map__pin()** creates a file that serves as a 'pin' + * for the BPF map. This increments the reference count on the + * BPF map which will keep the BPF map loaded even after the + * userspace process which loaded it has exited. + * @param map The bpf_map to pin + * @param path A file path for the 'pin' + * @return 0, on success; negative error, otherwise + * + * If `path` is NULL the maps `pin_path` attribute will be used. If this is + * also NULL, an error will be returned and the map will not be pinned. + */ LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path); + +/** + * @brief **bpf_map__unpin()** removes the file that serves as a + * 'pin' for the BPF map. + * @param map The bpf_map to unpin + * @param path A file path for the 'pin' + * @return 0, on success; negative error, otherwise + * + * The `path` parameter can be NULL, in which case the `pin_path` + * map attribute is unpinned. If both the `path` parameter and + * `pin_path` map attribute are set, they must be equal. + */ LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path); LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd); @@ -957,9 +1075,10 @@ struct bpf_xdp_query_opts { __u32 hw_prog_id; /* output */ __u32 skb_prog_id; /* output */ __u8 attach_mode; /* output */ + __u64 feature_flags; /* output */ size_t :0; }; -#define bpf_xdp_query_opts__last_field attach_mode +#define bpf_xdp_query_opts__last_field feature_flags LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts); @@ -1018,7 +1137,7 @@ struct user_ring_buffer; typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size); struct ring_buffer_opts { - size_t sz; /* size of this struct, for forward/backward compatiblity */ + size_t sz; /* size of this struct, for forward/backward compatibility */ }; #define ring_buffer_opts__last_field sz @@ -1039,7 +1158,8 @@ struct user_ring_buffer_opts { #define user_ring_buffer_opts__last_field sz -/* @brief **user_ring_buffer__new()** creates a new instance of a user ring +/** + * @brief **user_ring_buffer__new()** creates a new instance of a user ring * buffer. * * @param map_fd A file descriptor to a BPF_MAP_TYPE_USER_RINGBUF map. @@ -1050,7 +1170,8 @@ struct user_ring_buffer_opts { LIBBPF_API struct user_ring_buffer * user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts); -/* @brief **user_ring_buffer__reserve()** reserves a pointer to a sample in the +/** + * @brief **user_ring_buffer__reserve()** reserves a pointer to a sample in the * user ring buffer. * @param rb A pointer to a user ring buffer. * @param size The size of the sample, in bytes. 
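/* Editor's note: hedged usage sketch (not part of the diff) for the
 * user-ringbuf calls documented in this block; map_fd is assumed to refer to
 * a BPF_MAP_TYPE_USER_RINGBUF map.
 */
static int send_one_sample(int map_fd)
{
	struct user_ring_buffer *rb;
	__u32 *sample;

	rb = user_ring_buffer__new(map_fd, NULL);
	if (!rb)
		return -errno;

	sample = user_ring_buffer__reserve(rb, sizeof(*sample));
	if (!sample) {
		user_ring_buffer__free(rb);
		return -errno; /* e.g. -E2BIG or -ENOSPC */
	}
	*sample = 42;
	/* makes the sample visible to the BPF program's bpf_user_ringbuf_drain() */
	user_ring_buffer__submit(rb, sample);
	user_ring_buffer__free(rb);
	return 0;
}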
@@ -1070,7 +1191,8 @@ user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts); */ LIBBPF_API void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size); -/* @brief **user_ring_buffer__reserve_blocking()** reserves a record in the +/** + * @brief **user_ring_buffer__reserve_blocking()** reserves a record in the * ring buffer, possibly blocking for up to @timeout_ms until a sample becomes * available. * @param rb The user ring buffer. @@ -1114,7 +1236,8 @@ LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, __u32 size, int timeout_ms); -/* @brief **user_ring_buffer__submit()** submits a previously reserved sample +/** + * @brief **user_ring_buffer__submit()** submits a previously reserved sample * into the ring buffer. * @param rb The user ring buffer. * @param sample A reserved sample. @@ -1124,7 +1247,8 @@ LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, */ LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample); -/* @brief **user_ring_buffer__discard()** discards a previously reserved sample. +/** + * @brief **user_ring_buffer__discard()** discards a previously reserved sample. * @param rb The user ring buffer. * @param sample A reserved sample. * @@ -1133,7 +1257,8 @@ LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *samp */ LIBBPF_API void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample); -/* @brief **user_ring_buffer__free()** frees a ring buffer that was previously +/** + * @brief **user_ring_buffer__free()** frees a ring buffer that was previously * created with **user_ring_buffer__new()**. * @param rb The user ring buffer being freed. */ @@ -1149,8 +1274,10 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt); /* common use perf buffer options */ struct perf_buffer_opts { size_t sz; + __u32 sample_period; + size_t :0; }; -#define perf_buffer_opts__last_field sz +#define perf_buffer_opts__last_field sample_period /** * @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified @@ -1375,7 +1502,7 @@ LIBBPF_API void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s); struct gen_loader_opts { - size_t sz; /* size of this struct, for forward/backward compatiblity */ + size_t sz; /* size of this struct, for forward/backward compatibility */ const char *data; const char *insns; __u32 data_sz; @@ -1393,13 +1520,13 @@ enum libbpf_tristate { }; struct bpf_linker_opts { - /* size of this struct, for forward/backward compatiblity */ + /* size of this struct, for forward/backward compatibility */ size_t sz; }; #define bpf_linker_opts__last_field sz struct bpf_linker_file_opts { - /* size of this struct, for forward/backward compatiblity */ + /* size of this struct, for forward/backward compatibility */ size_t sz; }; #define bpf_linker_file_opts__last_field sz @@ -1442,7 +1569,7 @@ typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cook struct bpf_link **link); struct libbpf_prog_handler_opts { - /* size of this struct, for forward/backward compatiblity */ + /* size of this struct, for forward/backward compatibility */ size_t sz; /* User-provided value that is passed to prog_setup_fn, * prog_prepare_load_fn, and prog_attach_fn callbacks. 
Allows user to
diff --git a/contrib/libbpf/src/libbpf.map b/contrib/libbpf/src/libbpf.map
index 71bf5691..a5aa3a38 100644
--- a/contrib/libbpf/src/libbpf.map
+++ b/contrib/libbpf/src/libbpf.map
@@ -382,3 +382,12 @@ LIBBPF_1.1.0 {
 		user_ring_buffer__reserve_blocking;
 		user_ring_buffer__submit;
 } LIBBPF_1.0.0;
+
+LIBBPF_1.2.0 {
+	global:
+		bpf_btf_get_info_by_fd;
+		bpf_link__update_map;
+		bpf_link_get_info_by_fd;
+		bpf_map_get_info_by_fd;
+		bpf_prog_get_info_by_fd;
+} LIBBPF_1.1.0;
diff --git a/contrib/libbpf/src/libbpf_internal.h b/contrib/libbpf/src/libbpf_internal.h
index 377642ff..e4d05662 100644
--- a/contrib/libbpf/src/libbpf_internal.h
+++ b/contrib/libbpf/src/libbpf_internal.h
@@ -543,6 +543,7 @@ static inline int ensure_good_fd(int fd)
 		fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
 		saved_errno = errno;
 		close(old_fd);
+		errno = saved_errno;
 		if (fd < 0) {
 			pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno);
 			errno = saved_errno;
diff --git a/contrib/libbpf/src/libbpf_probes.c b/contrib/libbpf/src/libbpf_probes.c
index b44fcbb4..4f3bc968 100644
--- a/contrib/libbpf/src/libbpf_probes.c
+++ b/contrib/libbpf/src/libbpf_probes.c
@@ -12,11 +12,94 @@
 #include
 #include
 #include
+#include
 
 #include "bpf.h"
 #include "libbpf.h"
 #include "libbpf_internal.h"
 
+/* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release,
+ * but Ubuntu provides /proc/version_signature file, as described at
+ * https://ubuntu.com/kernel, with example contents below, which we
+ * can use to get a proper LINUX_VERSION_CODE.
+ *
+ *   Ubuntu 5.4.0-12.15-generic 5.4.8
+ *
+ * In the above, 5.4.8 is what the kernel is actually expecting, while
+ * the uname() call will return 5.4.0 in info.release.
+ */
+static __u32 get_ubuntu_kernel_version(void)
+{
+	const char *ubuntu_kver_file = "/proc/version_signature";
+	__u32 major, minor, patch;
+	int ret;
+	FILE *f;
+
+	if (faccessat(AT_FDCWD, ubuntu_kver_file, R_OK, AT_EACCESS) != 0)
+		return 0;
+
+	f = fopen(ubuntu_kver_file, "r");
+	if (!f)
+		return 0;
+
+	ret = fscanf(f, "%*s %*s %u.%u.%u\n", &major, &minor, &patch);
+	fclose(f);
+	if (ret != 3)
+		return 0;
+
+	return KERNEL_VERSION(major, minor, patch);
+}
+
+/* On Debian LINUX_VERSION_CODE doesn't correspond to info.release.
+ * Instead, it is provided in info.version. Example content for
+ * Debian 10 looks like the below.
+ *
+ *   utsname::release   4.19.0-22-amd64
+ *   utsname::version   #1 SMP Debian 4.19.260-1 (2022-09-29)
+ *
+ * In the above, 4.19.260 is what the kernel is actually expecting, while
+ * the uname() call will return 4.19.0 in info.release.
+ */
+static __u32 get_debian_kernel_version(struct utsname *info)
+{
+	__u32 major, minor, patch;
+	char *p;
+
+	p = strstr(info->version, "Debian ");
+	if (!p) {
+		/* This is not a Debian kernel. */
+		return 0;
+	}
+
+	if (sscanf(p, "Debian %u.%u.%u", &major, &minor, &patch) != 3)
+		return 0;
+
+	return KERNEL_VERSION(major, minor, patch);
+}
+
+__u32 get_kernel_version(void)
+{
+	__u32 major, minor, patch, version;
+	struct utsname info;
+
+	/* Check if this is an Ubuntu kernel. */
+	version = get_ubuntu_kernel_version();
+	if (version != 0)
+		return version;
+
+	uname(&info);
+
+	/* Check if this is a Debian kernel.
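/* Editor's note: a small sketch (not part of the diff) of how the packed
 * KERNEL_VERSION() value produced by the helpers above can be unpacked again,
 * e.g. for logging; print_kver is a hypothetical helper.
 */
#include <stdio.h>

static void print_kver(__u32 v)
{
	/* KERNEL_VERSION(a, b, c) packs as (a << 16) + (b << 8) + c */
	printf("%u.%u.%u\n", v >> 16, (v >> 8) & 0xff, v & 0xff);
}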
*/ + version = get_debian_kernel_version(&info); + if (version != 0) + return version; + + if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3) + return 0; + + return KERNEL_VERSION(major, minor, patch); +} + static int probe_prog_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, size_t insns_cnt, char *log_buf, size_t log_buf_sz) diff --git a/contrib/libbpf/src/libbpf_version.h b/contrib/libbpf/src/libbpf_version.h index e944f5bc..1fd2eeac 100644 --- a/contrib/libbpf/src/libbpf_version.h +++ b/contrib/libbpf/src/libbpf_version.h @@ -4,6 +4,6 @@ #define __LIBBPF_VERSION_H #define LIBBPF_MAJOR_VERSION 1 -#define LIBBPF_MINOR_VERSION 1 +#define LIBBPF_MINOR_VERSION 2 #endif /* __LIBBPF_VERSION_H */ diff --git a/contrib/libbpf/src/linker.c b/contrib/libbpf/src/linker.c index 4ac02c28..5ced96d9 100644 --- a/contrib/libbpf/src/linker.c +++ b/contrib/libbpf/src/linker.c @@ -1115,7 +1115,19 @@ static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src if (src->shdr->sh_type != SHT_NOBITS) { tmp = realloc(dst->raw_data, dst_final_sz); - if (!tmp) + /* If dst_align_sz == 0, realloc() behaves in a special way: + * 1. When dst->raw_data is NULL it returns: + * "either NULL or a pointer suitable to be passed to free()" [1]. + * 2. When dst->raw_data is not-NULL it frees dst->raw_data and returns NULL, + * thus invalidating any "pointer suitable to be passed to free()" obtained + * at step (1). + * + * The dst_align_sz > 0 check avoids error exit after (2), otherwise + * dst->raw_data would be freed again in bpf_linker__free(). + * + * [1] man 3 realloc + */ + if (!tmp && dst_align_sz > 0) return -ENOMEM; dst->raw_data = tmp; @@ -1997,7 +2009,6 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj, static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj) { struct src_sec *src_symtab = &obj->secs[obj->symtab_sec_idx]; - struct dst_sec *dst_symtab; int i, err; for (i = 1; i < obj->sec_cnt; i++) { @@ -2030,9 +2041,6 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob return -1; } - /* add_dst_sec() above could have invalidated linker->secs */ - dst_symtab = &linker->secs[linker->symtab_sec_idx]; - /* shdr->sh_link points to SYMTAB */ dst_sec->shdr->sh_link = linker->symtab_sec_idx; @@ -2049,16 +2057,13 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob dst_rel = dst_sec->raw_data + src_sec->dst_off; n = src_sec->shdr->sh_size / src_sec->shdr->sh_entsize; for (j = 0; j < n; j++, src_rel++, dst_rel++) { - size_t src_sym_idx = ELF64_R_SYM(src_rel->r_info); - size_t sym_type = ELF64_R_TYPE(src_rel->r_info); - Elf64_Sym *src_sym, *dst_sym; - size_t dst_sym_idx; + size_t src_sym_idx, dst_sym_idx, sym_type; + Elf64_Sym *src_sym; src_sym_idx = ELF64_R_SYM(src_rel->r_info); src_sym = src_symtab->data->d_buf + sizeof(*src_sym) * src_sym_idx; dst_sym_idx = obj->sym_map[src_sym_idx]; - dst_sym = dst_symtab->raw_data + sizeof(*dst_sym) * dst_sym_idx; dst_rel->r_offset += src_linked_sec->dst_off; sym_type = ELF64_R_TYPE(src_rel->r_info); dst_rel->r_info = ELF64_R_INFO(dst_sym_idx, sym_type); diff --git a/contrib/libbpf/src/netlink.c b/contrib/libbpf/src/netlink.c index 35104580..84dd5fa1 100644 --- a/contrib/libbpf/src/netlink.c +++ b/contrib/libbpf/src/netlink.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -39,9 +40,15 @@ struct xdp_id_md { int ifindex; __u32 flags; struct xdp_link_info info; + __u64 
feature_flags; }; -static int libbpf_netlink_open(__u32 *nl_pid) +struct xdp_features_md { + int ifindex; + __u64 flags; +}; + +static int libbpf_netlink_open(__u32 *nl_pid, int proto) { struct sockaddr_nl sa; socklen_t addrlen; @@ -51,7 +58,7 @@ static int libbpf_netlink_open(__u32 *nl_pid) memset(&sa, 0, sizeof(sa)); sa.nl_family = AF_NETLINK; - sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE); + sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, proto); if (sock < 0) return -errno; @@ -212,14 +219,14 @@ static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq, } static int libbpf_netlink_send_recv(struct libbpf_nla_req *req, - __dump_nlmsg_t parse_msg, + int proto, __dump_nlmsg_t parse_msg, libbpf_dump_nlmsg_t parse_attr, void *cookie) { __u32 nl_pid = 0; int sock, ret; - sock = libbpf_netlink_open(&nl_pid); + sock = libbpf_netlink_open(&nl_pid, proto); if (sock < 0) return sock; @@ -238,6 +245,43 @@ static int libbpf_netlink_send_recv(struct libbpf_nla_req *req, return ret; } +static int parse_genl_family_id(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn, + void *cookie) +{ + struct genlmsghdr *gnl = NLMSG_DATA(nh); + struct nlattr *na = (struct nlattr *)((void *)gnl + GENL_HDRLEN); + struct nlattr *tb[CTRL_ATTR_FAMILY_ID + 1]; + __u16 *id = cookie; + + libbpf_nla_parse(tb, CTRL_ATTR_FAMILY_ID, na, + NLMSG_PAYLOAD(nh, sizeof(*gnl)), NULL); + if (!tb[CTRL_ATTR_FAMILY_ID]) + return NL_CONT; + + *id = libbpf_nla_getattr_u16(tb[CTRL_ATTR_FAMILY_ID]); + return NL_DONE; +} + +static int libbpf_netlink_resolve_genl_family_id(const char *name, + __u16 len, __u16 *id) +{ + struct libbpf_nla_req req = { + .nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN), + .nh.nlmsg_type = GENL_ID_CTRL, + .nh.nlmsg_flags = NLM_F_REQUEST, + .gnl.cmd = CTRL_CMD_GETFAMILY, + .gnl.version = 2, + }; + int err; + + err = nlattr_add(&req, CTRL_ATTR_FAMILY_NAME, name, len); + if (err < 0) + return err; + + return libbpf_netlink_send_recv(&req, NETLINK_GENERIC, + parse_genl_family_id, NULL, id); +} + static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd, __u32 flags) { @@ -271,7 +315,7 @@ static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd, } nlattr_end_nested(&req, nla); - return libbpf_netlink_send_recv(&req, NULL, NULL, NULL); + return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL); } int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts) @@ -357,6 +401,29 @@ static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb) return 0; } +static int parse_xdp_features(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn, + void *cookie) +{ + struct genlmsghdr *gnl = NLMSG_DATA(nh); + struct nlattr *na = (struct nlattr *)((void *)gnl + GENL_HDRLEN); + struct nlattr *tb[NETDEV_CMD_MAX + 1]; + struct xdp_features_md *md = cookie; + __u32 ifindex; + + libbpf_nla_parse(tb, NETDEV_CMD_MAX, na, + NLMSG_PAYLOAD(nh, sizeof(*gnl)), NULL); + + if (!tb[NETDEV_A_DEV_IFINDEX] || !tb[NETDEV_A_DEV_XDP_FEATURES]) + return NL_CONT; + + ifindex = libbpf_nla_getattr_u32(tb[NETDEV_A_DEV_IFINDEX]); + if (ifindex != md->ifindex) + return NL_CONT; + + md->flags = libbpf_nla_getattr_u64(tb[NETDEV_A_DEV_XDP_FEATURES]); + return NL_DONE; +} + int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts) { struct libbpf_nla_req req = { @@ -366,6 +433,10 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts) .ifinfo.ifi_family = AF_PACKET, }; struct xdp_id_md xdp_id = {}; + struct xdp_features_md md 
 static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
 					 __u32 flags)
 {
@@ -271,7 +315,7 @@ static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
 	}
 	nlattr_end_nested(&req, nla);
 
-	return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+	return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
 }
 
 int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts)
@@ -357,6 +401,29 @@ static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb)
 	return 0;
 }
 
+static int parse_xdp_features(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
+			      void *cookie)
+{
+	struct genlmsghdr *gnl = NLMSG_DATA(nh);
+	struct nlattr *na = (struct nlattr *)((void *)gnl + GENL_HDRLEN);
+	struct nlattr *tb[NETDEV_CMD_MAX + 1];
+	struct xdp_features_md *md = cookie;
+	__u32 ifindex;
+
+	libbpf_nla_parse(tb, NETDEV_CMD_MAX, na,
+			 NLMSG_PAYLOAD(nh, sizeof(*gnl)), NULL);
+
+	if (!tb[NETDEV_A_DEV_IFINDEX] || !tb[NETDEV_A_DEV_XDP_FEATURES])
+		return NL_CONT;
+
+	ifindex = libbpf_nla_getattr_u32(tb[NETDEV_A_DEV_IFINDEX]);
+	if (ifindex != md->ifindex)
+		return NL_CONT;
+
+	md->flags = libbpf_nla_getattr_u64(tb[NETDEV_A_DEV_XDP_FEATURES]);
+	return NL_DONE;
+}
+
 int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
 {
 	struct libbpf_nla_req req = {
@@ -366,6 +433,10 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
 		.ifinfo.ifi_family = AF_PACKET,
 	};
 	struct xdp_id_md xdp_id = {};
+	struct xdp_features_md md = {
+		.ifindex = ifindex,
+	};
+	__u16 id;
 	int err;
 
 	if (!OPTS_VALID(opts, bpf_xdp_query_opts))
@@ -382,7 +453,7 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
 	xdp_id.ifindex = ifindex;
 	xdp_id.flags = xdp_flags;
 
-	err = libbpf_netlink_send_recv(&req, __dump_link_nlmsg,
+	err = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, __dump_link_nlmsg,
 				       get_xdp_info, &xdp_id);
 	if (err)
 		return libbpf_err(err);
@@ -393,6 +464,37 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
 	OPTS_SET(opts, skb_prog_id, xdp_id.info.skb_prog_id);
 	OPTS_SET(opts, attach_mode, xdp_id.info.attach_mode);
 
+	if (!OPTS_HAS(opts, feature_flags))
+		return 0;
+
+	err = libbpf_netlink_resolve_genl_family_id("netdev", sizeof("netdev"), &id);
+	if (err < 0) {
+		if (err == -ENOENT) {
+			opts->feature_flags = 0;
+			goto skip_feature_flags;
+		}
+		return libbpf_err(err);
+	}
+
+	memset(&req, 0, sizeof(req));
+	req.nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
+	req.nh.nlmsg_flags = NLM_F_REQUEST;
+	req.nh.nlmsg_type = id;
+	req.gnl.cmd = NETDEV_CMD_DEV_GET;
+	req.gnl.version = 2;
+
+	err = nlattr_add(&req, NETDEV_A_DEV_IFINDEX, &ifindex, sizeof(ifindex));
+	if (err < 0)
+		return libbpf_err(err);
+
+	err = libbpf_netlink_send_recv(&req, NETLINK_GENERIC,
+				       parse_xdp_features, NULL, &md);
+	if (err)
+		return libbpf_err(err);
+
+	opts->feature_flags = md.flags;
+
+skip_feature_flags:
 	return 0;
 }
 
@@ -493,7 +595,7 @@ static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags)
 	if (ret < 0)
 		return ret;
 
-	return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+	return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
 }
 
 static int tc_qdisc_create_excl(struct bpf_tc_hook *hook)
@@ -593,7 +695,7 @@ static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd)
 	int len, ret;
 
 	memset(&info, 0, info_len);
-	ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+	ret = bpf_prog_get_info_by_fd(fd, &info, &info_len);
 	if (ret < 0)
 		return ret;
 
@@ -673,7 +775,8 @@ int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
 
 	info.opts = opts;
 
-	ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
+	ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL,
+				       &info);
 	if (ret < 0)
 		return libbpf_err(ret);
 	if (!info.processed)
@@ -739,7 +842,7 @@ static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
 			return ret;
 	}
 
-	return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+	return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
 }
 
 int bpf_tc_detach(const struct bpf_tc_hook *hook,
@@ -804,7 +907,8 @@ int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
 
 	info.opts = opts;
 
-	ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
+	ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL,
+				       &info);
 	if (ret < 0)
 		return libbpf_err(ret);
 	if (!info.processed)
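With the netlink.c changes in place, bpf_xdp_query() reports driver XDP capabilities whenever the caller's opts struct is new enough to carry feature_flags (the OPTS_HAS check keeps old binaries working, and the flags silently read as 0 on kernels without the netdev genetlink family). A minimal consumer sketch, assuming a kernel that ships the netdev UAPI header (the NETDEV_XDP_ACT_* flags come from linux/netdev.h, available since kernel 6.3):

	#include <errno.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <bpf/libbpf.h>
	#include <linux/netdev.h>

	/* Sketch: print the XDP feature flags advertised for an interface. */
	int print_xdp_features(const char *ifname)
	{
		LIBBPF_OPTS(bpf_xdp_query_opts, opts);
		int ifindex = if_nametoindex(ifname);
		int err;

		if (!ifindex)
			return -errno;

		err = bpf_xdp_query(ifindex, 0, &opts);
		if (err)
			return err;

		printf("%s: xdp features 0x%llx, AF_XDP zero-copy: %s\n", ifname,
		       (unsigned long long)opts.feature_flags,
		       opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY ? "yes" : "no");
		return 0;
	}
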
diff --git a/contrib/libbpf/src/nlattr.c b/contrib/libbpf/src/nlattr.c
index 3900d052..975e265e 100644
--- a/contrib/libbpf/src/nlattr.c
+++ b/contrib/libbpf/src/nlattr.c
@@ -178,7 +178,7 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh)
 	hlen += nlmsg_len(&err->msg);
 
 	attr = (struct nlattr *) ((void *) err + hlen);
-	alen = nlh->nlmsg_len - hlen;
+	alen = (void *)nlh + nlh->nlmsg_len - (void *)attr;
 
 	if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen,
 			     extack_policy) != 0) {
diff --git a/contrib/libbpf/src/nlattr.h b/contrib/libbpf/src/nlattr.h
index 4d15ae2f..d92d1c1d 100644
--- a/contrib/libbpf/src/nlattr.h
+++ b/contrib/libbpf/src/nlattr.h
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <linux/genetlink.h>
 
 /* avoid multiple definition of netlink features */
 #define __LINUX_NETLINK_H
@@ -58,6 +59,7 @@ struct libbpf_nla_req {
 	union {
 		struct ifinfomsg ifinfo;
 		struct tcmsg tc;
+		struct genlmsghdr gnl;
 	};
 	char buf[128];
 };
@@ -89,11 +91,21 @@ static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla)
 	return *(uint8_t *)libbpf_nla_data(nla);
 }
 
+static inline uint16_t libbpf_nla_getattr_u16(const struct nlattr *nla)
+{
+	return *(uint16_t *)libbpf_nla_data(nla);
+}
+
 static inline uint32_t libbpf_nla_getattr_u32(const struct nlattr *nla)
 {
 	return *(uint32_t *)libbpf_nla_data(nla);
 }
 
+static inline uint64_t libbpf_nla_getattr_u64(const struct nlattr *nla)
+{
+	return *(uint64_t *)libbpf_nla_data(nla);
+}
+
 static inline const char *libbpf_nla_getattr_str(const struct nlattr *nla)
 {
 	return (const char *)libbpf_nla_data(nla);
diff --git a/contrib/libbpf/src/relo_core.c b/contrib/libbpf/src/relo_core.c
index c4b0e81a..a26b2f5f 100644
--- a/contrib/libbpf/src/relo_core.c
+++ b/contrib/libbpf/src/relo_core.c
@@ -1551,9 +1551,6 @@ int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const st
 	if (level <= 0)
 		return -EINVAL;
 
-	local_t = btf_type_by_id(local_btf, local_id);
-	targ_t = btf_type_by_id(targ_btf, targ_id);
-
 recur:
 	depth--;
 	if (depth < 0)
diff --git a/contrib/libbpf/src/ringbuf.c b/contrib/libbpf/src/ringbuf.c
index 47855af2..02199364 100644
--- a/contrib/libbpf/src/ringbuf.c
+++ b/contrib/libbpf/src/ringbuf.c
@@ -83,7 +83,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
 
 	memset(&info, 0, sizeof(info));
 
-	err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
+	err = bpf_map_get_info_by_fd(map_fd, &info, &len);
 	if (err) {
 		err = -errno;
 		pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
@@ -359,7 +359,7 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
 
 	memset(&info, 0, sizeof(info));
 
-	err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
+	err = bpf_map_get_info_by_fd(map_fd, &info, &len);
 	if (err) {
 		err = -errno;
 		pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n",
 			map_fd, err);
diff --git a/contrib/libbpf/src/usdt.bpf.h b/contrib/libbpf/src/usdt.bpf.h
index fdfd235e..0bd4c135 100644
--- a/contrib/libbpf/src/usdt.bpf.h
+++ b/contrib/libbpf/src/usdt.bpf.h
@@ -130,7 +130,10 @@ int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res)
 	if (!spec)
 		return -ESRCH;
 
-	if (arg_num >= BPF_USDT_MAX_ARG_CNT || arg_num >= spec->arg_cnt)
+	if (arg_num >= BPF_USDT_MAX_ARG_CNT)
+		return -ENOENT;
+
+	barrier_var(arg_num);
+	if (arg_num >= spec->arg_cnt)
 		return -ENOENT;
 
 	arg_spec = &spec->args[arg_num];
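The barrier_var() insertion above is the interesting part of the usdt.bpf.h hunk: without it, clang is free to fold the two range checks back into a single comparison, and the BPF verifier can then lose the individual bound on arg_num that proves the spec->args[arg_num] access stays below BPF_USDT_MAX_ARG_CNT. barrier_var() is libbpf's stock optimization barrier from bpf_helpers.h, essentially:

	/* Paraphrase of libbpf's bpf_helpers.h definition: an empty inline asm
	 * that claims to modify the variable, so the compiler's value tracking
	 * (and any check merging) stops at this point.
	 */
	#define barrier_var(var) asm volatile("" : "+r"(var))
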
diff --git a/contrib/libbpf/src/usdt.c b/contrib/libbpf/src/usdt.c
index 8940fe61..577267fe 100644
--- a/contrib/libbpf/src/usdt.c
+++ b/contrib/libbpf/src/usdt.c
@@ -855,8 +855,11 @@ static int bpf_link_usdt_detach(struct bpf_link *link)
 			 * system is so exhausted on memory, it's the least of user's
 			 * concerns, probably.
 			 * So just do our best here to return those IDs to usdt_manager.
+			 * Another edge case where we can legitimately get NULL back is
+			 * when new_cnt is zero: realloc() is allowed to return NULL for
+			 * zero-sized requests, so treat that case as success as well.
 			 */
-			if (new_free_ids) {
+			if (new_free_ids || new_cnt == 0) {
 				memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
 				       usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
 				man->free_spec_ids = new_free_ids;
@@ -1144,12 +1147,13 @@ static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
 	return 0;
 }
 
-static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg);
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz);
 
 static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
 {
+	struct usdt_arg_spec *arg;
 	const char *s;
-	int len;
+	int arg_sz, len;
 
 	spec->usdt_cookie = usdt_cookie;
 	spec->arg_cnt = 0;
@@ -1162,10 +1166,25 @@ static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note,
 			return -E2BIG;
 		}
 
-		len = parse_usdt_arg(s, spec->arg_cnt, &spec->args[spec->arg_cnt]);
+		arg = &spec->args[spec->arg_cnt];
+		len = parse_usdt_arg(s, spec->arg_cnt, arg, &arg_sz);
 		if (len < 0)
 			return len;
 
+		arg->arg_signed = arg_sz < 0;
+		if (arg_sz < 0)
+			arg_sz = -arg_sz;
+
+		switch (arg_sz) {
+		case 1: case 2: case 4: case 8:
+			arg->arg_bitshift = 64 - arg_sz * 8;
+			break;
+		default:
+			pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+				spec->arg_cnt, s, arg_sz);
+			return -EINVAL;
+		}
+
 		s += len;
 		spec->arg_cnt++;
 	}
@@ -1222,13 +1241,13 @@ static int calc_pt_regs_off(const char *reg_name)
 	return -ENOENT;
 }
 
-static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
 {
 	char reg_name[16];
-	int arg_sz, len, reg_off;
+	int len, reg_off;
 	long off;
 
-	if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n", &arg_sz, &off, reg_name, &len) == 3) {
+	if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n", arg_sz, &off, reg_name, &len) == 3) {
 		/* Memory dereference case, e.g., -4@-20(%rbp) */
 		arg->arg_type = USDT_ARG_REG_DEREF;
 		arg->val_off = off;
@@ -1236,7 +1255,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 		if (reg_off < 0)
 			return reg_off;
 		arg->reg_off = reg_off;
-	} else if (sscanf(arg_str, " %d @ ( %%%15[^)] ) %n", &arg_sz, reg_name, &len) == 2) {
+	} else if (sscanf(arg_str, " %d @ ( %%%15[^)] ) %n", arg_sz, reg_name, &len) == 2) {
 		/* Memory dereference case without offset, e.g., 8@(%rsp) */
 		arg->arg_type = USDT_ARG_REG_DEREF;
 		arg->val_off = 0;
@@ -1244,7 +1263,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 		if (reg_off < 0)
 			return reg_off;
 		arg->reg_off = reg_off;
-	} else if (sscanf(arg_str, " %d @ %%%15s %n", &arg_sz, reg_name, &len) == 2) {
+	} else if (sscanf(arg_str, " %d @ %%%15s %n", arg_sz, reg_name, &len) == 2) {
 		/* Register read case, e.g., -4@%eax */
 		arg->arg_type = USDT_ARG_REG;
 		arg->val_off = 0;
@@ -1253,7 +1272,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 		if (reg_off < 0)
 			return reg_off;
 		arg->reg_off = reg_off;
-	} else if (sscanf(arg_str, " %d @ $%ld %n", &arg_sz, &off, &len) == 2) {
+	} else if (sscanf(arg_str, " %d @ $%ld %n", arg_sz, &off, &len) == 2) {
 		/* Constant value case, e.g., 4@$71 */
 		arg->arg_type = USDT_ARG_CONST;
 		arg->val_off = off;
@@ -1263,20 +1282,6 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 		return -EINVAL;
 	}
 
-	arg->arg_signed = arg_sz < 0;
-	if (arg_sz < 0)
-		arg_sz = -arg_sz;
-
-	switch (arg_sz) {
-	case 1: case 2: case 4: case 8:
-		arg->arg_bitshift = 64 - arg_sz * 8;
-		break;
-	default:
-		pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
-			arg_num, arg_str, arg_sz);
-		return -EINVAL;
-	}
-
 	return len;
 }
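For reference while reading the per-architecture hunks that follow: the strings being parsed come from the ELF .note.stapsdt Arguments field, one N@LOCATION token per argument, where N is the size in bytes (negative meaning signed) and LOCATION is an architecture-specific operand. A worked x86-64 example of what the branches above produce (field names from struct usdt_arg_spec; the register offset shown is an assumption about what calc_pt_regs_off returns for "rbp"):

	/* Hypothetical note string for two arguments: "-4@-20(%rbp) 8@%rax"
	 *
	 * First token, "-4@-20(%rbp)" (memory dereference branch):
	 *   arg_sz            = -4  -> arg_signed = true, 4 bytes
	 *   arg->arg_type     = USDT_ARG_REG_DEREF
	 *   arg->val_off      = -20
	 *   arg->reg_off      = offsetof(struct pt_regs, rbp)
	 *   arg->arg_bitshift = 64 - 4 * 8 = 32   (now set in parse_usdt_spec)
	 *
	 * Second token, "8@%rax" (register read branch):
	 *   arg_sz            = 8   -> unsigned, 8 bytes
	 *   arg->arg_type     = USDT_ARG_REG
	 *   arg->reg_off      = offsetof(struct pt_regs, rax)
	 *   arg->arg_bitshift = 0
	 */
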
@@ -1284,13 +1289,13 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 
 /* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */
 
-static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
 {
 	unsigned int reg;
-	int arg_sz, len;
+	int len;
 	long off;
 
-	if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", &arg_sz, &off, &reg, &len) == 3) {
+	if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", arg_sz, &off, &reg, &len) == 3) {
 		/* Memory dereference case, e.g., -2@-28(%r15) */
 		arg->arg_type = USDT_ARG_REG_DEREF;
 		arg->val_off = off;
@@ -1299,7 +1304,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 			return -EINVAL;
 		}
 		arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
-	} else if (sscanf(arg_str, " %d @ %%r%u %n", &arg_sz, &reg, &len) == 2) {
+	} else if (sscanf(arg_str, " %d @ %%r%u %n", arg_sz, &reg, &len) == 2) {
 		/* Register read case, e.g., -8@%r0 */
 		arg->arg_type = USDT_ARG_REG;
 		arg->val_off = 0;
@@ -1308,7 +1313,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 			return -EINVAL;
 		}
 		arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
-	} else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
+	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
 		/* Constant value case, e.g., 4@71 */
 		arg->arg_type = USDT_ARG_CONST;
 		arg->val_off = off;
@@ -1318,20 +1323,6 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 		return -EINVAL;
 	}
 
-	arg->arg_signed = arg_sz < 0;
-	if (arg_sz < 0)
-		arg_sz = -arg_sz;
-
-	switch (arg_sz) {
-	case 1: case 2: case 4: case 8:
-		arg->arg_bitshift = 64 - arg_sz * 8;
-		break;
-	default:
-		pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
-			arg_num, arg_str, arg_sz);
-		return -EINVAL;
-	}
-
 	return len;
 }
@@ -1351,13 +1342,13 @@ static int calc_pt_regs_off(const char *reg_name)
 	return -ENOENT;
 }
 
-static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
 {
 	char reg_name[16];
-	int arg_sz, len, reg_off;
+	int len, reg_off;
 	long off;
 
-	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9], %ld ] %n", &arg_sz, reg_name, &off, &len) == 3) {
+	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , %ld ] %n", arg_sz, reg_name, &off, &len) == 3) {
 		/* Memory dereference case, e.g., -4@[sp, 96] */
 		arg->arg_type = USDT_ARG_REG_DEREF;
 		arg->val_off = off;
@@ -1365,7 +1356,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 		if (reg_off < 0)
 			return reg_off;
 		arg->reg_off = reg_off;
-	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", &arg_sz, reg_name, &len) == 2) {
+	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
 		/* Memory dereference case, e.g., -4@[sp] */
 		arg->arg_type = USDT_ARG_REG_DEREF;
 		arg->val_off = 0;
@@ -1373,12 +1364,12 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 		if (reg_off < 0)
 			return reg_off;
 		arg->reg_off = reg_off;
-	} else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
+	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
 		/* Constant value case, e.g., 4@5 */
 		arg->arg_type = USDT_ARG_CONST;
 		arg->val_off = off;
 		arg->reg_off = 0;
-	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", &arg_sz, reg_name, &len) == 2) {
+	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
 		/* Register read case, e.g., -8@x4 */
 		arg->arg_type = USDT_ARG_REG;
 		arg->val_off = 0;
@@ -1391,20 +1382,6 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 		return -EINVAL;
 	}
 
-	arg->arg_signed = arg_sz < 0;
-	if (arg_sz < 0)
-		arg_sz = -arg_sz;
-
-	switch (arg_sz) {
-	case 1: case 2: case 4: case 8:
-		arg->arg_bitshift = 64 - arg_sz * 8;
-		break;
-	default:
-		pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
-			arg_num, arg_str, arg_sz);
-		return -EINVAL;
-	}
-
 	return len;
 }
@@ -1459,13 +1436,13 @@ static int calc_pt_regs_off(const char *reg_name)
 	return -ENOENT;
 }
 
-static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
 {
 	char reg_name[16];
-	int arg_sz, len, reg_off;
+	int len, reg_off;
 	long off;
 
-	if (sscanf(arg_str, " %d @ %ld ( %15[a-z0-9] ) %n", &arg_sz, &off, reg_name, &len) == 3) {
+	if (sscanf(arg_str, " %d @ %ld ( %15[a-z0-9] ) %n", arg_sz, &off, reg_name, &len) == 3) {
 		/* Memory dereference case, e.g., -8@-88(s0) */
 		arg->arg_type = USDT_ARG_REG_DEREF;
 		arg->val_off = off;
@@ -1473,12 +1450,12 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 		if (reg_off < 0)
 			return reg_off;
 		arg->reg_off = reg_off;
-	} else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
+	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
 		/* Constant value case, e.g., 4@5 */
 		arg->arg_type = USDT_ARG_CONST;
 		arg->val_off = off;
 		arg->reg_off = 0;
-	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", &arg_sz, reg_name, &len) == 2) {
+	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
 		/* Register read case, e.g., -8@a1 */
 		arg->arg_type = USDT_ARG_REG;
 		arg->val_off = 0;
@@ -1491,17 +1468,83 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 		return -EINVAL;
 	}
 
-	arg->arg_signed = arg_sz < 0;
-	if (arg_sz < 0)
-		arg_sz = -arg_sz;
+	return len;
+}
 
-	switch (arg_sz) {
-	case 1: case 2: case 4: case 8:
-		arg->arg_bitshift = 64 - arg_sz * 8;
-		break;
-	default:
-		pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
-			arg_num, arg_str, arg_sz);
+#elif defined(__arm__)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+	static struct {
+		const char *name;
+		size_t pt_regs_off;
+	} reg_map[] = {
+		{ "r0", offsetof(struct pt_regs, uregs[0]) },
+		{ "r1", offsetof(struct pt_regs, uregs[1]) },
+		{ "r2", offsetof(struct pt_regs, uregs[2]) },
+		{ "r3", offsetof(struct pt_regs, uregs[3]) },
+		{ "r4", offsetof(struct pt_regs, uregs[4]) },
+		{ "r5", offsetof(struct pt_regs, uregs[5]) },
+		{ "r6", offsetof(struct pt_regs, uregs[6]) },
+		{ "r7", offsetof(struct pt_regs, uregs[7]) },
+		{ "r8", offsetof(struct pt_regs, uregs[8]) },
+		{ "r9", offsetof(struct pt_regs, uregs[9]) },
+		{ "r10", offsetof(struct pt_regs, uregs[10]) },
+		{ "fp", offsetof(struct pt_regs, uregs[11]) },
+		{ "ip", offsetof(struct pt_regs, uregs[12]) },
+		{ "sp", offsetof(struct pt_regs, uregs[13]) },
+		{ "lr", offsetof(struct pt_regs, uregs[14]) },
+		{ "pc", offsetof(struct pt_regs, uregs[15]) },
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
+		if (strcmp(reg_name, reg_map[i].name) == 0)
+			return reg_map[i].pt_regs_off;
+	}
+
+	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+	return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
+{
+	char reg_name[16];
+	int len, reg_off;
+	long off;
+
+	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , #%ld ] %n",
+		   arg_sz, reg_name, &off, &len) == 3) {
+		/* Memory dereference case, e.g., -4@[fp, #96] */
+		arg->arg_type = USDT_ARG_REG_DEREF;
+		arg->val_off = off;
+		reg_off = calc_pt_regs_off(reg_name);
+		if (reg_off < 0)
+			return reg_off;
+		arg->reg_off = reg_off;
+	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
+		/* Memory dereference case, e.g., -4@[sp] */
+		arg->arg_type = USDT_ARG_REG_DEREF;
+		arg->val_off = 0;
+		reg_off = calc_pt_regs_off(reg_name);
+		if (reg_off < 0)
+			return reg_off;
+		arg->reg_off = reg_off;
+	} else if (sscanf(arg_str, " %d @ #%ld %n", arg_sz, &off, &len) == 2) {
+		/* Constant value case, e.g., 4@#5 */
+		arg->arg_type = USDT_ARG_CONST;
+		arg->val_off = off;
+		arg->reg_off = 0;
+	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
+		/* Register read case, e.g., -8@r4 */
+		arg->arg_type = USDT_ARG_REG;
+		arg->val_off = 0;
+		reg_off = calc_pt_regs_off(reg_name);
+		if (reg_off < 0)
+			return reg_off;
+		arg->reg_off = reg_off;
+	} else {
+		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
 		return -EINVAL;
 	}
 
@@ -1510,7 +1553,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
 
 #else
 
-static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
 {
 	pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n");
 	return -ENOTSUP;
 }
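Taken together, the usdt.c changes add 32-bit ARM argument parsing and move the size/signedness validation into parse_usdt_spec(), so each architecture now only implements operand parsing. Nothing changes for consumers; attaching to a probe still looks roughly like the sketch below (the object, program, path, and provider/probe names are made up for illustration):

	#include <errno.h>
	#include <bpf/libbpf.h>

	/* Sketch: attach a BPF program to a hypothetical libc USDT. */
	static int attach_sketch(struct bpf_object *obj, pid_t pid)
	{
		struct bpf_program *prog;
		struct bpf_link *link;

		prog = bpf_object__find_program_by_name(obj, "handle_setjmp");
		if (!prog)
			return -ENOENT;

		link = bpf_program__attach_usdt(prog, pid, "/lib/libc.so.6",
						"libc", "setjmp", NULL);
		return link ? 0 : -errno;
	}
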
diff --git a/contrib/libbpf/src/zip.c b/contrib/libbpf/src/zip.c
new file mode 100644
index 00000000..3f26d629
--- /dev/null
+++ b/contrib/libbpf/src/zip.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/*
+ * Routines for dealing with .zip archives.
+ *
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "libbpf_internal.h"
+#include "zip.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpacked"
+#pragma GCC diagnostic ignored "-Wattributes"
+
+/* Specification of ZIP file format can be found here:
+ * https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
+ * For a high level overview of the structure of a ZIP file see
+ * sections 4.3.1 - 4.3.6.
+ *
+ * Data structures appearing in ZIP files do not contain any
+ * padding and they might be misaligned. To allow us to safely
+ * operate on pointers to such structures and their members, we
+ * declare the types as packed.
+ */
+
+#define END_OF_CD_RECORD_MAGIC 0x06054b50
+
+/* See section 4.3.16 of the spec. */
+struct end_of_cd_record {
+	/* Magic value equal to END_OF_CD_RECORD_MAGIC */
+	__u32 magic;
+
+	/* Number of the file containing this structure or 0xFFFF if ZIP64 archive.
+	 * Zip archive might span multiple files (disks).
+	 */
+	__u16 this_disk;
+
+	/* Number of the file containing the beginning of the central directory or
+	 * 0xFFFF if ZIP64 archive.
+	 */
+	__u16 cd_disk;
+
+	/* Number of central directory records on this disk or 0xFFFF if ZIP64
+	 * archive.
+	 */
+	__u16 cd_records;
+
+	/* Number of central directory records on all disks or 0xFFFF if ZIP64
+	 * archive.
+	 */
+	__u16 cd_records_total;
+
+	/* Size of the central directory record or 0xFFFFFFFF if ZIP64 archive. */
+	__u32 cd_size;
+
+	/* Offset of the central directory from the beginning of the archive or
+	 * 0xFFFFFFFF if ZIP64 archive.
+	 */
+	__u32 cd_offset;
+
+	/* Length of comment data following end of central directory record. */
+	__u16 comment_length;
+
+	/* Up to 64k of arbitrary bytes. */
+	/* uint8_t comment[comment_length] */
+} __attribute__((packed));
+
+#define CD_FILE_HEADER_MAGIC 0x02014b50
+#define FLAG_ENCRYPTED (1 << 0)
+#define FLAG_HAS_DATA_DESCRIPTOR (1 << 3)
+
+/* See section 4.3.12 of the spec. */
+struct cd_file_header {
+	/* Magic value equal to CD_FILE_HEADER_MAGIC. */
+	__u32 magic;
+	__u16 version;
+	/* Minimum zip version needed to extract the file. */
+	__u16 min_version;
+	__u16 flags;
+	__u16 compression;
+	__u16 last_modified_time;
+	__u16 last_modified_date;
+	__u32 crc;
+	__u32 compressed_size;
+	__u32 uncompressed_size;
+	__u16 file_name_length;
+	__u16 extra_field_length;
+	__u16 file_comment_length;
+	/* Number of the disk where the file starts or 0xFFFF if ZIP64 archive. */
+	__u16 disk;
+	__u16 internal_attributes;
+	__u32 external_attributes;
+	/* Offset from the start of the disk containing the local file header to the
+	 * start of the local file header.
+	 */
+	__u32 offset;
+} __attribute__((packed));
+
+#define LOCAL_FILE_HEADER_MAGIC 0x04034b50
+
+/* See section 4.3.7 of the spec. */
+struct local_file_header {
+	/* Magic value equal to LOCAL_FILE_HEADER_MAGIC. */
+	__u32 magic;
+	/* Minimum zip version needed to extract the file. */
+	__u16 min_version;
+	__u16 flags;
+	__u16 compression;
+	__u16 last_modified_time;
+	__u16 last_modified_date;
+	__u32 crc;
+	__u32 compressed_size;
+	__u32 uncompressed_size;
+	__u16 file_name_length;
+	__u16 extra_field_length;
+} __attribute__((packed));
+
+#pragma GCC diagnostic pop
+
+struct zip_archive {
+	void *data;
+	__u32 size;
+	__u32 cd_offset;
+	__u32 cd_records;
+};
+
+static void *check_access(struct zip_archive *archive, __u32 offset, __u32 size)
+{
+	if (offset + size > archive->size || offset > offset + size)
+		return NULL;
+
+	return archive->data + offset;
+}
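check_access() is the only bounds-checking primitive in this parser, so it is worth noting what the second clause is for: it catches __u32 wraparound in offset + size, which the first comparison alone would miss. A quick illustration of the case it rejects:

	/* Hypothetical inputs: archive->size == 100, offset == 0xFFFFFFF0,
	 * size == 0x20. "offset + size" wraps to 0x10, so the first check
	 * ("0x10 > 100") would pass; "offset > offset + size" detects the
	 * wrap and the access is refused.
	 */
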
+
+/* Returns 0 on success, -EINVAL on error and -ENOTSUP if the eocd indicates the
+ * archive uses features which are not supported.
+ */
+static int try_parse_end_of_cd(struct zip_archive *archive, __u32 offset)
+{
+	__u16 comment_length, cd_records;
+	struct end_of_cd_record *eocd;
+	__u32 cd_offset, cd_size;
+
+	eocd = check_access(archive, offset, sizeof(*eocd));
+	if (!eocd || eocd->magic != END_OF_CD_RECORD_MAGIC)
+		return -EINVAL;
+
+	comment_length = eocd->comment_length;
+	if (offset + sizeof(*eocd) + comment_length != archive->size)
+		return -EINVAL;
+
+	cd_records = eocd->cd_records;
+	if (eocd->this_disk != 0 || eocd->cd_disk != 0 || eocd->cd_records_total != cd_records)
+		/* This is a valid eocd, but we only support single-file non-ZIP64 archives. */
+		return -ENOTSUP;
+
+	cd_offset = eocd->cd_offset;
+	cd_size = eocd->cd_size;
+	if (!check_access(archive, cd_offset, cd_size))
+		return -EINVAL;
+
+	archive->cd_offset = cd_offset;
+	archive->cd_records = cd_records;
+	return 0;
+}
+
+static int find_cd(struct zip_archive *archive)
+{
+	int64_t limit, offset;
+	int rc = -EINVAL;
+
+	if (archive->size <= sizeof(struct end_of_cd_record))
+		return -EINVAL;
+
+	/* Because the end of central directory ends with a variable length array of
+	 * up to 0xFFFF bytes we can't know exactly where it starts and need to
+	 * search for it at the end of the file, scanning the (limit, offset] range.
+	 */
+	offset = archive->size - sizeof(struct end_of_cd_record);
+	limit = (int64_t)offset - (1 << 16);
+
+	for (; offset >= 0 && offset > limit && rc != 0; offset--) {
+		rc = try_parse_end_of_cd(archive, offset);
+		if (rc == -ENOTSUP)
+			break;
+	}
+	return rc;
+}
+
+struct zip_archive *zip_archive_open(const char *path)
+{
+	struct zip_archive *archive;
+	int err, fd;
+	off_t size;
+	void *data;
+
+	fd = open(path, O_RDONLY | O_CLOEXEC);
+	if (fd < 0)
+		return ERR_PTR(-errno);
+
+	size = lseek(fd, 0, SEEK_END);
+	if (size == (off_t)-1 || size > UINT32_MAX) {
+		close(fd);
+		return ERR_PTR(-EINVAL);
+	}
+
+	data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+	err = -errno;
+	close(fd);
+
+	if (data == MAP_FAILED)
+		return ERR_PTR(err);
+
+	archive = malloc(sizeof(*archive));
+	if (!archive) {
+		munmap(data, size);
+		return ERR_PTR(-ENOMEM);
+	};
+
+	archive->data = data;
+	archive->size = size;
+
+	err = find_cd(archive);
+	if (err) {
+		munmap(data, size);
+		free(archive);
+		return ERR_PTR(err);
+	}
+
+	return archive;
+}
+
+void zip_archive_close(struct zip_archive *archive)
+{
+	munmap(archive->data, archive->size);
+	free(archive);
+}
+
+static struct local_file_header *local_file_header_at_offset(struct zip_archive *archive,
+							      __u32 offset)
+{
+	struct local_file_header *lfh;
+
+	lfh = check_access(archive, offset, sizeof(*lfh));
+	if (!lfh || lfh->magic != LOCAL_FILE_HEADER_MAGIC)
+		return NULL;
+
+	return lfh;
+}
+
+static int get_entry_at_offset(struct zip_archive *archive, __u32 offset, struct zip_entry *out)
+{
+	struct local_file_header *lfh;
+	__u32 compressed_size;
+	const char *name;
+	void *data;
+
+	lfh = local_file_header_at_offset(archive, offset);
+	if (!lfh)
+		return -EINVAL;
+
+	offset += sizeof(*lfh);
+	if ((lfh->flags & FLAG_ENCRYPTED) || (lfh->flags & FLAG_HAS_DATA_DESCRIPTOR))
+		return -EINVAL;
+
+	name = check_access(archive, offset, lfh->file_name_length);
+	if (!name)
+		return -EINVAL;
+
+	offset += lfh->file_name_length;
+	if (!check_access(archive, offset, lfh->extra_field_length))
+		return -EINVAL;
+
+	offset += lfh->extra_field_length;
+	compressed_size = lfh->compressed_size;
+	data = check_access(archive, offset, compressed_size);
+	if (!data)
+		return -EINVAL;
+
+	out->compression = lfh->compression;
+	out->name_length = lfh->file_name_length;
+	out->name = name;
+	out->data = data;
+	out->data_length = compressed_size;
+	out->data_offset = offset;
+
+	return 0;
+}
+
+int zip_archive_find_entry(struct zip_archive *archive, const char *file_name,
+			   struct zip_entry *out)
+{
+	size_t file_name_length = strlen(file_name);
+	__u32 i, offset = archive->cd_offset;
+
+	for (i = 0; i < archive->cd_records; ++i) {
+		__u16 cdfh_name_length, cdfh_flags;
+		struct cd_file_header *cdfh;
+		const char *cdfh_name;
+
+		cdfh = check_access(archive, offset, sizeof(*cdfh));
+		if (!cdfh || cdfh->magic != CD_FILE_HEADER_MAGIC)
+			return -EINVAL;
+
+		offset += sizeof(*cdfh);
+		cdfh_name_length = cdfh->file_name_length;
+		cdfh_name = check_access(archive, offset, cdfh_name_length);
+		if (!cdfh_name)
+			return -EINVAL;
+
+		cdfh_flags = cdfh->flags;
+		if ((cdfh_flags & FLAG_ENCRYPTED) == 0 &&
+		    (cdfh_flags & FLAG_HAS_DATA_DESCRIPTOR) == 0 &&
+		    file_name_length == cdfh_name_length &&
+		    memcmp(file_name, archive->data + offset, file_name_length) == 0) {
+			return get_entry_at_offset(archive, cdfh->offset, out);
+		}
+
+		offset += cdfh_name_length;
+		offset += cdfh->extra_field_length;
+		offset += cdfh->file_comment_length;
+	}
+
+	return -ENOENT;
+}
diff --git a/contrib/libbpf/src/zip.h b/contrib/libbpf/src/zip.h
new file mode 100644
index 00000000..1c1bb21f
--- /dev/null
+++ b/contrib/libbpf/src/zip.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+#ifndef __LIBBPF_ZIP_H
+#define __LIBBPF_ZIP_H
+
+#include <linux/types.h>
+
+/* Represents an open zip archive.
+ * Only basic ZIP files are supported, in particular the following are not
+ * supported:
+ * - encryption
+ * - streaming
+ * - multi-part ZIP files
+ * - ZIP64
+ */
+struct zip_archive;
+
+/* Carries information on name, compression method, and data corresponding to a
+ * file in a zip archive.
+ */
+struct zip_entry {
+	/* Compression method as defined in pkzip spec. 0 means data is uncompressed. */
+	__u16 compression;
+
+	/* Non-null terminated name of the file. */
+	const char *name;
+	/* Length of the file name. */
+	__u16 name_length;
+
+	/* Pointer to the file data. */
+	const void *data;
+	/* Length of the file data. */
+	__u32 data_length;
+	/* Offset of the file data within the archive. */
+	__u32 data_offset;
+};
+
+/* Open a zip archive. On failure returns an ERR_PTR-encoded error code
+ * (matching the implementation in zip.c), not NULL.
+ */
+struct zip_archive *zip_archive_open(const char *path);
+
+/* Close a zip archive and release resources. */
+void zip_archive_close(struct zip_archive *archive);
+
+/* Look up an entry corresponding to a file in given zip archive. */
+int zip_archive_find_entry(struct zip_archive *archive, const char *name, struct zip_entry *out);
+
+#endif
diff --git a/contrib/v1.1.0.tar.gz b/contrib/v1.1.0.tar.gz
deleted file mode 100644
index b59d8a07..00000000
Binary files a/contrib/v1.1.0.tar.gz and /dev/null differ
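Upstream added this zip reader so libbpf can attach USDT probes to libraries shipped inside unextracted APK archives on Android; only stored (uncompressed) members can be used directly, since entry->data points straight into the archive's read-only mapping. A minimal internal-caller sketch, assuming access to libbpf's internal ERR_PTR conventions (the paths and helper name here are made up):

	#include <errno.h>
	#include "libbpf_internal.h"
	#include "zip.h"

	/* Sketch: locate an uncompressed (stored) member inside an .apk. */
	static int find_stored_member(const char *apk_path, const char *member,
				      struct zip_entry *entry)
	{
		struct zip_archive *archive;
		long err;
		int ret;

		archive = zip_archive_open(apk_path);
		err = libbpf_get_error(archive);
		if (err)
			return (int)err;

		ret = zip_archive_find_entry(archive, member, entry);
		if (!ret && entry->compression != 0)
			ret = -EOPNOTSUPP; /* only stored entries are usable in place */

		/* entry->data points into the mapping: keep the archive open
		 * for as long as the entry is in use. */
		if (ret)
			zip_archive_close(archive);
		return ret;
	}
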
diff --git a/non-GPL/Events/EventsTrace/EventsTrace.c b/non-GPL/Events/EventsTrace/EventsTrace.c
index e1231a5d..02c5e781 100644
--- a/non-GPL/Events/EventsTrace/EventsTrace.c
+++ b/non-GPL/Events/EventsTrace/EventsTrace.c
@@ -794,7 +794,6 @@ int main(int argc, char **argv)
         ebpf_set_verbose_logging();
 
     err = ebpf_event_ctx__new(&ctx, event_ctx_callback, g_events_env);
-
     if (err < 0) {
         fprintf(stderr, "Could not create event context: %d %s\n", err, strerror(-err));
         goto out;
diff --git a/non-GPL/Events/Lib/EbpfEvents.c b/non-GPL/Events/Lib/EbpfEvents.c
index e3fb8ec8..b3a2dc21 100644
--- a/non-GPL/Events/Lib/EbpfEvents.c
+++ b/non-GPL/Events/Lib/EbpfEvents.c
@@ -60,6 +60,57 @@ static int ring_buf_cb(void *ctx, void *data, size_t size)
     return 0;
 }
 
+/* https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-union */
+static int resolve_btf_field_off_recur(struct btf *btf,
+                                       int base_off,
+                                       const struct btf_type *t,
+                                       const char *field)
+{
+    struct btf_member *m = btf_members(t);
+
+    for (int i = 0; i < btf_vlen(t); i++, m++) {
+        const char *name = btf__name_by_offset(btf, m->name_off);
+        if (name == NULL)
+            continue;
+
+        if (strcmp(name, field) == 0)
+            return (base_off + m->offset) / 8;
+
+        const struct btf_type *m_type = btf__type_by_id(btf, m->type);
+        if (!btf_is_struct(m_type) && !btf_is_union(m_type))
+            continue;
+
+        // Recursively traverse structs and unions
+        int ret = resolve_btf_field_off_recur(btf, base_off + m->offset, m_type, field);
+        if (ret == -1)
+            continue;
+
+        return ret;
+    }
+
+    return -1;
+}
+
+/* Find the BTF field relocation offset for a named field of a kernel struct */
+static int resolve_btf_field_off(struct btf *btf, const char *struct_name, const char *field)
+{
+    int ret = -1;
+
+    __s32 btf_id = btf__find_by_name_kind(btf, struct_name, BTF_KIND_STRUCT);
+    if (btf_id <= 0)
+        goto out;
+
+    const struct btf_type *t = btf__type_by_id(btf, btf_id);
+    if (t == NULL)
+        goto out;
+    if (!btf_is_struct(t))
+        goto out;
+
+    return resolve_btf_field_off_recur(btf, 0, t, field);
+out:
+    return ret;
+}
+
 const struct btf_type *resolve_btf_type_by_func(struct btf *btf, const char *func)
 {
     if (func == NULL) {
@@ -72,7 +123,6 @@ const struct btf_type *resolve_btf_type_by_func(struct btf *btf, const char *fun
             continue;
 
         const struct btf_type *btf_type_ptr = btf__type_by_id(btf, btf_type);
-
         if (!btf_is_func(btf_type_ptr))
             continue;
@@ -94,6 +144,7 @@ const struct btf_type *resolve_btf_type_by_func(struct btf *btf, const char *fun
     }
 
 out:
+    verbose("resolve_btf_type_by_func(%s): not found\n", func);
     return NULL;
 }
@@ -105,8 +156,6 @@ static int resolve_btf_func_arg_idx(struct btf *btf, const char *func, const cha
     const struct btf_type *proto_btf_type_ptr = resolve_btf_type_by_func(btf, func);
     if (!proto_btf_type_ptr)
         goto out;
-    if (!arg)
-        goto out;
 
     struct btf_param *params = btf_params(proto_btf_type_ptr);
     for (int j = 0; j < btf_vlen(proto_btf_type_ptr); j++) {
@@ -119,6 +168,7 @@ static int resolve_btf_func_arg_idx(struct btf *btf, const char *func, const cha
             goto out;
         }
     }
+    verbose("resolve_btf_func_arg_idx(%s, %s): not found\n", func, arg);
 
 out:
     return ret;
@@ -147,6 +197,8 @@ static int resolve_btf_func_ret_idx(struct btf *btf, const char *func)
         int r = resolve_btf_func_arg_idx(btf, #func, #arg); \
         if (r >= 0) \
             __r = 0; \
+        else \
+            verbose("fill func arg idx (%s, %s): %d\n", #func, #arg, r); \
         obj->rodata->arg__##func##__##arg##__ = r; \
         __r; \
     })
@@ -158,6 +210,8 @@ static int resolve_btf_func_ret_idx(struct btf *btf, const char *func)
         int r = resolve_btf_func_ret_idx(btf, #func); \
         if (r >= 0) \
             __r = 0; \
+        else \
+            verbose("fill func ret idx (%s): %d\n", #func, r); \
         obj->rodata->ret__##func##__ = r; \
         __r; \
     })
@@ -172,13 +226,39 @@ static int resolve_btf_func_ret_idx(struct btf *btf, const char *func)
         if (r >= 0) { \
             obj->rodata->exists__##func##__##arg##__ = true; \
             __r = 0; \
+        } else { \
+            verbose("fill func arg exists (%s, %s): %d\n", #func, #arg, r); \
         } \
         __r; \
     })
 
+/* Given a struct name and a field name, returns the field offset
+ * within the struct.
+ */
+#define FILL_FIELD_OFFSET(obj, btf, struct, field) \
+    ({ \
+        int __r = -1; \
+        int r = resolve_btf_field_off(btf, #struct, #field); \
+        if (r >= 0) \
+            __r = 0; \
+        else \
+            verbose("fill field offset (%s, %s): %d\n", #struct, #field, r); \
+        obj->rodata->off__##struct##__##field##__ = r; \
+        __r; \
+    })
+
 /* Given a function name, returns whether it exists in the provided BTF. */
 #define BTF_FUNC_EXISTS(btf, func) ({ (bool)resolve_btf_type_by_func(btf, #func); })
 
+/* Given a struct name and a field name, returns whether the field exists within the struct. */
+#define BTF_FIELD_EXISTS(btf, struct, field) \
+    ({ \
+        bool __r = false; \
+        if (resolve_btf_field_off(btf, #struct, #field) >= 0) \
+            __r = true; \
+        __r; \
+    })
+
 /* Fill context relocations for kernel functions
  * You can add additional functions here by using the macros defined above.
  *
@@ -198,6 +278,9 @@ static int probe_fill_relos(struct btf *btf, struct EventProbe_bpf *obj)
     }
     err = err ?: FILL_FUNC_RET_IDX(obj, btf, vfs_rename);
 
+    if (BTF_FIELD_EXISTS(btf, iov_iter, __iov))
+        err = err ?: FILL_FIELD_OFFSET(obj, btf, iov_iter, __iov);
+
     return err;
 }
@@ -583,6 +666,7 @@ int ebpf_event_ctx__new(struct ebpf_event_ctx **ctx, ebpf_event_handler_fn cb, u
         /* EventProbe_bpf__open doesn't report errors, hard to find something
          * that fits perfect here
          */
+        verbose("EventProbe_bpf__open: %d\n", err);
         err = -ENOENT;
         goto out_destroy_probe;
     }
@@ -590,24 +674,34 @@ int ebpf_event_ctx__new(struct ebpf_event_ctx **ctx, ebpf_event_handler_fn cb, u
     probe->rodata->consumer_pid = getpid();
 
     err = probe_fill_relos(btf, probe);
-    if (err != 0)
+    if (err != 0) {
+        verbose("probe_fill_relos: %d\n", err);
         goto out_destroy_probe;
+    }
 
     err = probe_resize_maps(probe);
-    if (err != 0)
+    if (err != 0) {
+        verbose("probe_resize_maps: %d\n", err);
        goto out_destroy_probe;
+    }
 
     err = probe_set_autoload(btf, probe, features);
-    if (err != 0)
+    if (err != 0) {
+        verbose("probe_set_autoload: %d\n", err);
         goto out_destroy_probe;
+    }
 
     err = EventProbe_bpf__load(probe);
-    if (err != 0)
+    if (err != 0) {
+        verbose("EventProbe_bpf__load: %d\n", err);
         goto out_destroy_probe;
+    }
 
     err = EventProbe_bpf__attach(probe);
-    if (err != 0)
+    if (err != 0) {
+        verbose("EventProbe_bpf__attach: %d\n", err);
         goto out_destroy_probe;
+    }
 
     if (!ctx)
         goto out_destroy_probe;
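The field-offset plumbing mirrors the existing argument-index relocations: user space resolves the offset of iov_iter.__iov from the running kernel's BTF (newer kernels renamed the iov member to __iov) and writes it into the probe's read-only data, where the BPF side picks it up and falls back to CO-RE reading iov when the rodata value is still the default of 0. Conceptually the user-space side reduces to this sketch (the rodata symbol name is what DECL_FIELD_OFFSET(iov_iter, __iov) expands to):

	/* Sketch: how the runtime decides what to hand the BPF side. */
	static void sketch_fill_iov_off(struct btf *btf, struct EventProbe_bpf *obj)
	{
		int off = resolve_btf_field_off(btf, "iov_iter", "__iov");

		/* off is -1 when the kernel still calls the field "iov"; the
		 * rodata default of 0 then steers the probe to BPF_CORE_READ. */
		if (off >= 0)
			obj->rodata->off__iov_iter____iov__ = off;
	}
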
diff --git a/testing/kernel_builder/Dockerfile.new b/testing/kernel_builder/Dockerfile.new
new file mode 100644
index 00000000..989251f5
--- /dev/null
+++ b/testing/kernel_builder/Dockerfile.new
@@ -0,0 +1,19 @@
+from debian:bullseye
+
+RUN dpkg --add-architecture arm64
+RUN apt-get -y update
+RUN apt-get -y install \
+    git gcc make libssl-dev bison flex bc libelf-dev python3 \
+    gcc-aarch64-linux-gnu curl xz-utils dwarves \
+    cmake zlib1g libdw1 libdw-dev
+
+RUN git clone https://git.kernel.org/pub/scm/devel/pahole/pahole.git /pahole
+WORKDIR /pahole
+RUN git submodule update --init --recursive
+RUN mkdir /pahole/build
+WORKDIR /pahole/build
+RUN cmake -DBUILD_SHARED_LIBS=OFF ..
+RUN make install
+
+WORKDIR /work
+CMD ./build.sh
diff --git a/testing/kernel_builder/Dockerfile b/testing/kernel_builder/Dockerfile.old
similarity index 73%
rename from testing/kernel_builder/Dockerfile
rename to testing/kernel_builder/Dockerfile.old
index 4b11f806..9d25eba5 100644
--- a/testing/kernel_builder/Dockerfile
+++ b/testing/kernel_builder/Dockerfile.old
@@ -1,7 +1,6 @@
 from debian:bullseye
 
 RUN dpkg --add-architecture arm64
-RUN echo "deb http://deb.debian.org/debian bullseye-backports main" >> /etc/apt/sources.list
 RUN apt-get -y update
 RUN apt-get -y install \
     git gcc make libssl-dev bison flex bc libelf-dev python3 \
diff --git a/testing/kernel_builder/Makefile b/testing/kernel_builder/Makefile
index 7d7df0af..a041a87c 100644
--- a/testing/kernel_builder/Makefile
+++ b/testing/kernel_builder/Makefile
@@ -4,7 +4,9 @@ IMAGE=kernel-builder-local
 TAG=latest
 
 all:
-	${CONTAINER_ENGINE} run -v ${PWD}:/work ${IMAGE}:${TAG}
+	${CONTAINER_ENGINE} run -v ${PWD}:/work ${IMAGE}-old:${TAG}
+	${CONTAINER_ENGINE} run -v ${PWD}:/work ${IMAGE}-new:${TAG}
 
 image:
-	${CONTAINER_ENGINE} build . -t ${IMAGE}:${TAG}
+	${CONTAINER_ENGINE} build -f Dockerfile.old -t ${IMAGE}-old:${TAG} .
+	${CONTAINER_ENGINE} build -f Dockerfile.new -t ${IMAGE}-new:${TAG} .
diff --git a/testing/kernel_builder/build.sh b/testing/kernel_builder/build.sh
index ca010903..14a8a3e9 100755
--- a/testing/kernel_builder/build.sh
+++ b/testing/kernel_builder/build.sh
@@ -18,24 +18,30 @@ readonly BUILD_ARCHES=(
 
 # We hit every minor release here, and grab a number of different patch
 # releases from each LTS series (e.g. 5.10, 5.15)
-readonly BUILD_VERSIONS=(
+readonly BUILD_VERSIONS_PAHOLE_120=(
     "5.10.16" # Oldest we support
-    "5.10.50"
-    "5.10.100"
-    "5.10.120"
     "5.10.130"
     "5.11"
    "5.12"
     "5.13"
     "5.14"
     "5.15"
-    "5.15.10"
-    "5.15.40"
-    "5.15.60"
+    "5.15.133"
     "5.16"
     "5.17"
     "5.18"
     "5.19"
+    "6.0"
+    "6.1"
+    "6.1.55"
+)
+
+readonly BUILD_VERSIONS_PAHOLE_SOURCE=(
+    "6.2"
+    "6.3"
+    "6.4"
+    "6.4.16"
+    "6.5"
 )
 
 exit_error() {
@@ -98,9 +104,15 @@ fetch_and_build() {
 }
 
 main() {
-    for version in ${BUILD_VERSIONS[@]}; do
-        fetch_and_build $version
-    done
+    if [ "$(pahole --version)" = "v1.20" ]; then
+        for version in ${BUILD_VERSIONS_PAHOLE_120[@]}; do
+            fetch_and_build $version
+        done
+    else
+        for version in ${BUILD_VERSIONS_PAHOLE_SOURCE[@]}; do
+            fetch_and_build $version
+        done
+    fi
 }
 
 main
diff --git a/testing/kernel_builder/config.custom b/testing/kernel_builder/config.custom
index 1576c188..36b297db 100644
--- a/testing/kernel_builder/config.custom
+++ b/testing/kernel_builder/config.custom
@@ -2,15 +2,12 @@
 CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
-CONFIG_DEBUG_INFO=y
 
 # Enable BTF
-CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_DEBUG_INFO_REDUCED=n
-
-# Enable /sys/kernel/debug/tracing/events/syscalls
-CONFIG_FTRACE_SYSCALLS=y
+CONFIG_DEBUG_INFO_DWARF4=y
 
 # Enable securityfs
 CONFIG_SECURITYFS=y
@@ -46,6 +43,9 @@ CONFIG_CGROUP_NET_CLASSID=y
 CONFIG_FTRACE=y
 CONFIG_FUNCTION_TRACER=y
 CONFIG_KPROBES=y
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_FPROBE=y
 
 # Enable IPv6 (not on by default in aarch64 defconfig)
 CONFIG_IPV6=y
diff --git a/testing/testrunner/tests.go b/testing/testrunner/tests.go
index 116a1428..8d28de12 100644
--- a/testing/testrunner/tests.go
+++ b/testing/testrunner/tests.go
@@ -18,28 +18,33 @@ import (
 )
 
 func TestFeaturesCorrect(et *EventsTraceInstance) {
-	var buf syscall.Utsname
-	if err := syscall.Uname(&buf); err != nil {
+	var utsname syscall.Utsname
+	if err := syscall.Uname(&utsname); err != nil {
 		TestFail(fmt.Sprintf("Failed to run uname: %s", err))
 	}
 
-	archBytes := []byte{}
-	for _, b := range buf.Machine {
-		if b == 0 {
-			break
+	int8ArrayToString := func(arr [65]int8) string {
+		var buf []byte
+		for _, el := range arr {
+			if el == 0 {
+				break
+			}
+			buf = append(buf, byte(el))
 		}
-
-		archBytes = append(archBytes, byte(b))
+		return string(buf)
+	}
+	contains := func(s []string, str string) bool {
+		for _, el := range s {
+			if el == str {
+				return true
+			}
+		}
+		return false
 	}
-	arch := string(archBytes)
 
-	// BPF trampolines are only supported on x86 at present.
-	//
-	// As of June 2022, there is a patchset circulating that will add support
-	// to ARM64 (https://lwn.net/Articles/899093/). This check should be
-	// updated when that is merged into the mainline such that it ensures BPF
-	// trampolines are disabled on all aarch64 kernels pre-.
+	arch := int8ArrayToString(utsname.Machine)
+	kernelVersion := int8ArrayToString(utsname.Release)
+
 	switch arch {
 	case "x86_64":
 		// All x86 kernels in the CI test matrix currently enable bpf
 		// trampolines, if we add one that doesn't, we'll have to specially
 		// handle it here.
 		AssertTrue(et.InitMsg.Features.BpfTramp)
 	case "aarch64":
-		AssertFalse(et.InitMsg.Features.BpfTramp)
+		hasBpfTramp := []string{"6.4.0", "6.4.16", "6.5.0"}
+
+		if contains(hasBpfTramp, kernelVersion) {
+			AssertTrue(et.InitMsg.Features.BpfTramp)
+		} else {
+			AssertFalse(et.InitMsg.Features.BpfTramp)
+		}
 	default:
-		TestFail(fmt.Sprintf("Unknown arch %s, please add to the TestFeaturesCorrect test", arch))
+		TestFail(fmt.Sprintf("unknown arch %s, please add to the TestFeaturesCorrect test", arch))
 	}
 }
 
diff --git a/testing/testrunner/utils.go b/testing/testrunner/utils.go
index 039a2476..a70a8a55 100644
--- a/testing/testrunner/utils.go
+++ b/testing/testrunner/utils.go
@@ -26,11 +26,11 @@ import (
 // This is a JSON type printed by the test binaries (not by EventsTrace), it's
 // used all over the place, so define it here to save space
 type TestPidInfo struct {
-	Tid  int64 `json:"tid"`
-	Tgid int64 `json:"tgid"`
-	Ppid int64 `json:"ppid"`
-	Pgid int64 `json:"pgid"`
-	Sid  int64 `json:"sid"`
+	Tid          int64  `json:"tid"`
+	Tgid         int64  `json:"tgid"`
+	Ppid         int64  `json:"ppid"`
+	Pgid         int64  `json:"pgid"`
+	Sid          int64  `json:"sid"`
 	CapPermitted uint64 `json:"cap_permitted,string"`
 	CapEffective uint64 `json:"cap_effective,string"`
 }
@@ -53,12 +53,12 @@ type PidInfo struct {
 }
 
 type CredInfo struct {
-	Ruid int64 `json:"ruid"`
-	Rgid int64 `json:"rgid"`
-	Euid int64 `json:"euid"`
-	Egid int64 `json:"egid"`
-	Suid int64 `json:"suid"`
-	Sgid int64 `json:"sgid"`
+	Ruid         int64  `json:"ruid"`
+	Rgid         int64  `json:"rgid"`
+	Euid         int64  `json:"euid"`
+	Egid         int64  `json:"egid"`
+	Suid         int64  `json:"suid"`
+	Sgid         int64  `json:"sgid"`
 	CapPermitted uint64 `json:"cap_permitted,string"`
 	CapEffective uint64 `json:"cap_effective,string"`
 }
@@ -79,9 +79,9 @@ type NetInfo struct {
 }
 
 type ProcessForkEvent struct {
-	ParentPids PidInfo `json:"parent_pids"`
-	ChildPids  PidInfo `json:"child_pids"`
-	Creds      CredInfo `json:"creds"`
+	ParentPids PidInfo  `json:"parent_pids"`
+	ChildPids  PidInfo  `json:"child_pids"`
+	Creds      CredInfo `json:"creds"`
 }
 
 type ProcessExecEvent struct {