Improve FUSE, run all tests with it

The FUSE driver, up to now, had no way to know when the user had
"explicitly" closed a file. Instead it linked the TernFS file on
flush, which could cause nasty situations. The classic example
is a fork causing the FD of a TernFS file to remain open in the forked
process, and then the process dying, causing a spurious flush.

This commit adds a way to detect when a flush is due to a close(),
which allows us to link the file only in the cases where that happened,
which is a much better heuristic and closer to what we do in the kernel
module.

This commit also contains various other improvements to make all tests
pass under FUSE. The big remaining item is changing how files are read
(they're currently read all upfront and then kept in memory).
This commit is contained in:
Francesco Mazzoli
2025-09-10 16:27:19 +00:00
committed by Francesco Mazzoli
parent 3d592a8311
commit 01f9d5addf
477 changed files with 110739 additions and 23861 deletions

View File

@@ -99,6 +99,16 @@ jobs:
run: apt-get install -y build-essential
- name: Run functional tests
run: ./ci.py --functional --docker --build
- name: Build bpftools
# As of now (2025-09-12) the AWS-packaged ubuntu 24.04 does not seem to have
# the right bpftools packages, so build from source.
run: |
apt-get remove -y linux-tools-*
apt-get install -y git build-essential libbpf-dev clang llvm
git clone --recurse-submodules https://github.com/libbpf/bpftool.git
cd bpftool/src
make -j install
ln -s /usr/local/sbin/bpftool /usr/sbin/bpftool
- name: Run integration tests
run: ./ci.py --build --short --integration --docker
- name: Install packages for kmod CI

10
ci.py
View File

@@ -18,11 +18,9 @@ parser.add_argument('--build', action='store_true')
parser.add_argument('--docker', action='store_true', help='Build and run in docker image')
parser.add_argument('--prepare-image', default=None, type=str, help='Build the kmod image given the provided base image')
parser.add_argument('--leader-only', action='store_true', help='Run only LogsDB leader with LEADER_NO_FOLLOWERS')
parser.add_argument('--close-tracker-object', default=None, type=str, help='Run fuse driver with the given close tracker object')
args = parser.parse_args()
os.environ['PATH'] = f'/opt/go1.18.4/bin:{os.environ["PATH"]}'
script_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(script_dir)
@@ -36,7 +34,7 @@ if args.functional:
bold_print('functional tests')
if args.docker:
bold_print('starting functional tests in docker')
container = 'ghcr.io/xtxmarkets/ternfs-ubuntu-build:2025-09-03'
container = 'ghcr.io/xtxmarkets/ternfs-ubuntu-build:2025-09-18'
# See <https://groups.google.com/g/seastar-dev/c/r7W-Kqzy9O4>
# for motivation for `--security-opt seccomp=unconfined`,
# the `--pids-limit -1` is not something I hit but it seems
@@ -58,13 +56,13 @@ if args.functional:
if args.integration:
if args.docker:
bold_print('starting integration tests in docker')
container = 'ghcr.io/xtxmarkets/ternfs-ubuntu-build:2025-09-03'
container = 'ghcr.io/xtxmarkets/ternfs-ubuntu-build:2025-09-18'
# See <https://groups.google.com/g/seastar-dev/c/r7W-Kqzy9O4>
# for motivation for `--security-opt seccomp=unconfined`,
# the `--pids-limit -1` is not something I hit but it seems
# like a good idea.
run_cmd_unbuffered(
['docker', 'run', '--pids-limit', '-1', '--security-opt', 'seccomp=unconfined', '--cap-add', 'SYS_ADMIN', '-v', '/dev/fuse:/dev/fuse', '--privileged', '--rm', '-i', '--mount', f'type=bind,src={script_dir},dst=/eggsfs', '-e', f'UID={os.getuid()}', '-e', f'GID={os.getgid()}', container, '/eggsfs/integration.py', '--docker'] + (['--short'] if args.short else []) + (['--leader-only'] if args.leader_only else [])
['docker', 'run', '--pids-limit', '-1', '--security-opt', 'seccomp=unconfined', '--cap-add', 'SYS_ADMIN', '-v', '/dev/fuse:/dev/fuse', '-v', '/sys/kernel:/sys/kernel', '--privileged', '--rm', '-i', '--mount', f'type=bind,src={script_dir},dst=/ternfs', '-e', f'UID={os.getuid()}', '-e', f'GID={os.getgid()}', container, '/ternfs/integration.py', '--docker'] + (['--short'] if args.short else []) + (['--leader-only'] if args.leader_only else []) + (['--close-tracker-object', args.close_tracker_object] if args.close_tracker_object else [])
)
else:
run_cmd_unbuffered(

View File

@@ -1,12 +1,9 @@
# Copyright 2025 XTX Markets Technologies Limited
#
# SPDX-License-Identifier: GPL-2.0-or-later
# The image we use to build the static "alpine" binaries
# that we deploy.
# We are staying on 3.18 and patching in go1.22 instead of moving to 3.20
# due to issues with compiling the go-sqlite3 lib on 3.20.
# We intend to drop the alpine build entirely, see https://internal-repo/issues/336.
# The image we use to build the static "alpine" binaries.
# We are staying on 3.18 and patching in recent go due to the RocksDB we're
# using not playing well with the compilers shipped in 3.22.
FROM alpine:3.18
LABEL org.opencontainers.image.source https://github.com/XTXMarkets/ternfs
RUN set -eu
@@ -15,8 +12,8 @@ RUN apk add ca-certificates
RUN /usr/sbin/update-ca-certificates
RUN apk add --no-cache bash perl coreutils python3 musl gcc g++ clang lld make cmake ninja mandoc linux-headers patch wget
# Explicitly install go outside of apk since the default version is 1.18
RUN wget https://go.dev/dl/go1.22.3.linux-amd64.tar.gz
RUN echo 8920ea521bad8f6b7bc377b4824982e011c19af27df88a815e3586ea895f1b36 go1.22.3.linux-amd64.tar.gz | sha256sum --check
RUN tar -C /usr/local -xzf go1.22.3.linux-amd64.tar.gz; rm go1.22.3.linux-amd64.tar.gz
RUN wget https://go.dev/dl/go1.25.1.linux-amd64.tar.gz
RUN echo 7716a0d940a0f6ae8e1f3b3f4f36299dc53e31b16840dbd171254312c41ca12e go1.25.1.linux-amd64.tar.gz | sha256sum --check
RUN tar -C /usr/local -xzf go1.25.1.linux-amd64.tar.gz; rm go1.25.1.linux-amd64.tar.gz
ENV PATH="${PATH}:/usr/local/go/bin"
ENV IN_TERN_BUILD_CONTAINER Y
ENV IN_TERN_BUILD_CONTAINER Y

View File

@@ -1,19 +1,10 @@
# Copyright 2025 XTX Markets Technologies Limited
#
# SPDX-License-Identifier: GPL-2.0-or-later
FROM ubuntu:22.04
FROM ubuntu:noble-20250910
LABEL org.opencontainers.image.source https://github.com/XTXMarkets/ternfs
RUN set -eu
# See <https://stackoverflow.com/questions/71941032/why-i-cannot-run-apt-update-inside-a-fresh-ubuntu22-04>
RUN rm -f /etc/apt/apt.conf.d/docker-clean
RUN apt-get update
RUN apt-get install -y python3 gcc g++ clang lld make cmake ninja-build mandoc build-essential git fuse valgrind llvm ca-certificates wget
RUN apt-get install -y python3 gcc g++ clang lld make cmake ninja-build mandoc build-essential git fuse valgrind llvm wget rsync golang-go
RUN apt-get clean
# Explicitly install go outside of apt since the default version is 1.18
RUN /usr/sbin/update-ca-certificates
RUN wget https://go.dev/dl/go1.22.3.linux-amd64.tar.gz
RUN echo 8920ea521bad8f6b7bc377b4824982e011c19af27df88a815e3586ea895f1b36 go1.22.3.linux-amd64.tar.gz | sha256sum --check
RUN tar -C /usr/local -xzf go1.22.3.linux-amd64.tar.gz; rm go1.22.3.linux-amd64.tar.gz
ENV PATH="${PATH}:/usr/local/go/bin"
ENV IN_TERN_BUILD_CONTAINER Y

View File

@@ -26,9 +26,9 @@ build_dir.mkdir(parents=True, exist_ok=True)
if build_type in ('ubuntu', 'ubuntudebug', 'ubuntusanitized', 'ubuntuvalgrind', 'alpine', 'alpinedebug') and 'IN_TERN_BUILD_CONTAINER' not in os.environ:
if build_type.startswith('alpine'):
container = 'ghcr.io/xtxmarkets/ternfs-alpine-build:2025-09-03'
container = 'ghcr.io/xtxmarkets/ternfs-alpine-build:2025-09-18-1'
else:
container = 'ghcr.io/xtxmarkets/ternfs-ubuntu-build:2025-09-03'
container = 'ghcr.io/xtxmarkets/ternfs-ubuntu-build:2025-09-18'
# See <https://groups.google.com/g/seastar-dev/c/r7W-Kqzy9O4>
# for motivation for `--security-opt seccomp=unconfined`,
# the `--pids-limit -1` is not something I hit but it seems

View File

@@ -62,12 +62,12 @@ struct Duration {
void sleepRetry() const;
};
constexpr Duration operator "" _ns (unsigned long long t) { return Duration(t); }
constexpr Duration operator "" _us (unsigned long long t) { return Duration(t*1'000); }
constexpr Duration operator "" _ms (unsigned long long t) { return Duration(t*1'000'000); }
constexpr Duration operator "" _sec (unsigned long long t) { return Duration(t*1'000'000'000ull); }
constexpr Duration operator "" _mins (unsigned long long t) { return Duration(t*1'000'000'000ull*60); }
constexpr Duration operator "" _hours(unsigned long long t) { return Duration(t*1'000'000'000ull*60*60); }
constexpr Duration operator ""_ns (unsigned long long t) { return Duration(t); }
constexpr Duration operator ""_us (unsigned long long t) { return Duration(t*1'000); }
constexpr Duration operator ""_ms (unsigned long long t) { return Duration(t*1'000'000); }
constexpr Duration operator ""_sec (unsigned long long t) { return Duration(t*1'000'000'000ull); }
constexpr Duration operator ""_mins (unsigned long long t) { return Duration(t*1'000'000'000ull*60); }
constexpr Duration operator ""_hours(unsigned long long t) { return Duration(t*1'000'000'000ull*60*60); }
std::ostream& operator<<(std::ostream& out, Duration d);

View File

@@ -10,6 +10,19 @@ index f356395f3..6eaf71735 100644
#include "rocksdb/slice.h"
diff --git a/trace_replay/trace_replay.cc b/trace_replay/trace_replay.cc
index 37b95852b..c681e374c 100644
--- a/trace_replay/trace_replay.cc
+++ b/trace_replay/trace_replay.cc
@@ -317,7 +317,7 @@ Status TracerHelper::DecodeTraceRecord(Trace* trace, int trace_file_version,
cf_ids.reserve(multiget_size);
multiget_keys.reserve(multiget_size);
for (uint32_t i = 0; i < multiget_size; i++) {
- uint32_t tmp_cfid;
+ uint32_t tmp_cfid = 0;
Slice tmp_key;
GetFixed32(&cfids_payload, &tmp_cfid);
GetLengthPrefixedSlice(&keys_payload, &tmp_key);
diff --git a/util/string_util.h b/util/string_util.h
index 55d106fff..2adf1ec68 100644
--- a/util/string_util.h

View File

@@ -17,14 +17,27 @@ repo_dir = go_dir.parent
parser = argparse.ArgumentParser()
parser.add_argument('--race', action='store_true', help='Build Go with -race')
parser.add_argument('--generate', action='store_true', help='Run generate rather than build')
parser.add_argument('--close-tracker', action='store_true', help='Build the BPF object file for the ternfuse close tracker')
parser.add_argument('paths', nargs='*')
args = parser.parse_args()
paths = args.paths
if args.generate and (args.race or paths):
if args.generate and (args.race or paths or args.close_tracker):
print('--generate only works as the only flag')
os.exit(2)
sys.exit(2)
if args.close_tracker and (args.race or paths or args.generate):
print('--close-tracker only works as the only flag')
sys.exit(2)
if args.close_tracker:
print('Dumping vmlinux.h')
with open(go_dir / 'closetracker' / 'vmlinux.h', 'w') as f:
subprocess.run(['bpftool', 'btf', 'dump', 'file', '/sys/kernel/btf/vmlinux', 'format', 'c'], stdout=f, check=True)
print('Building closetracker.bpf.o')
subprocess.run(['clang', '-g', '-O2', '-target', 'bpf', '-c', go_dir / 'closetracker' / 'closetracker.bpf.c', '-o', go_dir / 'closetracker' / 'closetracker.bpf.o'], check=True)
sys.exit(0)
if not args.generate and len(paths) == 0:
vendor_dir = go_dir / 'vendor'
@@ -42,7 +55,7 @@ if not args.generate and len(paths) == 0:
if 'IN_TERN_BUILD_CONTAINER' not in os.environ:
container = 'ghcr.io/xtxmarkets/ternfs-alpine-build:2025-09-03'
container = 'ghcr.io/xtxmarkets/ternfs-alpine-build:2025-09-18-1'
# See <https://groups.google.com/g/seastar-dev/c/r7W-Kqzy9O4>
# for motivation for `--security-opt seccomp=unconfined`,
# the `--pids-limit -1` is not something I hit but it seems

View File

@@ -447,7 +447,7 @@ func (cm *clientMetadata) parseResponse(log *log.Logger, req *metadataProcessorR
}
return
}
log.Debug("received resp %v req id %v from shard %v", req.resp, req.requestId, req.shard)
log.Debug("received resp %T %v req id %v from shard %v", req.resp, req.resp, req.requestId, req.shard)
// done
req.respCh <- &metadataProcessorResponse{
requestId: req.requestId,
@@ -1116,7 +1116,7 @@ func (c *Client) SetCDCTimeouts(t *timing.ReqTimeouts) {
// Override the block timeout parameters.
// This is only safe to use during initialization.
func (c *Client) SetBlockTimeout(t *timing.ReqTimeouts) {
func (c *Client) SetBlockTimeouts(t *timing.ReqTimeouts) {
c.blockTimeout = t
}

View File

@@ -43,7 +43,7 @@ func (c *Client) metadataRequest(
now := time.Now()
timeout := timeouts.NextNow(startedAt, now)
if timeout == 0 {
log.RaiseAlert("giving up on request to shard %v after waiting for %v", shid, now.Sub(startedAt))
log.RaiseAlert("giving up on request to shard %v after waiting for %v max=%v", shid, now.Sub(startedAt), timeouts.Max)
return msgs.TIMEOUT
}
if counters != nil {

2
go/closetracker/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
vmlinux.h
closetracker.bpf.o

View File

@@ -0,0 +1,93 @@
// Copyright 2025 XTX Markets Technologies Limited
//
// SPDX-License-Identifier: GPL-2.0-or-later
// To be used in conjunction with ternfuse to make sure that only
// files which have been close()d explicitly get linked to the filesystem.
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
// This variable will be set by the userspace loader
// (0 means "no target device yet", i.e. tracking is effectively off —
// see the early-out in handle_close below).
volatile const uint32_t target_dev = 0;
// Map from inode number (on target_dev) to the number of times an fd
// referring to that inode was passed to close(2).
// NOTE(review): presumably drained/cleared by the ternfuse userspace
// side — confirm against the loader.
struct {
__uint(type, BPF_MAP_TYPE_HASH);
// We don't really have strong guarantees on the interval between a user calling
// close() and flush being invoked at the VFS layer. This is essentially a buffer
// that needs to be big enough to remember the file long enough in that interval.
// Since forgetting a file is pretty bad, make this big.
__uint(max_entries, 1<<20);
__type(key, uint64_t);
__type(value, uint64_t);
} closed_inodes_map SEC(".maps");
// Tracepoint handler for entry to close(2). For every close() whose fd
// resolves to an inode on target_dev, bump that inode's counter in
// closed_inodes_map, so userspace can tell a flush caused by an explicit
// close() apart from a spurious one.
SEC("tp/syscalls/sys_enter_close")
int handle_close(struct trace_event_raw_sys_enter* ctx) {
    // Exit early if no target device is set.
    if (target_dev == 0) {
        return 0;
    }
    int fd = (int)ctx->args[0];
    // Reject negative fds explicitly. The max_fds comparison below would
    // also reject them, but only via the implicit signed->unsigned
    // promotion of `fd` — too subtle to rely on silently.
    if (fd < 0) {
        return 0;
    }
    // Walk current task -> files -> fdtable to resolve the fd into a
    // struct file. Every pointer is fetched with BPF_CORE_READ and
    // NULL-checked, since any of them may be unset from this context.
    struct task_struct* task = (struct task_struct *)bpf_get_current_task();
    if (!task) {
        return 0;
    }
    struct files_struct* files = BPF_CORE_READ(task, files);
    if (!files) {
        return 0;
    }
    struct fdtable* fdt = BPF_CORE_READ(files, fdt);
    if (!fdt) {
        return 0;
    }
    if (fd >= BPF_CORE_READ(fdt, max_fds)) {
        return 0;
    }
    struct file** fd_array = BPF_CORE_READ(fdt, fd);
    if (!fd_array) {
        return 0;
    }
    struct file* file;
    bpf_probe_read_kernel(&file, sizeof(file), &fd_array[fd]);
    if (!file) {
        return 0;
    }
    struct inode* inode = BPF_CORE_READ(file, f_inode);
    if (!inode) {
        return 0;
    }
    struct super_block* sb = BPF_CORE_READ(inode, i_sb);
    if (!sb) {
        return 0;
    }
    // Filter by the device ID provided by userspace.
    dev_t dev = BPF_CORE_READ(sb, s_dev);
    if (dev != target_dev) {
        return 0;
    }
    uint64_t ino = BPF_CORE_READ(inode, i_ino);
    // Bump the per-inode close count. The lookup+update pair can race
    // with another CPU creating the same entry, in which case one
    // increment may be lost (both see a miss, both insert 1) — the map
    // still records that the inode was closed at least once.
    uint64_t* count = bpf_map_lookup_elem(&closed_inodes_map, &ino);
    if (count) {
        __sync_fetch_and_add(count, 1);
    } else {
        uint64_t init_val = 1;
        bpf_map_update_elem(&closed_inodes_map, &ino, &init_val, BPF_ANY);
    }
    return 0;
}
char LICENSE[] SEC("license") = "GPL";

View File

@@ -22,6 +22,7 @@ import (
"syscall"
"time"
"xtx/ternfs/core/log"
"xtx/ternfs/core/timing"
"xtx/ternfs/msgs"
)
@@ -334,14 +335,17 @@ func (procs *ManagedProcesses) StartBlockService(ll *log.Logger, opts *BlockServ
}
type FuseOpts struct {
Exe string
Path string
LogLevel log.LogLevel
Wait bool
RegistryAddress string
Profile bool
InitialShardTimeout time.Duration
InitialCDCTimeout time.Duration
Exe string
Path string
LogLevel log.LogLevel
Wait bool
RegistryAddress string
Profile bool
ShardTimeouts timing.ReqTimeouts
CDCTimeouts timing.ReqTimeouts
BlockTimeouts timing.ReqTimeouts
CloseTrackerObject string
SetUid bool
}
func (procs *ManagedProcesses) StartFuse(ll *log.Logger, opts *FuseOpts) string {
@@ -367,11 +371,38 @@ func (procs *ManagedProcesses) StartFuse(ll *log.Logger, opts *FuseOpts) string
if opts.Profile {
args = append(args, "-profile-file", path.Join(opts.Path, "pprof"))
}
if opts.InitialCDCTimeout != 0 {
args = append(args, "-initial-cdc-timeout", opts.InitialCDCTimeout.String())
if opts.ShardTimeouts.Initial != 0 {
args = append(args, "-initial-shard-timeout", opts.ShardTimeouts.Initial.String())
}
if opts.InitialShardTimeout != 0 {
args = append(args, "-initial-shard-timeout", opts.InitialShardTimeout.String())
if opts.ShardTimeouts.Max != 0 {
args = append(args, "-max-shard-timeout", opts.ShardTimeouts.Max.String())
}
if opts.ShardTimeouts.Overall != 0 {
args = append(args, "-overall-shard-timeout", opts.ShardTimeouts.Overall.String())
}
if opts.CDCTimeouts.Initial != 0 {
args = append(args, "-initial-cdc-timeout", opts.CDCTimeouts.Initial.String())
}
if opts.CDCTimeouts.Max != 0 {
args = append(args, "-max-cdc-timeout", opts.CDCTimeouts.Max.String())
}
if opts.CDCTimeouts.Overall != 0 {
args = append(args, "-overall-cdc-timeout", opts.CDCTimeouts.Overall.String())
}
if opts.BlockTimeouts.Initial != 0 {
args = append(args, "-initial-block-timeout", opts.BlockTimeouts.Initial.String())
}
if opts.BlockTimeouts.Max != 0 {
args = append(args, "-max-block-timeout", opts.BlockTimeouts.Max.String())
}
if opts.BlockTimeouts.Overall != 0 {
args = append(args, "-overall-block-timeout", opts.BlockTimeouts.Overall.String())
}
if opts.CloseTrackerObject != "" {
args = append(args, "-close-tracker-object", opts.CloseTrackerObject)
}
if opts.SetUid {
args = append(args, "-set-uid")
}
args = append(args, mountPoint)
procs.Start(ll, &ManagedProcessArgs{

View File

@@ -1,15 +1,18 @@
module xtx/ternfs
go 1.22
go 1.23.0
toolchain go1.24.3
require (
github.com/aws/aws-sdk-go-v2 v1.36.4
github.com/aws/aws-sdk-go-v2/config v1.29.16
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.2
github.com/hanwen/go-fuse/v2 v2.2.0
github.com/mattn/go-sqlite3 v1.14.16
github.com/cilium/ebpf v0.19.0
github.com/hanwen/go-fuse/v2 v2.8.0
github.com/mattn/go-sqlite3 v1.14.32
golang.org/x/sync v0.11.0
golang.org/x/sys v0.16.0
golang.org/x/sys v0.31.0
)
require (

View File

@@ -34,15 +34,37 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.33.21 h1:nyLjs8sYJShFYj6aiyjCBI3EcLn
github.com/aws/aws-sdk-go-v2/service/sts v1.33.21/go.mod h1:EhdxtZ+g84MSGrSrHzZiUm9PYiZkrADNja15wtRJSJo=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/hanwen/go-fuse/v2 v2.2.0 h1:jo5QZYmBLNcl9ovypWaQ5yXMSSV+Ch68xoC3rtZvvBM=
github.com/hanwen/go-fuse/v2 v2.2.0/go.mod h1:B1nGE/6RBFyBRC1RRnf23UpwCdyJ31eukw34oAKukAc=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
github.com/cilium/ebpf v0.19.0 h1:Ro/rE64RmFBeA9FGjcTc+KmCeY6jXmryu6FfnzPRIao=
github.com/cilium/ebpf v0.19.0/go.mod h1:fLCgMo3l8tZmAdM3B2XqdFzXBpwkcSTroaVqN08OWVY=
github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6 h1:teYtXy9B7y5lHTp8V9KPxpYRAVA7dozigQcMiBust1s=
github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6/go.mod h1:p4lGIVX+8Wa6ZPNDvqcxq36XpUDLh42FLetFU7odllI=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/hanwen/go-fuse/v2 v2.8.0 h1:wV8rG7rmCz8XHSOwBZhG5YcVqcYjkzivjmbaMafPlAs=
github.com/hanwen/go-fuse/v2 v2.8.0/go.mod h1:yE6D2PqWwm3CbYRxFXV9xUd8Md5d6NG0WBs5spCswmI=
github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=

File diff suppressed because it is too large Load Diff

View File

@@ -246,7 +246,7 @@ func main() {
c.SetFetchBlockServices()
c.SetShardTimeouts(shardTimeouts)
c.SetCDCTimeouts(cdcTimeouts)
c.SetBlockTimeout(blockTimeouts)
c.SetBlockTimeouts(blockTimeouts)
c.IncreaseNumRegistryHandlersTo(*numRegistryHandlers)
counters := client.NewClientCounters()
c.SetCounters(counters)

View File

@@ -235,11 +235,15 @@ func main() {
if loc > 0 {
dirName = fmt.Sprintf("%s_loc%d", dirName, loc)
}
failureDomain := fmt.Sprintf("%d", i)
if numLocations > 1 {
failureDomain = fmt.Sprintf("%d_%d", i, loc)
}
opts := managedprocess.BlockServiceOpts{
Exe: goExes.BlocksExe,
Path: path.Join(*dataDir, dirName),
StorageClasses: storageClasses,
FailureDomain: fmt.Sprintf("%d_%d", i, loc),
FailureDomain: failureDomain,
Location: msgs.Location(loc),
LogLevel: level,
RegistryAddress: registryAddressToUse,

View File

@@ -56,17 +56,7 @@ func cleanupAfterTest(
counters *client.ClientCounters,
pauseBlockServiceKiller *sync.Mutex,
) {
c, err := client.NewClient(log, nil, registryAddress, msgs.AddrsInfo{})
if err != nil {
panic(err)
}
shardTimeout := client.DefaultShardTimeout
shardTimeout.Initial = 5 * time.Millisecond
cdcTimeout := client.DefaultCDCTimeout
cdcTimeout.Initial = 5 *time.Millisecond
c.SetCounters(counters)
c.SetCDCTimeouts(&cdcTimeout)
c.SetShardTimeouts(&shardTimeout)
c := newTestClient(log, registryAddress, counters)
defer c.Close()
// Delete all current things
deleteDir(log, c, msgs.NULL_INODE_ID, "", 0, msgs.ROOT_DIR_INODE_ID)
@@ -99,7 +89,7 @@ func cleanupAfterTest(
panic(err)
}
if len(visitDirsResp.Ids) > 0 && !(len(visitDirsResp.Ids) == 1 && visitDirsResp.Ids[0] == msgs.ROOT_DIR_INODE_ID) {
panic(err)
panic(fmt.Errorf("bad response ids"))
}
// No files
visitFilesResp := msgs.VisitFilesResp{}

View File

@@ -13,7 +13,6 @@ import (
"path"
"sort"
"unsafe"
"xtx/ternfs/client"
"xtx/ternfs/core/log"
"xtx/ternfs/core/wyhash"
"xtx/ternfs/msgs"
@@ -72,7 +71,7 @@ func openDir(path string) (C.int, error) {
type dent struct {
ino msgs.InodeId
nextOffset int64
nextOffset C.off_t
reclen C.ushort
typ C.char
name string
@@ -163,15 +162,9 @@ func dirSeek(fd C.int, off C.long, whence C.int) (C.long, error) {
return off, nil
}
func dirSeekTest(log *log.Logger, registryAddress string, mountPoint string) {
c, err := client.NewClient(log, nil, registryAddress, msgs.AddrsInfo{})
if err != nil {
panic(err)
}
// create 10k files/symlinks/dirs
numFiles := 10_000
log.Info("creating %v paths", numFiles)
for i := 0; i < numFiles; i++ {
func dirSeekTest(log *log.Logger, registryAddress string, mountPoint string, numPaths int) {
log.Info("creating %v paths", numPaths)
for i := 0; i < numPaths; i++ {
path := path.Join(mountPoint, fmt.Sprintf("%v", i))
if i%10 == 0 { // dirs, not too many since they're more expensive to create
if err := os.Mkdir(path, 0777); err != nil {
@@ -205,6 +198,8 @@ func dirSeekTest(log *log.Logger, registryAddress string, mountPoint string) {
}
// verify dents with what we get straight from the server
{
c := newTestClient(log, registryAddress, nil)
defer c.Close()
ix := 2 // . and ..
req := msgs.ReadDirReq{
DirId: msgs.ROOT_DIR_INODE_ID,
@@ -217,8 +212,8 @@ func dirSeekTest(log *log.Logger, registryAddress string, mountPoint string) {
for i := range resp.Results {
dent := &dents[ix+i]
edge := &resp.Results[i]
if dent.name != edge.Name || dent.ino != edge.TargetId || dents[ix+i-1].nextOffset != int64(edge.NameHash) {
panic(fmt.Errorf("mismatching edge %+v and dent %+v dent at index %+v", ix+i, edge, dent))
if dent.name != edge.Name || dent.ino != edge.TargetId || dents[ix+i-1].nextOffset != C.off_t(edge.NameHash) {
panic(fmt.Errorf("mismatching edge %+v and dent %+v dent at index %+v", edge, dent, ix+i))
}
}
ix += len(resp.Results)

View File

@@ -344,11 +344,7 @@ func fileHistoryTest(
seed := uint64(i)
go func() {
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
c, err := client.NewClient(log, nil, registryAddress, msgs.AddrsInfo{})
if err != nil {
panic(err)
}
c.SetCounters(counters)
c := newTestClient(log, registryAddress, counters)
defer c.Close()
fileHistoryStepSingle(log, c, dirInfoCache, opts, seed, prefix)
wait.Done()

View File

@@ -532,7 +532,7 @@ func (c *posixFsTestHarness) checkFileData(log *log.Logger, fullFilePath string,
}
defer unix.Munmap(mm)
}
log.Debug("checking for file %v(ino=%d) of expected len %v", fullFilePath, stat.Ino, fullSize)
log.Debug("checking for file %v(ino=%016x) of expected len %v", fullFilePath, stat.Ino, fullSize)
// First do some random reads, hopefully stimulating span caches in some interesting way
if fullSize > 1 {
for i := 0; i < 10; i++ {
@@ -549,8 +549,8 @@ func (c *posixFsTestHarness) checkFileData(log *log.Logger, fullFilePath string,
if c.readWithMmap {
copy(actualPartialData, mm[offset:])
} else {
if _, err := io.ReadFull(f, actualPartialData); err != nil {
panic(err)
if read, err := io.ReadFull(f, actualPartialData); err != nil {
panic(fmt.Errorf("could not read file %v(ino=%016x) from %v to %v (%v read): %v", fullFilePath, stat.Ino, offset, offset+size, read, err))
}
}
checkFileData(fullFilePath, offset, offset+size, actualPartialData, expectedPartialData)
@@ -1088,12 +1088,7 @@ func fsTestInternal[Id comparable](
// Now, try to migrate away from one block service, to stimulate that code path
// in tests somewhere.
if opts.maxFileSize > 0 {
c, err := client.NewClient(log, nil, registryAddress, msgs.AddrsInfo{})
if err != nil {
panic(err)
}
c.SetFetchBlockServices()
c.SetCounters(counters)
c := newTestClient(log, registryAddress, counters)
defer c.Close()
blockServiceToPurge := findBlockServiceToPurge(log, c)
log.Info("will migrate block service %v", blockServiceToPurge)
@@ -1271,13 +1266,8 @@ func fsTest(
counters *client.ClientCounters,
harnessType WhichHarness,
) {
c, err := client.NewClient(log, nil, registryAddress, msgs.AddrsInfo{})
if err != nil {
panic(err)
}
c.SetFetchBlockServices()
c := newTestClient(log, registryAddress, counters)
defer c.Close()
c.SetCounters(counters)
switch h := harnessType.(type) {
case posixHarness:
harness := &posixFsTestHarness{

View File

@@ -23,29 +23,30 @@ type createInode struct {
creationTime msgs.TernTime
}
type parallelDirsOpts struct {
numRootDirs int
numThreads int
actionsPerThread int
}
func parallelDirsTest(
log *log.Logger,
registryAddress string,
counters *client.ClientCounters,
opts *parallelDirsOpts,
) {
numRootDirs := 10
numThreads := 1000
numThreadsDigits := int(math.Ceil(math.Log10(float64(numThreads))))
actionsPerThread := 100
numThreadsDigits := int(math.Ceil(math.Log10(float64(opts.numThreads))))
entityName := func(tid int, i int) string {
return fmt.Sprintf(fmt.Sprintf("%%0%dd-%%d", numThreadsDigits), tid, i)
}
client, err := client.NewClient(log, nil, registryAddress, msgs.AddrsInfo{})
if err != nil {
panic(err)
}
client.SetCounters(counters)
client := newTestClient(log, registryAddress, counters)
defer client.Close()
log.Info("creating root dirs")
rootDirs := []msgs.InodeId{}
for i := 0; i < numRootDirs; i++ {
for i := 0; i < opts.numRootDirs; i++ {
resp := &msgs.MakeDirectoryResp{}
if err := client.CDCRequest(log, &msgs.MakeDirectoryReq{OwnerId: msgs.ROOT_DIR_INODE_ID, Name: fmt.Sprintf("%v", i)}, resp); err != nil {
panic(err)
@@ -57,16 +58,16 @@ func parallelDirsTest(
log.Info("creating directories in parallel")
var wg sync.WaitGroup
wg.Add(numThreads)
wg.Add(opts.numThreads)
done := uint64(0)
inodes := make([]map[string]createInode, numThreads)
for i := 0; i < numThreads; i++ {
inodes := make([]map[string]createInode, opts.numThreads)
for i := 0; i < opts.numThreads; i++ {
tid := i
inodes[tid] = make(map[string]createInode)
go func() {
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
rand := wyhash.New(uint64(tid))
for i := 0; i < actionsPerThread; i++ {
for i := 0; i < opts.actionsPerThread; i++ {
which := rand.Float64()
// we mostly issue creates since only one dir rename
// can exist at the same time.
@@ -177,7 +178,7 @@ func parallelDirsTest(
}
}
if atomic.AddUint64(&done, 1)%256 == 0 {
log.Info("went through %v/%v actions", atomic.LoadUint64(&done), numThreads*actionsPerThread)
log.Info("went through %v/%v actions", atomic.LoadUint64(&done), opts.numThreads*opts.actionsPerThread)
}
}
wg.Done()
@@ -198,7 +199,7 @@ func parallelDirsTest(
log.Info("checking")
expectedDirs := make(map[string]struct{})
for i := 0; i < numThreads; i++ {
for i := 0; i < opts.numThreads; i++ {
for d := range inodes[i] {
expectedDirs[d] = struct{}{}
}

View File

@@ -28,6 +28,7 @@ import (
"xtx/ternfs/core/log"
"xtx/ternfs/core/managedprocess"
lrecover "xtx/ternfs/core/recover"
"xtx/ternfs/core/timing"
"xtx/ternfs/core/wyhash"
"xtx/ternfs/msgs"
@@ -79,8 +80,8 @@ func totalRequests[K comparable](cs map[K]*client.ReqCounters) uint64 {
type RunTests struct {
overrides *cfgOverrides
registryIp string
registryPort uint16
registryIp string
registryPort uint16
mountPoint string
fuseMountPoint string
kmod bool
@@ -124,8 +125,12 @@ func (r *RunTests) test(
counters := client.NewClientCounters()
r.print("running %s test, %s\n", name, extra)
log.Info("running %s test, %s\n", name, extra) // also in log to track progress in CI more easily
if extra != "" {
extra = ", " + extra
}
r.print("running %s test%s\n", name, extra)
log.Info("running %s test%s\n", name, extra) // also in log to track progress in CI more easily
t0 := time.Now()
run(counters)
elapsed := time.Since(t0)
@@ -256,25 +261,16 @@ func (r *RunTests) run(
log *log.Logger,
) {
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
c, err := client.NewClient(log, nil, r.registryAddress(), msgs.AddrsInfo{})
if err != nil {
panic(err)
}
c.SetFetchBlockServices()
defer c.Close()
updateTimeouts(c)
{
c := newTestClient(log, r.registryAddress(), nil)
// We want to immediately clean up everything when we run the GC manually
if err != nil {
panic(err)
}
snapshotPolicy := &msgs.SnapshotPolicy{
DeleteAfterVersions: msgs.ActiveDeleteAfterVersions(0),
}
if err := c.MergeDirectoryInfo(log, msgs.ROOT_DIR_INODE_ID, snapshotPolicy); err != nil {
panic(err)
}
c.Close()
}
fileHistoryOpts := fileHistoryTestOpts{
@@ -349,12 +345,20 @@ func (r *RunTests) run(
},
)
parallelDirsOpts := &parallelDirsOpts{
numRootDirs: 10,
numThreads: 100,
actionsPerThread: 100,
}
if r.short {
parallelDirsOpts.actionsPerThread = 10
}
r.test(
log,
"parallel dirs",
"",
fmt.Sprintf("%v root dirs, %v threads, %v actions per thread", parallelDirsOpts.numRootDirs, parallelDirsOpts.numThreads, parallelDirsOpts.actionsPerThread),
func(counters *client.ClientCounters) {
parallelDirsTest(log, r.registryAddress(), counters)
parallelDirsTest(log, r.registryAddress(), counters, parallelDirsOpts)
},
)
@@ -457,12 +461,12 @@ func (r *RunTests) run(
"",
func(counters *client.ClientCounters) {
fn := path.Join(r.mountPoint, "test")
if err := ioutil.WriteFile(fn, []byte{}, 0644); err != nil {
if err := os.WriteFile(fn, []byte{}, 0644); err != nil {
panic(err)
}
time1 := time.Now()
time2 := time.Now()
if time1 == time2 {
if time.Time.Equal(time1, time2) {
panic(fmt.Errorf("same times"))
}
if err := syscall.UtimesNano(fn, []syscall.Timespec{syscall.NsecToTimespec(time1.UnixNano()), syscall.NsecToTimespec(time2.UnixNano())}); err != nil {
@@ -487,6 +491,7 @@ func (r *RunTests) run(
if _, err := os.ReadFile(fn); err != nil {
panic(err)
}
time.Sleep(time.Second) // try to flush out OS caches, esp in FUSE
info, err = os.Stat(fn)
if err != nil {
panic(err)
@@ -497,10 +502,11 @@ func (r *RunTests) run(
panic(fmt.Errorf("atime didn't update, %v > %v", now, atime))
}
// make sure O_NOATIME is respected
file, err := os.OpenFile(fn, syscall.O_RDONLY|syscall.O_NOATIME, 0)
file, err := os.OpenFile(fn, os.O_RDONLY|syscall.O_NOATIME, 0)
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond)
info, err = os.Stat(fn)
file.Close()
if err != nil {
@@ -522,43 +528,45 @@ func (r *RunTests) run(
rand := wyhash.New(42)
for i := 0; i < 100; i++ {
fn := path.Join(r.mountPoint, fmt.Sprintf("test%v", i))
f, err := os.OpenFile(fn, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
f, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
panic(err)
}
// write ~50MB, alternating between real content and holes
// Write ~50MB, alternating between real content and holes
// everything random so we try to cheaply stimulate all possible start/end
// configurations
// configurations.
data := []byte{}
fileSize := int64(0)
offset := int64(0)
for i := 0; i < 10; i++ {
// the lowest value where we have multiple pages
// testing multiple spans is probably not worth it, the code
// is identical
sz := int64(rand.Uint64()%10000 + 1) // ]0, 10000]
if rand.Uint32()%2 == 0 { // hole
log.Debug("extending %v with %v zeros using seek", fn, sz)
var whence int
var offset int64
expectedOffset := int64(len(data)) + sz
var seekOffset int64
expectedOffset := offset + sz
switch rand.Uint32() % 3 {
case 0:
whence = io.SeekStart
offset = expectedOffset
seekOffset = expectedOffset
case 1:
whence = io.SeekCurrent
offset = sz
seekOffset = expectedOffset - offset
case 2:
whence = io.SeekEnd
offset = sz
seekOffset = expectedOffset - fileSize
}
retOffset, err := f.Seek(offset, whence)
log.Debug("extending %v with %v zeros using seek (offset=%v fileSize=%v seekOffset=%v whence=%v)", fn, sz, offset, fileSize, seekOffset, whence)
retOffset, err := f.Seek(seekOffset, whence)
if err != nil {
panic(err)
}
if retOffset != expectedOffset {
panic(fmt.Errorf("unexpected offset %v, expected %v", retOffset, expectedOffset))
panic(fmt.Errorf("unexpected offset %v, expected %v (whence=%v, %v + %v)", retOffset, expectedOffset, whence, len(data), sz))
}
data = append(data, make([]byte, int(sz))...)
offset = expectedOffset
} else { // real data
log.Debug("extending %v with %v random bytes using write", fn, sz)
chunk := make([]byte, sz)
@@ -568,7 +576,12 @@ func (r *RunTests) run(
if _, err := f.Write(chunk); err != nil {
panic(err)
}
// append zeros
data = append(data, make([]byte, int(offset)-len(data))...)
// append chunk
data = append(data, chunk...)
fileSize = int64(len(data))
offset = fileSize
}
}
if err := f.Close(); err != nil {
@@ -586,26 +599,35 @@ func (r *RunTests) run(
},
)
dirSeekPaths := 10_000
if r.short {
dirSeekPaths = 1_000
}
r.test(
log,
"dir seek",
"",
func(counters *client.ClientCounters) {
dirSeekTest(log, r.registryAddress(), r.mountPoint)
dirSeekTest(log, r.registryAddress(), r.mountPoint, dirSeekPaths)
},
)
parallelWriteThreads := 10000
if r.short {
parallelWriteThreads = 1000
}
r.test(
log,
"parallel write",
"",
fmt.Sprintf("%v threads", parallelWriteThreads),
func(counters *client.ClientCounters) {
numThreads := 10000
bufPool := bufpool.NewBufPool()
dirInfoCache := client.NewDirInfoCache()
var wg sync.WaitGroup
wg.Add(numThreads)
for i := 0; i < numThreads; i++ {
wg.Add(parallelWriteThreads)
c := newTestClient(log, r.registryAddress(), counters)
defer c.Close()
for i := 0; i < parallelWriteThreads; i++ {
ti := i
go func() {
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
@@ -727,7 +749,7 @@ func (bsv *blockServiceVictim) start(
StorageClasses: bsv.storageClasses,
FailureDomain: bsv.failureDomain,
LogLevel: log.Level(),
RegistryAddress: fmt.Sprintf("127.0.0.1:%d", registryPort),
RegistryAddress: fmt.Sprintf("127.0.0.1:%d", registryPort),
FutureCutoff: &testBlockFutureCutoff,
Addr1: fmt.Sprintf("127.0.0.1:%d", port1),
Addr2: fmt.Sprintf("127.0.0.1:%d", port2),
@@ -866,29 +888,21 @@ func killBlockServices(
}()
}
func updateTimeouts(c *client.Client) {
shardTimeout := client.DefaultShardTimeout
cdcTimeout := client.DefaultCDCTimeout
blockTimeout := client.DefaultBlockTimeout
var shardTimeouts timing.ReqTimeouts
var cdcTimeouts timing.ReqTimeouts
var blockTimeouts timing.ReqTimeouts
// In tests where we intentionally drop packets this makes things _much_
// faster
shardTimeout.Initial = 5 * time.Millisecond
cdcTimeout.Initial = 10 * time.Millisecond
// Tests fail frequently hitting various timeouts. Higher Max and Overall
// timeouts makes tests much more stable
shardTimeout.Max = 20 * time.Second
cdcTimeout.Max = 20 * time.Second
shardTimeout.Overall = 60 * time.Second
cdcTimeout.Overall = 60 * time.Second
// Retry block stuff quickly to avoid being starved by the block service
// killer (and also to go faster)
blockTimeout.Max = time.Second
blockTimeout.Overall = 10 * time.Minute // for block service killer tests
c.SetShardTimeouts(&shardTimeout)
c.SetCDCTimeouts(&cdcTimeout)
c.SetBlockTimeout(&blockTimeout)
func newTestClient(log *log.Logger, registryAddress string, counters *client.ClientCounters) *client.Client {
c, err := client.NewClient(log, nil, registryAddress, msgs.AddrsInfo{})
if err != nil {
panic(err)
}
c.SetFetchBlockServices()
c.SetCounters(counters)
c.SetShardTimeouts(&shardTimeouts)
c.SetCDCTimeouts(&cdcTimeouts)
c.SetBlockTimeouts(&blockTimeouts)
return c
}
// 0 interval won't do, because otherwise transient files will immediately be
@@ -921,10 +935,32 @@ func main() {
blockServiceKiller := flag.Bool("block-service-killer", false, "Go around killing block services to stimulate paths recovering from that.")
race := flag.Bool("race", false, "Go race detector")
leaderOnly := flag.Bool("leader-only", false, "Run only LogsDB leader with LEADER_NO_FOLLOWERS")
closeTrackerObject := flag.String("close-tracker-object", "", "Compiled BPF object to track explicitly closed files")
flag.Var(&overrides, "cfg", "Config overrides")
flag.Parse()
noRunawayArgs()
{
shardTimeouts = client.DefaultShardTimeout
cdcTimeouts = client.DefaultCDCTimeout
blockTimeouts = client.DefaultBlockTimeout
// In tests where we intentionally drop packets this makes things _much_
// faster
shardTimeouts.Initial = 5 * time.Millisecond
cdcTimeouts.Initial = 10 * time.Millisecond
// Tests fail frequently hitting various timeouts. Higher Max and Overall
// timeouts makes tests much more stable
shardTimeouts.Max = 20 * time.Second
cdcTimeouts.Max = 20 * time.Second
shardTimeouts.Overall = 60 * time.Second
cdcTimeouts.Overall = 60 * time.Second
// Retry block stuff quickly to avoid being starved by the block service
// killer (and also to go faster)
blockTimeouts.Max = time.Second
blockTimeouts.Overall = 10 * time.Minute // for block service killer tests
}
filterRe := regexp.MustCompile(*filter)
cleanupDbDir := false
@@ -999,13 +1035,13 @@ func main() {
if *binariesDir != "" {
cppExes = &managedprocess.CppExes{
RegistryExe: path.Join(*binariesDir, "ternregistry"),
ShardExe: path.Join(*binariesDir, "ternshard"),
CDCExe: path.Join(*binariesDir, "terncdc"),
DBToolsExe: path.Join(*binariesDir, "terndbtools"),
ShardExe: path.Join(*binariesDir, "ternshard"),
CDCExe: path.Join(*binariesDir, "terncdc"),
DBToolsExe: path.Join(*binariesDir, "terndbtools"),
}
goExes = &managedprocess.GoExes{
BlocksExe: path.Join(*binariesDir, "ternblocks"),
FuseExe: path.Join(*binariesDir, "ternfuse"),
BlocksExe: path.Join(*binariesDir, "ternblocks"),
FuseExe: path.Join(*binariesDir, "ternfuse"),
}
} else {
fmt.Printf("building shard/cdc/blockservice/registry\n")
@@ -1103,7 +1139,7 @@ func main() {
opts.LogsDBFlags = []string{"-logsdb-leader"}
}
}
if (r == 0) {
if r == 0 {
opts.Addr1 = fmt.Sprintf("127.0.0.1:%v", registryPort)
} else {
opts.Addr1 = "127.0.0.1:0"
@@ -1115,7 +1151,7 @@ func main() {
procs.StartRegistry(l, &opts)
}
}
err := client.WaitForRegistry(l, registryAddress, 10*time.Second)
if err != nil {
panic(fmt.Errorf("failed to connect to registry %v", err))
@@ -1178,15 +1214,15 @@ func main() {
// Start CDC
for r := uint8(0); r < replicaCount; r++ {
cdcOpts := &managedprocess.CDCOpts{
ReplicaId: msgs.ReplicaId(r),
Exe: cppExes.CDCExe,
Dir: path.Join(*dataDir, fmt.Sprintf("cdc_%d", r)),
LogLevel: level,
Valgrind: *buildType == "valgrind",
Perf: *profile,
ReplicaId: msgs.ReplicaId(r),
Exe: cppExes.CDCExe,
Dir: path.Join(*dataDir, fmt.Sprintf("cdc_%d", r)),
LogLevel: level,
Valgrind: *buildType == "valgrind",
Perf: *profile,
RegistryAddress: registryAddress,
Addr1: "127.0.0.1:0",
Addr2: "127.0.0.1:0",
Addr1: "127.0.0.1:0",
Addr2: "127.0.0.1:0",
}
if r == 0 {
if *leaderOnly {
@@ -1215,7 +1251,7 @@ func main() {
Valgrind: *buildType == "valgrind",
Perf: *profile,
OutgoingPacketDrop: *outgoingPacketDrop,
RegistryAddress: registryAddress,
RegistryAddress: registryAddress,
Addr1: "127.0.0.1:0",
Addr2: "127.0.0.1:0",
TransientDeadlineInterval: &testTransientDeadlineInterval,
@@ -1250,14 +1286,17 @@ func main() {
}
fuseMountPoint := procs.StartFuse(l, &managedprocess.FuseOpts{
Exe: goExes.FuseExe,
Path: path.Join(*dataDir, "fuse"),
LogLevel: level,
Wait: true,
RegistryAddress: registryAddress,
Profile: *profile,
InitialShardTimeout: client.DefaultShardTimeout.Initial,
InitialCDCTimeout: client.DefaultCDCTimeout.Initial,
Exe: goExes.FuseExe,
Path: path.Join(*dataDir, "fuse"),
LogLevel: level,
Wait: true,
RegistryAddress: registryAddress,
Profile: *profile,
ShardTimeouts: shardTimeouts,
CDCTimeouts: cdcTimeouts,
BlockTimeouts: blockTimeouts,
CloseTrackerObject: *closeTrackerObject,
SetUid: true,
})
var mountPoint string
@@ -1332,8 +1371,8 @@ func main() {
go func() {
r := RunTests{
overrides: &overrides,
registryIp: "127.0.0.1",
registryPort: registryPort,
registryIp: "127.0.0.1",
registryPort: registryPort,
mountPoint: mountPoint,
fuseMountPoint: fuseMountPoint,
kmod: *kmod,

View File

@@ -102,7 +102,7 @@ func newState(
{
blockTimeout := client.DefaultBlockTimeout
blockTimeout.Overall = 5 * time.Minute
st.client.SetBlockTimeout(&blockTimeout)
st.client.SetBlockTimeouts(&blockTimeout)
shardTimeout := client.DefaultShardTimeout
shardTimeout.Overall = 5 * time.Minute
st.client.SetShardTimeouts(&shardTimeout)

25
go/vendor/github.com/cilium/ebpf/.clang-format generated vendored Normal file
View File

@@ -0,0 +1,25 @@
---
Language: Cpp
BasedOnStyle: LLVM
AlignAfterOpenBracket: DontAlign
AlignConsecutiveAssignments: true
AlignEscapedNewlines: DontAlign
# mkdocs annotations in source code are written as trailing comments
# and alignment pushes these really far away from the content.
AlignTrailingComments: false
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: false
BreakBeforeBraces: Attach
IndentWidth: 4
KeepEmptyLinesAtTheStartOfBlocks: false
TabWidth: 4
UseTab: ForContinuationAndIndentation
ColumnLimit: 1000
# Go compiler comments need to stay unindented.
CommentPragmas: '^go:.*'
# linux/bpf.h needs to be included before bpf/bpf_helpers.h for types like __u64
# and sorting makes this impossible.
SortIncludes: false
...

4
go/vendor/github.com/cilium/ebpf/.gitattributes generated vendored Normal file
View File

@@ -0,0 +1,4 @@
# Force line ending normalisation
* text=auto
# Show types.go in the PR diff view by default
internal/sys/types.go linguist-generated=false

14
go/vendor/github.com/cilium/ebpf/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,14 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
*.o
!*_bpf*.o
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out

29
go/vendor/github.com/cilium/ebpf/.golangci.yaml generated vendored Normal file
View File

@@ -0,0 +1,29 @@
version: "2"
linters:
default: none
enable:
- depguard
- govet
- ineffassign
- misspell
- unused
settings:
depguard:
rules:
no-x-sys-unix:
files:
- '!**/internal/unix/*.go'
- '!**/examples/**/*.go'
- '!**/docs/**/*.go'
deny:
- pkg: golang.org/x/sys/unix
desc: use internal/unix instead
formatters:
enable:
- gofmt
- goimports
settings:
goimports:
local-prefixes:
- github.com/cilium/ebpf

12
go/vendor/github.com/cilium/ebpf/.vimto.toml generated vendored Normal file
View File

@@ -0,0 +1,12 @@
kernel="ghcr.io/cilium/ci-kernels:stable"
smp="cpus=2"
memory="1G"
user="root"
setup=[
"mount -t cgroup2 -o nosuid,noexec,nodev cgroup2 /sys/fs/cgroup",
"/bin/sh -c 'modprobe bpf_testmod || true'",
"dmesg --clear",
]
teardown=[
"dmesg --read-clear",
]

11
go/vendor/github.com/cilium/ebpf/CODEOWNERS generated vendored Normal file
View File

@@ -0,0 +1,11 @@
* @cilium/ebpf-lib-maintainers
features/ @rgo3
link/ @mmat11
perf/ @florianl
ringbuf/ @florianl
btf/ @dylandreimerink
docs/ @ti-mo

46
go/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md generated vendored Normal file
View File

@@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

5
go/vendor/github.com/cilium/ebpf/CONTRIBUTING.md generated vendored Normal file
View File

@@ -0,0 +1,5 @@
# Contributing to ebpf-go
Want to contribute to ebpf-go? There are a few things you need to know.
We wrote a [contribution guide](https://ebpf-go.dev/contributing/) to help you get started.

23
go/vendor/github.com/cilium/ebpf/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,23 @@
MIT License
Copyright (c) 2017 Nathan Sweet
Copyright (c) 2018, 2019 Cloudflare
Copyright (c) 2019 Authors of Cilium
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

3
go/vendor/github.com/cilium/ebpf/MAINTAINERS.md generated vendored Normal file
View File

@@ -0,0 +1,3 @@
# Maintainers
Maintainers can be found in the [Cilium Maintainers file](https://github.com/cilium/community/blob/main/roles/Maintainers.md)

117
go/vendor/github.com/cilium/ebpf/Makefile generated vendored Normal file
View File

@@ -0,0 +1,117 @@
# The development version of clang is distributed as the 'clang' binary,
# while stable/released versions have a version number attached.
# Pin the default clang to a stable version.
CLANG ?= clang-20
STRIP ?= llvm-strip-20
OBJCOPY ?= llvm-objcopy-20
CFLAGS := -O2 -g -Wall -Werror -mcpu=v2 $(CFLAGS)
CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/
# Obtain an absolute path to the directory of the Makefile.
# Assume the Makefile is in the root of the repository.
REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
UIDGID := $(shell stat -c '%u:%g' ${REPODIR})
# Prefer podman if installed, otherwise use docker.
# Note: Setting the var at runtime will always override.
CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker)
CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), \
--log-driver=none \
-v "$(shell go env GOCACHE)":/root/.cache/go-build \
-v "$(shell go env GOMODCACHE)":/go/pkg/mod, --user "${UIDGID}")
IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE)
VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION)
TARGETS := \
testdata/loader-clang-14 \
testdata/loader-clang-17 \
testdata/loader-$(CLANG) \
testdata/manyprogs \
testdata/btf_map_init \
testdata/invalid_map \
testdata/raw_tracepoint \
testdata/invalid_map_static \
testdata/invalid_btf_map_init \
testdata/strings \
testdata/freplace \
testdata/fentry_fexit \
testdata/iproute2_map_compat \
testdata/map_spin_lock \
testdata/subprog_reloc \
testdata/fwd_decl \
testdata/kconfig \
testdata/ksym \
testdata/kfunc \
testdata/invalid-kfunc \
testdata/kfunc-kmod \
testdata/constants \
testdata/errors \
testdata/variables \
btf/testdata/relocs \
btf/testdata/relocs_read \
btf/testdata/relocs_read_tgt \
btf/testdata/relocs_enum \
btf/testdata/tags \
cmd/bpf2go/testdata/minimal
.PHONY: all clean container-all container-shell generate
.DEFAULT_TARGET = container-all
# Build all ELF binaries using a containerized LLVM toolchain.
container-all:
+${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \
-v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
--env HOME="/tmp" \
--env BPF2GO_CC="$(CLANG)" \
--env BPF2GO_CFLAGS="$(CFLAGS)" \
"${IMAGE}:${VERSION}" \
make all
# (debug) Drop the user into a shell inside the container as root.
# Set BPF2GO_ envs to make 'make generate' just work.
container-shell:
${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \
-v "${REPODIR}":/ebpf -w /ebpf \
--env BPF2GO_CC="$(CLANG)" \
--env BPF2GO_CFLAGS="$(CFLAGS)" \
"${IMAGE}:${VERSION}"
clean:
find "$(CURDIR)" -name "*.elf" -delete
find "$(CURDIR)" -name "*.o" -delete
format:
find . -type f -name "*.c" | xargs clang-format -i
all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate
ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf
ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf
generate:
go generate -run "internal/cmd/gentypes" ./...
go generate -skip "internal/cmd/gentypes" ./...
testdata/loader-%-el.elf: testdata/loader.c
$* $(CFLAGS) -target bpfel -c $< -o $@
$(STRIP) -g $@
testdata/loader-%-eb.elf: testdata/loader.c
$* $(CFLAGS) -target bpfeb -c $< -o $@
$(STRIP) -g $@
%-el.elf: %.c
$(CLANG) $(CFLAGS) -target bpfel -c $< -o $@
$(STRIP) -g $@
%-eb.elf : %.c
$(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@
$(STRIP) -g $@
.PHONY: update-kernel-deps
update-kernel-deps: export KERNEL_VERSION?=6.8
update-kernel-deps:
./testdata/sh/update-kernel-deps.sh
$(MAKE) container-all

75
go/vendor/github.com/cilium/ebpf/README.md generated vendored Normal file
View File

@@ -0,0 +1,75 @@
# eBPF
[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf)
![HoneyGopher](docs/ebpf/ebpf-go.png)
ebpf-go is a pure Go library that provides utilities for loading, compiling, and
debugging eBPF programs. It has minimal external dependencies and is intended to
be used in long running processes.
See [ebpf.io](https://ebpf.io) for complementary projects from the wider eBPF
ecosystem.
## Getting Started
Please take a look at our [Getting Started] guide.
[Contributions](https://ebpf-go.dev/contributing) are highly encouraged, as they highlight certain use cases of
eBPF and the library, and help shape the future of the project.
## Getting Help
The community actively monitors our [GitHub Discussions](https://github.com/cilium/ebpf/discussions) page.
Please search for existing threads before starting a new one. Refrain from
opening issues on the bug tracker if you're just starting out or if you're not
sure if something is a bug in the library code.
Alternatively, [join](https://ebpf.io/slack) the
[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you
have other questions regarding the project. Note that this channel is ephemeral
and has its history erased past a certain point, which is less helpful for
others running into the same problem later.
## Packages
This library includes the following packages:
* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic
assembler, allowing you to write eBPF assembly instructions directly
within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.)
* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows
compiling and embedding eBPF programs written in C within Go code. As well as
compiling the C code, it auto-generates Go code for loading and manipulating
the eBPF program and map objects.
* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF
to various hooks
* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a
`PERF_EVENT_ARRAY`
* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a
`BPF_MAP_TYPE_RINGBUF` map
* [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent
of `bpftool feature probe` for discovering BPF-related kernel features using native Go.
* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift
the `RLIMIT_MEMLOCK` constraint on kernels before 5.11.
* [btf](https://pkg.go.dev/github.com/cilium/ebpf/btf) allows reading the BPF Type Format.
* [pin](https://pkg.go.dev/github.com/cilium/ebpf/pin) provides APIs for working with pinned objects on bpffs.
## Requirements
* A version of Go that is [supported by
upstream](https://golang.org/doc/devel/release.html#policy)
* Linux: CI is run against kernel.org LTS releases. >= 4.4 should work but EOL'ed
versions are not supported.
* Windows: CI is run against Windows Server 2022. Only the latest eBPF for Windows
release is supported.
## License
MIT
### eBPF Gopher
The eBPF honeygopher is based on the Go gopher designed by Renee French.
[Getting Started]: https://ebpf-go.dev/guides/getting-started/

180
go/vendor/github.com/cilium/ebpf/asm/alu.go generated vendored Normal file
View File

@@ -0,0 +1,180 @@
package asm
//go:generate go run golang.org/x/tools/cmd/stringer@latest -output alu_string.go -type=Source,Endianness,ALUOp
// Source of ALU / ALU64 / Branch operations
//
// msb lsb
// +------------+-+---+
// | op |S|cls|
// +------------+-+---+
type Source uint16
const sourceMask OpCode = 0x0008
// Source bitmask
const (
// InvalidSource is returned by getters when invoked
// on non ALU / branch OpCodes.
InvalidSource Source = 0xffff
// ImmSource src is from constant
ImmSource Source = 0x0000
// RegSource src is from register
RegSource Source = 0x0008
)
// The Endianness of a byte swap instruction.
type Endianness uint8
const endianMask = sourceMask
// Endian flags
const (
InvalidEndian Endianness = 0xff
// Convert to little endian
LE Endianness = 0x00
// Convert to big endian
BE Endianness = 0x08
)
// ALUOp are ALU / ALU64 operations
//
// msb lsb
// +-------+----+-+---+
// | EXT | OP |s|cls|
// +-------+----+-+---+
type ALUOp uint16
const aluMask OpCode = 0x3ff0
const (
// InvalidALUOp is returned by getters when invoked
// on non ALU OpCodes
InvalidALUOp ALUOp = 0xffff
// Add - addition
Add ALUOp = 0x0000
// Sub - subtraction
Sub ALUOp = 0x0010
// Mul - multiplication
Mul ALUOp = 0x0020
// Div - division
Div ALUOp = 0x0030
// SDiv - signed division
SDiv ALUOp = Div + 0x0100
// Or - bitwise or
Or ALUOp = 0x0040
// And - bitwise and
And ALUOp = 0x0050
// LSh - bitwise shift left
LSh ALUOp = 0x0060
// RSh - bitwise shift right
RSh ALUOp = 0x0070
// Neg - sign/unsign signing bit
Neg ALUOp = 0x0080
// Mod - modulo
Mod ALUOp = 0x0090
// SMod - signed modulo
SMod ALUOp = Mod + 0x0100
// Xor - bitwise xor
Xor ALUOp = 0x00a0
// Mov - move value from one place to another
Mov ALUOp = 0x00b0
// MovSX8 - move lower 8 bits, sign extended upper bits of target
MovSX8 ALUOp = Mov + 0x0100
// MovSX16 - move lower 16 bits, sign extended upper bits of target
MovSX16 ALUOp = Mov + 0x0200
// MovSX32 - move lower 32 bits, sign extended upper bits of target
MovSX32 ALUOp = Mov + 0x0300
// ArSh - arithmetic shift
ArSh ALUOp = 0x00c0
// Swap - endian conversions
Swap ALUOp = 0x00d0
)
// HostTo converts from host to another endianness.
func HostTo(endian Endianness, dst Register, size Size) Instruction {
	// Map the operand size to the swap width in bits; anything else
	// is rejected with an invalid instruction.
	var width int64
	switch size {
	case Half:
		width = 16
	case Word:
		width = 32
	case DWord:
		width = 64
	}
	if width == 0 {
		return Instruction{OpCode: InvalidOpCode}
	}

	op := OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian))
	return Instruction{OpCode: op, Dst: dst, Constant: width}
}
// BSwap unconditionally reverses the order of bytes in a register.
func BSwap(dst Register, size Size) Instruction {
	// Map the operand size to the swap width in bits; anything else
	// is rejected with an invalid instruction.
	var width int64
	switch size {
	case Half:
		width = 16
	case Word:
		width = 32
	case DWord:
		width = 64
	}
	if width == 0 {
		return Instruction{OpCode: InvalidOpCode}
	}

	return Instruction{OpCode: OpCode(ALU64Class).SetALUOp(Swap), Dst: dst, Constant: width}
}
// Op returns the OpCode for an ALU operation with a given source.
func (op ALUOp) Op(source Source) OpCode {
	code := OpCode(ALU64Class).SetALUOp(op)
	return code.SetSource(source)
}
// Reg emits `dst (op) src`.
func (op ALUOp) Reg(dst, src Register) Instruction {
	ins := Instruction{Dst: dst, Src: src}
	ins.OpCode = op.Op(RegSource)
	return ins
}
// Imm emits `dst (op) value`.
func (op ALUOp) Imm(dst Register, value int32) Instruction {
	ins := Instruction{Dst: dst, Constant: int64(value)}
	ins.OpCode = op.Op(ImmSource)
	return ins
}
// Op32 returns the OpCode for a 32-bit ALU operation with a given source.
func (op ALUOp) Op32(source Source) OpCode {
	code := OpCode(ALUClass).SetALUOp(op)
	return code.SetSource(source)
}
// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst.
func (op ALUOp) Reg32(dst, src Register) Instruction {
	ins := Instruction{Dst: dst, Src: src}
	ins.OpCode = op.Op32(RegSource)
	return ins
}
// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst.
func (op ALUOp) Imm32(dst Register, value int32) Instruction {
	ins := Instruction{Dst: dst, Constant: int64(value)}
	ins.OpCode = op.Op32(ImmSource)
	return ins
}

117
go/vendor/github.com/cilium/ebpf/asm/alu_string.go generated vendored Normal file
View File

@@ -0,0 +1,117 @@
// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT.
package asm
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidSource-65535]
_ = x[ImmSource-0]
_ = x[RegSource-8]
}
const (
_Source_name_0 = "ImmSource"
_Source_name_1 = "RegSource"
_Source_name_2 = "InvalidSource"
)
func (i Source) String() string {
switch {
case i == 0:
return _Source_name_0
case i == 8:
return _Source_name_1
case i == 65535:
return _Source_name_2
default:
return "Source(" + strconv.FormatInt(int64(i), 10) + ")"
}
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidEndian-255]
_ = x[LE-0]
_ = x[BE-8]
}
const (
_Endianness_name_0 = "LE"
_Endianness_name_1 = "BE"
_Endianness_name_2 = "InvalidEndian"
)
func (i Endianness) String() string {
switch {
case i == 0:
return _Endianness_name_0
case i == 8:
return _Endianness_name_1
case i == 255:
return _Endianness_name_2
default:
return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")"
}
}
// Compile-time guard emitted by stringer for the ALUOp constants;
// a changed constant value breaks the build, prompting regeneration.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[InvalidALUOp-65535]
	_ = x[Add-0]
	_ = x[Sub-16]
	_ = x[Mul-32]
	_ = x[Div-48]
	_ = x[SDiv-304]
	_ = x[Or-64]
	_ = x[And-80]
	_ = x[LSh-96]
	_ = x[RSh-112]
	_ = x[Neg-128]
	_ = x[Mod-144]
	_ = x[SMod-400]
	_ = x[Xor-160]
	_ = x[Mov-176]
	_ = x[MovSX8-432]
	_ = x[MovSX16-688]
	_ = x[MovSX32-944]
	_ = x[ArSh-192]
	_ = x[Swap-208]
}
// _ALUOp_name concatenates every ALUOp display name; _ALUOp_map slices
// it by [start:end] offsets keyed on the opcode's numeric value.
const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapSDivSModMovSX8MovSX16MovSX32InvalidALUOp"

var _ALUOp_map = map[ALUOp]string{
	0:     _ALUOp_name[0:3],
	16:    _ALUOp_name[3:6],
	32:    _ALUOp_name[6:9],
	48:    _ALUOp_name[9:12],
	64:    _ALUOp_name[12:14],
	80:    _ALUOp_name[14:17],
	96:    _ALUOp_name[17:20],
	112:   _ALUOp_name[20:23],
	128:   _ALUOp_name[23:26],
	144:   _ALUOp_name[26:29],
	160:   _ALUOp_name[29:32],
	176:   _ALUOp_name[32:35],
	192:   _ALUOp_name[35:39],
	208:   _ALUOp_name[39:43],
	304:   _ALUOp_name[43:47],
	400:   _ALUOp_name[47:51],
	432:   _ALUOp_name[51:57],
	688:   _ALUOp_name[57:64],
	944:   _ALUOp_name[64:71],
	65535: _ALUOp_name[71:83],
}
// String returns the name of the ALUOp constant, or a numeric
// fallback of the form "ALUOp(N)" for unknown values.
func (i ALUOp) String() string {
	str, ok := _ALUOp_map[i]
	if !ok {
		return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return str
}

2
go/vendor/github.com/cilium/ebpf/asm/doc.go generated vendored Normal file
View File

@@ -0,0 +1,2 @@
// Package asm is an assembler for eBPF bytecode.
package asm

23
go/vendor/github.com/cilium/ebpf/asm/func.go generated vendored Normal file
View File

@@ -0,0 +1,23 @@
package asm
import "github.com/cilium/ebpf/internal/platform"
//go:generate go run golang.org/x/tools/cmd/stringer@latest -output func_string.go -type=BuiltinFunc
// BuiltinFunc is a built-in eBPF function.
//
// Values carry a platform tag in their upper bits (encoded via
// platform.EncodeConstant, see BuiltinFuncForPlatform); the tag and the
// raw helper number can be recovered with platform.DecodeConstant.
type BuiltinFunc uint32
// BuiltinFuncForPlatform returns a platform specific function constant.
//
// Use this if the library doesn't provide a constant yet.
func BuiltinFuncForPlatform(plat string, value uint32) (BuiltinFunc, error) {
	fn, err := platform.EncodeConstant[BuiltinFunc](plat, value)
	return fn, err
}
// Call emits a function call.
func (fn BuiltinFunc) Call() Instruction {
	var ins Instruction
	ins.OpCode = OpCode(JumpClass).SetJumpOp(Call)
	ins.Constant = int64(fn)
	return ins
}

223
go/vendor/github.com/cilium/ebpf/asm/func_lin.go generated vendored Normal file
View File

@@ -0,0 +1,223 @@
// Code generated by internal/cmd/genfunctions.awk; DO NOT EDIT.
package asm
// Code in this file is derived from Linux, available under the GPL-2.0 WITH Linux-syscall-note.
import "github.com/cilium/ebpf/internal/platform"
// Built-in functions (Linux).
const (
FnUnspec = BuiltinFunc(platform.LinuxTag | 0) //lint:ignore SA4016 consistency
FnMapLookupElem = BuiltinFunc(platform.LinuxTag | 1)
FnMapUpdateElem = BuiltinFunc(platform.LinuxTag | 2)
FnMapDeleteElem = BuiltinFunc(platform.LinuxTag | 3)
FnProbeRead = BuiltinFunc(platform.LinuxTag | 4)
FnKtimeGetNs = BuiltinFunc(platform.LinuxTag | 5)
FnTracePrintk = BuiltinFunc(platform.LinuxTag | 6)
FnGetPrandomU32 = BuiltinFunc(platform.LinuxTag | 7)
FnGetSmpProcessorId = BuiltinFunc(platform.LinuxTag | 8)
FnSkbStoreBytes = BuiltinFunc(platform.LinuxTag | 9)
FnL3CsumReplace = BuiltinFunc(platform.LinuxTag | 10)
FnL4CsumReplace = BuiltinFunc(platform.LinuxTag | 11)
FnTailCall = BuiltinFunc(platform.LinuxTag | 12)
FnCloneRedirect = BuiltinFunc(platform.LinuxTag | 13)
FnGetCurrentPidTgid = BuiltinFunc(platform.LinuxTag | 14)
FnGetCurrentUidGid = BuiltinFunc(platform.LinuxTag | 15)
FnGetCurrentComm = BuiltinFunc(platform.LinuxTag | 16)
FnGetCgroupClassid = BuiltinFunc(platform.LinuxTag | 17)
FnSkbVlanPush = BuiltinFunc(platform.LinuxTag | 18)
FnSkbVlanPop = BuiltinFunc(platform.LinuxTag | 19)
FnSkbGetTunnelKey = BuiltinFunc(platform.LinuxTag | 20)
FnSkbSetTunnelKey = BuiltinFunc(platform.LinuxTag | 21)
FnPerfEventRead = BuiltinFunc(platform.LinuxTag | 22)
FnRedirect = BuiltinFunc(platform.LinuxTag | 23)
FnGetRouteRealm = BuiltinFunc(platform.LinuxTag | 24)
FnPerfEventOutput = BuiltinFunc(platform.LinuxTag | 25)
FnSkbLoadBytes = BuiltinFunc(platform.LinuxTag | 26)
FnGetStackid = BuiltinFunc(platform.LinuxTag | 27)
FnCsumDiff = BuiltinFunc(platform.LinuxTag | 28)
FnSkbGetTunnelOpt = BuiltinFunc(platform.LinuxTag | 29)
FnSkbSetTunnelOpt = BuiltinFunc(platform.LinuxTag | 30)
FnSkbChangeProto = BuiltinFunc(platform.LinuxTag | 31)
FnSkbChangeType = BuiltinFunc(platform.LinuxTag | 32)
FnSkbUnderCgroup = BuiltinFunc(platform.LinuxTag | 33)
FnGetHashRecalc = BuiltinFunc(platform.LinuxTag | 34)
FnGetCurrentTask = BuiltinFunc(platform.LinuxTag | 35)
FnProbeWriteUser = BuiltinFunc(platform.LinuxTag | 36)
FnCurrentTaskUnderCgroup = BuiltinFunc(platform.LinuxTag | 37)
FnSkbChangeTail = BuiltinFunc(platform.LinuxTag | 38)
FnSkbPullData = BuiltinFunc(platform.LinuxTag | 39)
FnCsumUpdate = BuiltinFunc(platform.LinuxTag | 40)
FnSetHashInvalid = BuiltinFunc(platform.LinuxTag | 41)
FnGetNumaNodeId = BuiltinFunc(platform.LinuxTag | 42)
FnSkbChangeHead = BuiltinFunc(platform.LinuxTag | 43)
FnXdpAdjustHead = BuiltinFunc(platform.LinuxTag | 44)
FnProbeReadStr = BuiltinFunc(platform.LinuxTag | 45)
FnGetSocketCookie = BuiltinFunc(platform.LinuxTag | 46)
FnGetSocketUid = BuiltinFunc(platform.LinuxTag | 47)
FnSetHash = BuiltinFunc(platform.LinuxTag | 48)
FnSetsockopt = BuiltinFunc(platform.LinuxTag | 49)
FnSkbAdjustRoom = BuiltinFunc(platform.LinuxTag | 50)
FnRedirectMap = BuiltinFunc(platform.LinuxTag | 51)
FnSkRedirectMap = BuiltinFunc(platform.LinuxTag | 52)
FnSockMapUpdate = BuiltinFunc(platform.LinuxTag | 53)
FnXdpAdjustMeta = BuiltinFunc(platform.LinuxTag | 54)
FnPerfEventReadValue = BuiltinFunc(platform.LinuxTag | 55)
FnPerfProgReadValue = BuiltinFunc(platform.LinuxTag | 56)
FnGetsockopt = BuiltinFunc(platform.LinuxTag | 57)
FnOverrideReturn = BuiltinFunc(platform.LinuxTag | 58)
FnSockOpsCbFlagsSet = BuiltinFunc(platform.LinuxTag | 59)
FnMsgRedirectMap = BuiltinFunc(platform.LinuxTag | 60)
FnMsgApplyBytes = BuiltinFunc(platform.LinuxTag | 61)
FnMsgCorkBytes = BuiltinFunc(platform.LinuxTag | 62)
FnMsgPullData = BuiltinFunc(platform.LinuxTag | 63)
FnBind = BuiltinFunc(platform.LinuxTag | 64)
FnXdpAdjustTail = BuiltinFunc(platform.LinuxTag | 65)
FnSkbGetXfrmState = BuiltinFunc(platform.LinuxTag | 66)
FnGetStack = BuiltinFunc(platform.LinuxTag | 67)
FnSkbLoadBytesRelative = BuiltinFunc(platform.LinuxTag | 68)
FnFibLookup = BuiltinFunc(platform.LinuxTag | 69)
FnSockHashUpdate = BuiltinFunc(platform.LinuxTag | 70)
FnMsgRedirectHash = BuiltinFunc(platform.LinuxTag | 71)
FnSkRedirectHash = BuiltinFunc(platform.LinuxTag | 72)
FnLwtPushEncap = BuiltinFunc(platform.LinuxTag | 73)
FnLwtSeg6StoreBytes = BuiltinFunc(platform.LinuxTag | 74)
FnLwtSeg6AdjustSrh = BuiltinFunc(platform.LinuxTag | 75)
FnLwtSeg6Action = BuiltinFunc(platform.LinuxTag | 76)
FnRcRepeat = BuiltinFunc(platform.LinuxTag | 77)
FnRcKeydown = BuiltinFunc(platform.LinuxTag | 78)
FnSkbCgroupId = BuiltinFunc(platform.LinuxTag | 79)
FnGetCurrentCgroupId = BuiltinFunc(platform.LinuxTag | 80)
FnGetLocalStorage = BuiltinFunc(platform.LinuxTag | 81)
FnSkSelectReuseport = BuiltinFunc(platform.LinuxTag | 82)
FnSkbAncestorCgroupId = BuiltinFunc(platform.LinuxTag | 83)
FnSkLookupTcp = BuiltinFunc(platform.LinuxTag | 84)
FnSkLookupUdp = BuiltinFunc(platform.LinuxTag | 85)
FnSkRelease = BuiltinFunc(platform.LinuxTag | 86)
FnMapPushElem = BuiltinFunc(platform.LinuxTag | 87)
FnMapPopElem = BuiltinFunc(platform.LinuxTag | 88)
FnMapPeekElem = BuiltinFunc(platform.LinuxTag | 89)
FnMsgPushData = BuiltinFunc(platform.LinuxTag | 90)
FnMsgPopData = BuiltinFunc(platform.LinuxTag | 91)
FnRcPointerRel = BuiltinFunc(platform.LinuxTag | 92)
FnSpinLock = BuiltinFunc(platform.LinuxTag | 93)
FnSpinUnlock = BuiltinFunc(platform.LinuxTag | 94)
FnSkFullsock = BuiltinFunc(platform.LinuxTag | 95)
FnTcpSock = BuiltinFunc(platform.LinuxTag | 96)
FnSkbEcnSetCe = BuiltinFunc(platform.LinuxTag | 97)
FnGetListenerSock = BuiltinFunc(platform.LinuxTag | 98)
FnSkcLookupTcp = BuiltinFunc(platform.LinuxTag | 99)
FnTcpCheckSyncookie = BuiltinFunc(platform.LinuxTag | 100)
FnSysctlGetName = BuiltinFunc(platform.LinuxTag | 101)
FnSysctlGetCurrentValue = BuiltinFunc(platform.LinuxTag | 102)
FnSysctlGetNewValue = BuiltinFunc(platform.LinuxTag | 103)
FnSysctlSetNewValue = BuiltinFunc(platform.LinuxTag | 104)
FnStrtol = BuiltinFunc(platform.LinuxTag | 105)
FnStrtoul = BuiltinFunc(platform.LinuxTag | 106)
FnSkStorageGet = BuiltinFunc(platform.LinuxTag | 107)
FnSkStorageDelete = BuiltinFunc(platform.LinuxTag | 108)
FnSendSignal = BuiltinFunc(platform.LinuxTag | 109)
FnTcpGenSyncookie = BuiltinFunc(platform.LinuxTag | 110)
FnSkbOutput = BuiltinFunc(platform.LinuxTag | 111)
FnProbeReadUser = BuiltinFunc(platform.LinuxTag | 112)
FnProbeReadKernel = BuiltinFunc(platform.LinuxTag | 113)
FnProbeReadUserStr = BuiltinFunc(platform.LinuxTag | 114)
FnProbeReadKernelStr = BuiltinFunc(platform.LinuxTag | 115)
FnTcpSendAck = BuiltinFunc(platform.LinuxTag | 116)
FnSendSignalThread = BuiltinFunc(platform.LinuxTag | 117)
FnJiffies64 = BuiltinFunc(platform.LinuxTag | 118)
FnReadBranchRecords = BuiltinFunc(platform.LinuxTag | 119)
FnGetNsCurrentPidTgid = BuiltinFunc(platform.LinuxTag | 120)
FnXdpOutput = BuiltinFunc(platform.LinuxTag | 121)
FnGetNetnsCookie = BuiltinFunc(platform.LinuxTag | 122)
FnGetCurrentAncestorCgroupId = BuiltinFunc(platform.LinuxTag | 123)
FnSkAssign = BuiltinFunc(platform.LinuxTag | 124)
FnKtimeGetBootNs = BuiltinFunc(platform.LinuxTag | 125)
FnSeqPrintf = BuiltinFunc(platform.LinuxTag | 126)
FnSeqWrite = BuiltinFunc(platform.LinuxTag | 127)
FnSkCgroupId = BuiltinFunc(platform.LinuxTag | 128)
FnSkAncestorCgroupId = BuiltinFunc(platform.LinuxTag | 129)
FnRingbufOutput = BuiltinFunc(platform.LinuxTag | 130)
FnRingbufReserve = BuiltinFunc(platform.LinuxTag | 131)
FnRingbufSubmit = BuiltinFunc(platform.LinuxTag | 132)
FnRingbufDiscard = BuiltinFunc(platform.LinuxTag | 133)
FnRingbufQuery = BuiltinFunc(platform.LinuxTag | 134)
FnCsumLevel = BuiltinFunc(platform.LinuxTag | 135)
FnSkcToTcp6Sock = BuiltinFunc(platform.LinuxTag | 136)
FnSkcToTcpSock = BuiltinFunc(platform.LinuxTag | 137)
FnSkcToTcpTimewaitSock = BuiltinFunc(platform.LinuxTag | 138)
FnSkcToTcpRequestSock = BuiltinFunc(platform.LinuxTag | 139)
FnSkcToUdp6Sock = BuiltinFunc(platform.LinuxTag | 140)
FnGetTaskStack = BuiltinFunc(platform.LinuxTag | 141)
FnLoadHdrOpt = BuiltinFunc(platform.LinuxTag | 142)
FnStoreHdrOpt = BuiltinFunc(platform.LinuxTag | 143)
FnReserveHdrOpt = BuiltinFunc(platform.LinuxTag | 144)
FnInodeStorageGet = BuiltinFunc(platform.LinuxTag | 145)
FnInodeStorageDelete = BuiltinFunc(platform.LinuxTag | 146)
FnDPath = BuiltinFunc(platform.LinuxTag | 147)
FnCopyFromUser = BuiltinFunc(platform.LinuxTag | 148)
FnSnprintfBtf = BuiltinFunc(platform.LinuxTag | 149)
FnSeqPrintfBtf = BuiltinFunc(platform.LinuxTag | 150)
FnSkbCgroupClassid = BuiltinFunc(platform.LinuxTag | 151)
FnRedirectNeigh = BuiltinFunc(platform.LinuxTag | 152)
FnPerCpuPtr = BuiltinFunc(platform.LinuxTag | 153)
FnThisCpuPtr = BuiltinFunc(platform.LinuxTag | 154)
FnRedirectPeer = BuiltinFunc(platform.LinuxTag | 155)
FnTaskStorageGet = BuiltinFunc(platform.LinuxTag | 156)
FnTaskStorageDelete = BuiltinFunc(platform.LinuxTag | 157)
FnGetCurrentTaskBtf = BuiltinFunc(platform.LinuxTag | 158)
FnBprmOptsSet = BuiltinFunc(platform.LinuxTag | 159)
FnKtimeGetCoarseNs = BuiltinFunc(platform.LinuxTag | 160)
FnImaInodeHash = BuiltinFunc(platform.LinuxTag | 161)
FnSockFromFile = BuiltinFunc(platform.LinuxTag | 162)
FnCheckMtu = BuiltinFunc(platform.LinuxTag | 163)
FnForEachMapElem = BuiltinFunc(platform.LinuxTag | 164)
FnSnprintf = BuiltinFunc(platform.LinuxTag | 165)
FnSysBpf = BuiltinFunc(platform.LinuxTag | 166)
FnBtfFindByNameKind = BuiltinFunc(platform.LinuxTag | 167)
FnSysClose = BuiltinFunc(platform.LinuxTag | 168)
FnTimerInit = BuiltinFunc(platform.LinuxTag | 169)
FnTimerSetCallback = BuiltinFunc(platform.LinuxTag | 170)
FnTimerStart = BuiltinFunc(platform.LinuxTag | 171)
FnTimerCancel = BuiltinFunc(platform.LinuxTag | 172)
FnGetFuncIp = BuiltinFunc(platform.LinuxTag | 173)
FnGetAttachCookie = BuiltinFunc(platform.LinuxTag | 174)
FnTaskPtRegs = BuiltinFunc(platform.LinuxTag | 175)
FnGetBranchSnapshot = BuiltinFunc(platform.LinuxTag | 176)
FnTraceVprintk = BuiltinFunc(platform.LinuxTag | 177)
FnSkcToUnixSock = BuiltinFunc(platform.LinuxTag | 178)
FnKallsymsLookupName = BuiltinFunc(platform.LinuxTag | 179)
FnFindVma = BuiltinFunc(platform.LinuxTag | 180)
FnLoop = BuiltinFunc(platform.LinuxTag | 181)
FnStrncmp = BuiltinFunc(platform.LinuxTag | 182)
FnGetFuncArg = BuiltinFunc(platform.LinuxTag | 183)
FnGetFuncRet = BuiltinFunc(platform.LinuxTag | 184)
FnGetFuncArgCnt = BuiltinFunc(platform.LinuxTag | 185)
FnGetRetval = BuiltinFunc(platform.LinuxTag | 186)
FnSetRetval = BuiltinFunc(platform.LinuxTag | 187)
FnXdpGetBuffLen = BuiltinFunc(platform.LinuxTag | 188)
FnXdpLoadBytes = BuiltinFunc(platform.LinuxTag | 189)
FnXdpStoreBytes = BuiltinFunc(platform.LinuxTag | 190)
FnCopyFromUserTask = BuiltinFunc(platform.LinuxTag | 191)
FnSkbSetTstamp = BuiltinFunc(platform.LinuxTag | 192)
FnImaFileHash = BuiltinFunc(platform.LinuxTag | 193)
FnKptrXchg = BuiltinFunc(platform.LinuxTag | 194)
FnMapLookupPercpuElem = BuiltinFunc(platform.LinuxTag | 195)
FnSkcToMptcpSock = BuiltinFunc(platform.LinuxTag | 196)
FnDynptrFromMem = BuiltinFunc(platform.LinuxTag | 197)
FnRingbufReserveDynptr = BuiltinFunc(platform.LinuxTag | 198)
FnRingbufSubmitDynptr = BuiltinFunc(platform.LinuxTag | 199)
FnRingbufDiscardDynptr = BuiltinFunc(platform.LinuxTag | 200)
FnDynptrRead = BuiltinFunc(platform.LinuxTag | 201)
FnDynptrWrite = BuiltinFunc(platform.LinuxTag | 202)
FnDynptrData = BuiltinFunc(platform.LinuxTag | 203)
FnTcpRawGenSyncookieIpv4 = BuiltinFunc(platform.LinuxTag | 204)
FnTcpRawGenSyncookieIpv6 = BuiltinFunc(platform.LinuxTag | 205)
FnTcpRawCheckSyncookieIpv4 = BuiltinFunc(platform.LinuxTag | 206)
FnTcpRawCheckSyncookieIpv6 = BuiltinFunc(platform.LinuxTag | 207)
FnKtimeGetTaiNs = BuiltinFunc(platform.LinuxTag | 208)
FnUserRingbufDrain = BuiltinFunc(platform.LinuxTag | 209)
FnCgrpStorageGet = BuiltinFunc(platform.LinuxTag | 210)
FnCgrpStorageDelete = BuiltinFunc(platform.LinuxTag | 211)
)

276
go/vendor/github.com/cilium/ebpf/asm/func_string.go generated vendored Normal file
View File

@@ -0,0 +1,276 @@
// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT.
package asm
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[FnUnspec-0]
_ = x[FnMapLookupElem-1]
_ = x[FnMapUpdateElem-2]
_ = x[FnMapDeleteElem-3]
_ = x[FnProbeRead-4]
_ = x[FnKtimeGetNs-5]
_ = x[FnTracePrintk-6]
_ = x[FnGetPrandomU32-7]
_ = x[FnGetSmpProcessorId-8]
_ = x[FnSkbStoreBytes-9]
_ = x[FnL3CsumReplace-10]
_ = x[FnL4CsumReplace-11]
_ = x[FnTailCall-12]
_ = x[FnCloneRedirect-13]
_ = x[FnGetCurrentPidTgid-14]
_ = x[FnGetCurrentUidGid-15]
_ = x[FnGetCurrentComm-16]
_ = x[FnGetCgroupClassid-17]
_ = x[FnSkbVlanPush-18]
_ = x[FnSkbVlanPop-19]
_ = x[FnSkbGetTunnelKey-20]
_ = x[FnSkbSetTunnelKey-21]
_ = x[FnPerfEventRead-22]
_ = x[FnRedirect-23]
_ = x[FnGetRouteRealm-24]
_ = x[FnPerfEventOutput-25]
_ = x[FnSkbLoadBytes-26]
_ = x[FnGetStackid-27]
_ = x[FnCsumDiff-28]
_ = x[FnSkbGetTunnelOpt-29]
_ = x[FnSkbSetTunnelOpt-30]
_ = x[FnSkbChangeProto-31]
_ = x[FnSkbChangeType-32]
_ = x[FnSkbUnderCgroup-33]
_ = x[FnGetHashRecalc-34]
_ = x[FnGetCurrentTask-35]
_ = x[FnProbeWriteUser-36]
_ = x[FnCurrentTaskUnderCgroup-37]
_ = x[FnSkbChangeTail-38]
_ = x[FnSkbPullData-39]
_ = x[FnCsumUpdate-40]
_ = x[FnSetHashInvalid-41]
_ = x[FnGetNumaNodeId-42]
_ = x[FnSkbChangeHead-43]
_ = x[FnXdpAdjustHead-44]
_ = x[FnProbeReadStr-45]
_ = x[FnGetSocketCookie-46]
_ = x[FnGetSocketUid-47]
_ = x[FnSetHash-48]
_ = x[FnSetsockopt-49]
_ = x[FnSkbAdjustRoom-50]
_ = x[FnRedirectMap-51]
_ = x[FnSkRedirectMap-52]
_ = x[FnSockMapUpdate-53]
_ = x[FnXdpAdjustMeta-54]
_ = x[FnPerfEventReadValue-55]
_ = x[FnPerfProgReadValue-56]
_ = x[FnGetsockopt-57]
_ = x[FnOverrideReturn-58]
_ = x[FnSockOpsCbFlagsSet-59]
_ = x[FnMsgRedirectMap-60]
_ = x[FnMsgApplyBytes-61]
_ = x[FnMsgCorkBytes-62]
_ = x[FnMsgPullData-63]
_ = x[FnBind-64]
_ = x[FnXdpAdjustTail-65]
_ = x[FnSkbGetXfrmState-66]
_ = x[FnGetStack-67]
_ = x[FnSkbLoadBytesRelative-68]
_ = x[FnFibLookup-69]
_ = x[FnSockHashUpdate-70]
_ = x[FnMsgRedirectHash-71]
_ = x[FnSkRedirectHash-72]
_ = x[FnLwtPushEncap-73]
_ = x[FnLwtSeg6StoreBytes-74]
_ = x[FnLwtSeg6AdjustSrh-75]
_ = x[FnLwtSeg6Action-76]
_ = x[FnRcRepeat-77]
_ = x[FnRcKeydown-78]
_ = x[FnSkbCgroupId-79]
_ = x[FnGetCurrentCgroupId-80]
_ = x[FnGetLocalStorage-81]
_ = x[FnSkSelectReuseport-82]
_ = x[FnSkbAncestorCgroupId-83]
_ = x[FnSkLookupTcp-84]
_ = x[FnSkLookupUdp-85]
_ = x[FnSkRelease-86]
_ = x[FnMapPushElem-87]
_ = x[FnMapPopElem-88]
_ = x[FnMapPeekElem-89]
_ = x[FnMsgPushData-90]
_ = x[FnMsgPopData-91]
_ = x[FnRcPointerRel-92]
_ = x[FnSpinLock-93]
_ = x[FnSpinUnlock-94]
_ = x[FnSkFullsock-95]
_ = x[FnTcpSock-96]
_ = x[FnSkbEcnSetCe-97]
_ = x[FnGetListenerSock-98]
_ = x[FnSkcLookupTcp-99]
_ = x[FnTcpCheckSyncookie-100]
_ = x[FnSysctlGetName-101]
_ = x[FnSysctlGetCurrentValue-102]
_ = x[FnSysctlGetNewValue-103]
_ = x[FnSysctlSetNewValue-104]
_ = x[FnStrtol-105]
_ = x[FnStrtoul-106]
_ = x[FnSkStorageGet-107]
_ = x[FnSkStorageDelete-108]
_ = x[FnSendSignal-109]
_ = x[FnTcpGenSyncookie-110]
_ = x[FnSkbOutput-111]
_ = x[FnProbeReadUser-112]
_ = x[FnProbeReadKernel-113]
_ = x[FnProbeReadUserStr-114]
_ = x[FnProbeReadKernelStr-115]
_ = x[FnTcpSendAck-116]
_ = x[FnSendSignalThread-117]
_ = x[FnJiffies64-118]
_ = x[FnReadBranchRecords-119]
_ = x[FnGetNsCurrentPidTgid-120]
_ = x[FnXdpOutput-121]
_ = x[FnGetNetnsCookie-122]
_ = x[FnGetCurrentAncestorCgroupId-123]
_ = x[FnSkAssign-124]
_ = x[FnKtimeGetBootNs-125]
_ = x[FnSeqPrintf-126]
_ = x[FnSeqWrite-127]
_ = x[FnSkCgroupId-128]
_ = x[FnSkAncestorCgroupId-129]
_ = x[FnRingbufOutput-130]
_ = x[FnRingbufReserve-131]
_ = x[FnRingbufSubmit-132]
_ = x[FnRingbufDiscard-133]
_ = x[FnRingbufQuery-134]
_ = x[FnCsumLevel-135]
_ = x[FnSkcToTcp6Sock-136]
_ = x[FnSkcToTcpSock-137]
_ = x[FnSkcToTcpTimewaitSock-138]
_ = x[FnSkcToTcpRequestSock-139]
_ = x[FnSkcToUdp6Sock-140]
_ = x[FnGetTaskStack-141]
_ = x[FnLoadHdrOpt-142]
_ = x[FnStoreHdrOpt-143]
_ = x[FnReserveHdrOpt-144]
_ = x[FnInodeStorageGet-145]
_ = x[FnInodeStorageDelete-146]
_ = x[FnDPath-147]
_ = x[FnCopyFromUser-148]
_ = x[FnSnprintfBtf-149]
_ = x[FnSeqPrintfBtf-150]
_ = x[FnSkbCgroupClassid-151]
_ = x[FnRedirectNeigh-152]
_ = x[FnPerCpuPtr-153]
_ = x[FnThisCpuPtr-154]
_ = x[FnRedirectPeer-155]
_ = x[FnTaskStorageGet-156]
_ = x[FnTaskStorageDelete-157]
_ = x[FnGetCurrentTaskBtf-158]
_ = x[FnBprmOptsSet-159]
_ = x[FnKtimeGetCoarseNs-160]
_ = x[FnImaInodeHash-161]
_ = x[FnSockFromFile-162]
_ = x[FnCheckMtu-163]
_ = x[FnForEachMapElem-164]
_ = x[FnSnprintf-165]
_ = x[FnSysBpf-166]
_ = x[FnBtfFindByNameKind-167]
_ = x[FnSysClose-168]
_ = x[FnTimerInit-169]
_ = x[FnTimerSetCallback-170]
_ = x[FnTimerStart-171]
_ = x[FnTimerCancel-172]
_ = x[FnGetFuncIp-173]
_ = x[FnGetAttachCookie-174]
_ = x[FnTaskPtRegs-175]
_ = x[FnGetBranchSnapshot-176]
_ = x[FnTraceVprintk-177]
_ = x[FnSkcToUnixSock-178]
_ = x[FnKallsymsLookupName-179]
_ = x[FnFindVma-180]
_ = x[FnLoop-181]
_ = x[FnStrncmp-182]
_ = x[FnGetFuncArg-183]
_ = x[FnGetFuncRet-184]
_ = x[FnGetFuncArgCnt-185]
_ = x[FnGetRetval-186]
_ = x[FnSetRetval-187]
_ = x[FnXdpGetBuffLen-188]
_ = x[FnXdpLoadBytes-189]
_ = x[FnXdpStoreBytes-190]
_ = x[FnCopyFromUserTask-191]
_ = x[FnSkbSetTstamp-192]
_ = x[FnImaFileHash-193]
_ = x[FnKptrXchg-194]
_ = x[FnMapLookupPercpuElem-195]
_ = x[FnSkcToMptcpSock-196]
_ = x[FnDynptrFromMem-197]
_ = x[FnRingbufReserveDynptr-198]
_ = x[FnRingbufSubmitDynptr-199]
_ = x[FnRingbufDiscardDynptr-200]
_ = x[FnDynptrRead-201]
_ = x[FnDynptrWrite-202]
_ = x[FnDynptrData-203]
_ = x[FnTcpRawGenSyncookieIpv4-204]
_ = x[FnTcpRawGenSyncookieIpv6-205]
_ = x[FnTcpRawCheckSyncookieIpv4-206]
_ = x[FnTcpRawCheckSyncookieIpv6-207]
_ = x[FnKtimeGetTaiNs-208]
_ = x[FnUserRingbufDrain-209]
_ = x[FnCgrpStorageGet-210]
_ = x[FnCgrpStorageDelete-211]
_ = x[WindowsFnMapLookupElem-268435457]
_ = x[WindowsFnMapUpdateElem-268435458]
_ = x[WindowsFnMapDeleteElem-268435459]
_ = x[WindowsFnMapLookupAndDeleteElem-268435460]
_ = x[WindowsFnTailCall-268435461]
_ = x[WindowsFnGetPrandomU32-268435462]
_ = x[WindowsFnKtimeGetBootNs-268435463]
_ = x[WindowsFnGetSmpProcessorId-268435464]
_ = x[WindowsFnKtimeGetNs-268435465]
_ = x[WindowsFnCsumDiff-268435466]
_ = x[WindowsFnRingbufOutput-268435467]
_ = x[WindowsFnTracePrintk2-268435468]
_ = x[WindowsFnTracePrintk3-268435469]
_ = x[WindowsFnTracePrintk4-268435470]
_ = x[WindowsFnTracePrintk5-268435471]
_ = x[WindowsFnMapPushElem-268435472]
_ = x[WindowsFnMapPopElem-268435473]
_ = x[WindowsFnMapPeekElem-268435474]
_ = x[WindowsFnGetCurrentPidTgid-268435475]
_ = x[WindowsFnGetCurrentLogonId-268435476]
_ = x[WindowsFnIsCurrentAdmin-268435477]
_ = x[WindowsFnMemcpy-268435478]
_ = x[WindowsFnMemcmp-268435479]
_ = x[WindowsFnMemset-268435480]
_ = x[WindowsFnMemmove-268435481]
_ = x[WindowsFnGetSocketCookie-268435482]
_ = x[WindowsFnStrncpyS-268435483]
_ = x[WindowsFnStrncatS-268435484]
_ = x[WindowsFnStrnlenS-268435485]
_ = x[WindowsFnKtimeGetBootMs-268435486]
_ = x[WindowsFnKtimeGetMs-268435487]
}
const (
_BuiltinFunc_name_0 = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnR
ingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDelete"
_BuiltinFunc_name_1 = "WindowsFnMapLookupElemWindowsFnMapUpdateElemWindowsFnMapDeleteElemWindowsFnMapLookupAndDeleteElemWindowsFnTailCallWindowsFnGetPrandomU32WindowsFnKtimeGetBootNsWindowsFnGetSmpProcessorIdWindowsFnKtimeGetNsWindowsFnCsumDiffWindowsFnRingbufOutputWindowsFnTracePrintk2WindowsFnTracePrintk3WindowsFnTracePrintk4WindowsFnTracePrintk5WindowsFnMapPushElemWindowsFnMapPopElemWindowsFnMapPeekElemWindowsFnGetCurrentPidTgidWindowsFnGetCurrentLogonIdWindowsFnIsCurrentAdminWindowsFnMemcpyWindowsFnMemcmpWindowsFnMemsetWindowsFnMemmoveWindowsFnGetSocketCookieWindowsFnStrncpySWindowsFnStrncatSWindowsFnStrnlenSWindowsFnKtimeGetBootMsWindowsFnKtimeGetMs"
)
var (
_BuiltinFunc_index_0 = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165}
_BuiltinFunc_index_1 = [...]uint16{0, 22, 44, 66, 97, 114, 136, 159, 185, 204, 221, 243, 264, 285, 306, 327, 347, 366, 386, 412, 438, 461, 476, 491, 506, 522, 546, 563, 580, 597, 620, 639}
)
// String returns the name of the built-in function, looking first in the
// Linux range, then in the Windows (tagged) range, and falling back to
// "BuiltinFunc(N)" for unknown values.
func (i BuiltinFunc) String() string {
	if i <= 211 {
		return _BuiltinFunc_name_0[_BuiltinFunc_index_0[i]:_BuiltinFunc_index_0[i+1]]
	}
	if 268435457 <= i && i <= 268435487 {
		j := i - 268435457
		return _BuiltinFunc_name_1[_BuiltinFunc_index_1[j]:_BuiltinFunc_index_1[j+1]]
	}
	return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")"
}

44
go/vendor/github.com/cilium/ebpf/asm/func_win.go generated vendored Normal file
View File

@@ -0,0 +1,44 @@
// Code generated by internal/cmd/genwinfunctions.awk; DO NOT EDIT.
package asm
// Code in this file is derived from eBPF for Windows, available under the MIT License.
import (
"github.com/cilium/ebpf/internal/platform"
)
// Built-in functions (Windows).
//
// Each value ORs the helper ID from eBPF for Windows with
// platform.WindowsTag; the tag and raw ID can later be recovered via
// platform.DecodeConstant (see Instruction.Marshal).
const (
	WindowsFnMapLookupElem          = BuiltinFunc(platform.WindowsTag | 1)
	WindowsFnMapUpdateElem          = BuiltinFunc(platform.WindowsTag | 2)
	WindowsFnMapDeleteElem          = BuiltinFunc(platform.WindowsTag | 3)
	WindowsFnMapLookupAndDeleteElem = BuiltinFunc(platform.WindowsTag | 4)
	WindowsFnTailCall               = BuiltinFunc(platform.WindowsTag | 5)
	WindowsFnGetPrandomU32          = BuiltinFunc(platform.WindowsTag | 6)
	WindowsFnKtimeGetBootNs         = BuiltinFunc(platform.WindowsTag | 7)
	WindowsFnGetSmpProcessorId      = BuiltinFunc(platform.WindowsTag | 8)
	WindowsFnKtimeGetNs             = BuiltinFunc(platform.WindowsTag | 9)
	WindowsFnCsumDiff               = BuiltinFunc(platform.WindowsTag | 10)
	WindowsFnRingbufOutput          = BuiltinFunc(platform.WindowsTag | 11)
	WindowsFnTracePrintk2           = BuiltinFunc(platform.WindowsTag | 12)
	WindowsFnTracePrintk3           = BuiltinFunc(platform.WindowsTag | 13)
	WindowsFnTracePrintk4           = BuiltinFunc(platform.WindowsTag | 14)
	WindowsFnTracePrintk5           = BuiltinFunc(platform.WindowsTag | 15)
	WindowsFnMapPushElem            = BuiltinFunc(platform.WindowsTag | 16)
	WindowsFnMapPopElem             = BuiltinFunc(platform.WindowsTag | 17)
	WindowsFnMapPeekElem            = BuiltinFunc(platform.WindowsTag | 18)
	WindowsFnGetCurrentPidTgid      = BuiltinFunc(platform.WindowsTag | 19)
	WindowsFnGetCurrentLogonId      = BuiltinFunc(platform.WindowsTag | 20)
	WindowsFnIsCurrentAdmin         = BuiltinFunc(platform.WindowsTag | 21)
	WindowsFnMemcpy                 = BuiltinFunc(platform.WindowsTag | 22)
	WindowsFnMemcmp                 = BuiltinFunc(platform.WindowsTag | 23)
	WindowsFnMemset                 = BuiltinFunc(platform.WindowsTag | 24)
	WindowsFnMemmove                = BuiltinFunc(platform.WindowsTag | 25)
	WindowsFnGetSocketCookie        = BuiltinFunc(platform.WindowsTag | 26)
	WindowsFnStrncpyS               = BuiltinFunc(platform.WindowsTag | 27)
	WindowsFnStrncatS               = BuiltinFunc(platform.WindowsTag | 28)
	WindowsFnStrnlenS               = BuiltinFunc(platform.WindowsTag | 29)
	WindowsFnKtimeGetBootMs         = BuiltinFunc(platform.WindowsTag | 30)
	WindowsFnKtimeGetMs             = BuiltinFunc(platform.WindowsTag | 31)
)

978
go/vendor/github.com/cilium/ebpf/asm/instruction.go generated vendored Normal file
View File

@@ -0,0 +1,978 @@
package asm
import (
"crypto/sha1"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"math"
"sort"
"strings"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/platform"
"github.com/cilium/ebpf/internal/sys"
)
// InstructionSize is the size of a BPF instruction in bytes.
const InstructionSize = 8

// RawInstructionOffset is an offset in units of raw BPF instructions.
type RawInstructionOffset uint64

// Sentinel errors exported by this package; match them with errors.Is.
var ErrUnreferencedSymbol = errors.New("unreferenced symbol")
var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference")
var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference")
// Bytes returns the offset of an instruction in bytes.
func (rio RawInstructionOffset) Bytes() uint64 {
	return InstructionSize * uint64(rio)
}
// Instruction is a single eBPF instruction.
type Instruction struct {
	// OpCode holds the operation, including its class, mode and source bits.
	OpCode OpCode
	// Dst and Src are the destination and source registers, packed into a
	// single byte (4 bits each) in the wire encoding.
	Dst Register
	Src Register
	// Offset is the signed 16-bit offset field of the raw encoding.
	Offset int16
	// Constant is the immediate value; 64 bits wide for dword loads,
	// otherwise sign-extended from 32 bits.
	Constant int64
	// Metadata contains optional metadata about this instruction.
	Metadata Metadata
}
// Unmarshal decodes a single BPF instruction from r using the given byte
// order, normalizing extended encodings (signed div/mod, sign-extending
// moves, atomics, 64-bit immediates) into this package's representation.
// For dword loads a second 8-byte slot is consumed to obtain the upper
// half of the 64-bit immediate.
//
// NOTE(review): the platform parameter shadows the imported platform
// package inside this function.
func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder, platform string) error {
	data := make([]byte, InstructionSize)
	if _, err := io.ReadFull(r, data); err != nil {
		return err
	}

	ins.OpCode = OpCode(data[0])

	// The second byte packs both registers, 4 bits each; which nibble
	// holds Dst depends on the byte order.
	regs := data[1]
	switch bo {
	case binary.LittleEndian:
		ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4)
	case binary.BigEndian:
		ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf)
	}

	ins.Offset = int16(bo.Uint16(data[2:4]))

	// Convert to int32 before widening to int64
	// to ensure the signed bit is carried over.
	ins.Constant = int64(int32(bo.Uint32(data[4:8])))

	if ins.IsBuiltinCall() {
		if ins.Constant >= 0 {
			// Leave negative constants from the instruction stream
			// unchanged. These are sometimes used as placeholders for later
			// patching.
			// This relies on not having a valid platform tag with a high bit set.
			fn, err := BuiltinFuncForPlatform(platform, uint32(ins.Constant))
			if err != nil {
				return err
			}
			ins.Constant = int64(fn)
		}
	} else if ins.OpCode.Class().IsALU() {
		// Fold the extended ALU encodings (base opcode plus a magic
		// Offset) into dedicated ALUOp values so that the rest of the
		// package can treat them like ordinary opcodes.
		switch ins.OpCode.ALUOp() {
		case Div:
			if ins.Offset == 1 {
				ins.OpCode = ins.OpCode.SetALUOp(SDiv)
				ins.Offset = 0
			}
		case Mod:
			if ins.Offset == 1 {
				ins.OpCode = ins.OpCode.SetALUOp(SMod)
				ins.Offset = 0
			}
		case Mov:
			switch ins.Offset {
			case 8:
				ins.OpCode = ins.OpCode.SetALUOp(MovSX8)
				ins.Offset = 0
			case 16:
				ins.OpCode = ins.OpCode.SetALUOp(MovSX16)
				ins.Offset = 0
			case 32:
				ins.OpCode = ins.OpCode.SetALUOp(MovSX32)
				ins.Offset = 0
			}
		}
	} else if ins.OpCode.Class() == StXClass &&
		ins.OpCode.Mode() == AtomicMode {
		// For atomic ops, part of the opcode is stored in the
		// constant field. Shift over 8 bytes so we can OR with the actual opcode and
		// apply `atomicMask` to avoid merging unknown bits that may be added in the future.
		ins.OpCode |= (OpCode((ins.Constant << 8)) & atomicMask)
	}

	if !ins.OpCode.IsDWordLoad() {
		return nil
	}

	// Pull another instruction from the stream to retrieve the second
	// half of the 64-bit immediate value.
	if _, err := io.ReadFull(r, data); err != nil {
		// No Wrap, to avoid io.EOF clash
		return errors.New("64bit immediate is missing second half")
	}

	// Require that all fields other than the value are zero.
	if bo.Uint32(data[0:4]) != 0 {
		return errors.New("64bit immediate has non-zero fields")
	}

	// Combine the lower half (already in Constant) with the upper half.
	cons1 := uint32(ins.Constant)
	cons2 := int32(bo.Uint32(data[4:8]))
	ins.Constant = int64(cons2)<<32 | int64(cons1)

	return nil
}
// Marshal encodes a BPF instruction.
//
// Writes one raw instruction (two for DWord loads) to w and returns the
// number of bytes written. This is the inverse of Unmarshal: extended ALU
// ops and atomic ops are lowered back to their wire encoding.
func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) {
	if ins.OpCode == InvalidOpCode {
		return 0, errors.New("invalid opcode")
	}

	isDWordLoad := ins.OpCode.IsDWordLoad()

	cons := int32(ins.Constant)
	if isDWordLoad {
		// Encode least significant 32bit first for 64bit operations.
		cons = int32(uint32(ins.Constant))
	}

	regs, err := newBPFRegisters(ins.Dst, ins.Src, bo)
	if err != nil {
		return 0, fmt.Errorf("can't marshal registers: %s", err)
	}

	if ins.IsBuiltinCall() {
		// Reject helpers tagged for a different platform than the one
		// we are marshaling for.
		fn := BuiltinFunc(ins.Constant)
		plat, value := platform.DecodeConstant(fn)
		if plat != platform.Native {
			return 0, fmt.Errorf("function %s (%s): %w", fn, plat, internal.ErrNotSupportedOnOS)
		}
		cons = int32(value)
	} else if ins.OpCode.Class().IsALU() {
		// Lower extended (signed / sign-extending) ALU ops to the base op
		// plus the magic Offset the kernel expects.
		newOffset := int16(0)
		switch ins.OpCode.ALUOp() {
		case SDiv:
			ins.OpCode = ins.OpCode.SetALUOp(Div)
			newOffset = 1
		case SMod:
			ins.OpCode = ins.OpCode.SetALUOp(Mod)
			newOffset = 1
		case MovSX8:
			ins.OpCode = ins.OpCode.SetALUOp(Mov)
			newOffset = 8
		case MovSX16:
			ins.OpCode = ins.OpCode.SetALUOp(Mov)
			newOffset = 16
		case MovSX32:
			ins.OpCode = ins.OpCode.SetALUOp(Mov)
			newOffset = 32
		}
		if newOffset != 0 && ins.Offset != 0 {
			return 0, fmt.Errorf("extended ALU opcodes should have an .Offset of 0: %s", ins)
		}
		ins.Offset = newOffset
	} else if atomic := ins.OpCode.AtomicOp(); atomic != InvalidAtomic {
		// Atomic ops carry part of the opcode in the constant field; move
		// it back there for the wire format (inverse of Unmarshal).
		ins.OpCode = ins.OpCode &^ atomicMask
		ins.Constant = int64(atomic >> 8)
	}

	op, err := ins.OpCode.bpfOpCode()
	if err != nil {
		return 0, err
	}

	data := make([]byte, InstructionSize)
	data[0] = op
	data[1] = byte(regs)
	bo.PutUint16(data[2:4], uint16(ins.Offset))
	bo.PutUint32(data[4:8], uint32(cons))
	if _, err := w.Write(data); err != nil {
		return 0, err
	}

	if !isDWordLoad {
		return InstructionSize, nil
	}

	// The first half of the second part of a double-wide instruction
	// must be zero. The second half carries the value.
	bo.PutUint32(data[0:4], 0)
	bo.PutUint32(data[4:8], uint32(ins.Constant>>32))
	if _, err := w.Write(data); err != nil {
		return 0, err
	}

	return 2 * InstructionSize, nil
}
// AssociateMap associates a Map with this Instruction.
//
// Implicitly clears the Instruction's Reference field.
//
// Returns an error if the Instruction is not a map load.
func (ins *Instruction) AssociateMap(m FDer) error {
	if !ins.IsLoadFromMap() {
		return errors.New("not a load from a map")
	}

	// Drop any symbolic reference and attach the concrete map instead.
	ins.Metadata.Set(referenceMeta{}, nil)
	ins.Metadata.Set(mapMeta{}, m)

	return nil
}
// RewriteMapPtr changes an instruction to use a new map fd.
//
// Returns an error if the instruction doesn't load a map.
//
// Deprecated: use AssociateMap instead. If you cannot provide a Map,
// wrap an fd in a type implementing FDer.
func (ins *Instruction) RewriteMapPtr(fd int) error {
	if ins.IsLoadFromMap() {
		ins.encodeMapFD(fd)
		return nil
	}
	return errors.New("not a load from a map")
}
// encodeMapFD stores fd in the lower 32 bits of the Constant field,
// keeping the upper 32 bits (the direct-load offset) intact.
func (ins *Instruction) encodeMapFD(fd int) {
	const offsetMask = uint64(math.MaxUint32) << 32
	packed := (uint64(ins.Constant) & offsetMask) | uint64(uint32(fd))
	ins.Constant = int64(packed)
}
// MapPtr returns the map fd for this instruction.
//
// The result is undefined if the instruction is not a load from a map,
// see IsLoadFromMap.
//
// Deprecated: use Map() instead.
func (ins *Instruction) MapPtr() int {
	// Prefer the FD of a map associated via metadata.
	meta := ins.Metadata.Get(mapMeta{})
	if meta == nil {
		// Fall back to the fd stored in the Constant field.
		return ins.mapFd()
	}
	return meta.(FDer).FD()
}
// mapFd returns the map file descriptor stored in the 32 least significant
// bits of ins' Constant field.
func (ins *Instruction) mapFd() int {
	lower32 := int32(ins.Constant)
	return int(lower32)
}
// RewriteMapOffset changes the offset of a direct load from a map.
//
// Returns an error if the instruction is not a direct load.
func (ins *Instruction) RewriteMapOffset(offset uint32) error {
	if !ins.OpCode.IsDWordLoad() {
		return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
	}

	if ins.Src != PseudoMapValue {
		return errors.New("not a direct load from a map")
	}

	// Keep the fd in the lower half, install the new offset in the upper half.
	lower := uint64(uint32(ins.Constant))
	ins.Constant = int64(uint64(offset)<<32 | lower)
	return nil
}
// mapOffset returns the direct-load offset stored in the upper 32 bits
// of the Constant field.
func (ins *Instruction) mapOffset() uint32 {
	upper := uint64(ins.Constant) >> 32
	return uint32(upper)
}
// IsLoadFromMap returns true if the instruction loads from a map.
//
// This covers both loading the map pointer and direct map value loads.
func (ins *Instruction) IsLoadFromMap() bool {
	if ins.OpCode != LoadImmOp(DWord) {
		return false
	}
	return ins.Src == PseudoMapFD || ins.Src == PseudoMapValue
}
// IsFunctionCall returns true if the instruction calls another BPF function.
//
// This is not the same thing as a BPF helper call.
func (ins *Instruction) IsFunctionCall() bool {
	return ins.Src == PseudoCall && ins.OpCode.JumpOp() == Call
}
// IsKfuncCall returns true if the instruction calls a kfunc.
//
// This is not the same thing as a BPF helper call.
func (ins *Instruction) IsKfuncCall() bool {
	return ins.Src == PseudoKfuncCall && ins.OpCode.JumpOp() == Call
}
// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer.
func (ins *Instruction) IsLoadOfFunctionPointer() bool {
	return ins.Src == PseudoFunc && ins.OpCode.IsDWordLoad()
}
// IsFunctionReference returns true if the instruction references another BPF
// function, either by invoking a Call jump operation or by loading a function
// pointer.
func (ins *Instruction) IsFunctionReference() bool {
	if ins.IsFunctionCall() {
		return true
	}
	return ins.IsLoadOfFunctionPointer()
}
// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call.
func (ins *Instruction) IsBuiltinCall() bool {
	return ins.Dst == R0 && ins.Src == R0 && ins.OpCode.JumpOp() == Call
}
// IsConstantLoad returns true if the instruction loads a constant of the
// given size.
func (ins *Instruction) IsConstantLoad(size Size) bool {
	return ins.Offset == 0 && ins.Src == R0 && ins.OpCode == LoadImmOp(size)
}
// Format implements fmt.Formatter.
//
// Only the 'v' verb is supported; anything else prints an UNRECOGNIZED
// marker. The output is a human-readable rendering of the instruction,
// with the referenced symbol (if any) appended in angle brackets.
func (ins Instruction) Format(f fmt.State, c rune) {
	if c != 'v' {
		fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c)
		return
	}

	op := ins.OpCode

	if op == InvalidOpCode {
		fmt.Fprint(f, "INVALID")
		return
	}

	// Omit trailing space for Exit
	if op.JumpOp() == Exit {
		fmt.Fprint(f, op)
		return
	}

	if ins.IsLoadFromMap() {
		fd := ins.mapFd()
		m := ins.Map()
		// Prefer printing the associated map over the raw fd when available.
		switch ins.Src {
		case PseudoMapFD:
			if m != nil {
				fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m)
			} else {
				fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
			}

		case PseudoMapValue:
			if m != nil {
				fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset())
			} else {
				fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset())
			}
		}

		// Skip the generic class-based formatting below; the reference is
		// still appended at the ref label.
		goto ref
	}

	switch cls := op.Class(); {
	case cls.isLoadOrStore():
		fmt.Fprintf(f, "%v ", op)
		// Which operands are meaningful depends on the addressing mode.
		switch op.Mode() {
		case ImmMode:
			fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant)
		case AbsMode:
			fmt.Fprintf(f, "imm: %d", ins.Constant)
		case IndMode:
			fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant)
		case MemMode, MemSXMode:
			fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant)
		case AtomicMode:
			fmt.Fprintf(f, "dst: %s src: %s off: %d", ins.Dst, ins.Src, ins.Offset)
		}

	case cls.IsALU():
		fmt.Fprintf(f, "%v", op)
		if op == Swap.Op(ImmSource) {
			fmt.Fprintf(f, "%d", ins.Constant)
		}

		fmt.Fprintf(f, " dst: %s ", ins.Dst)
		switch {
		case op.ALUOp() == Swap:
			break
		case op.Source() == ImmSource:
			fmt.Fprintf(f, "imm: %d", ins.Constant)
		default:
			fmt.Fprintf(f, "src: %s", ins.Src)
		}

	case cls.IsJump():
		fmt.Fprintf(f, "%v ", op)
		switch jop := op.JumpOp(); jop {
		case Call:
			switch ins.Src {
			case PseudoCall:
				// bpf-to-bpf call
				fmt.Fprint(f, ins.Constant)
			case PseudoKfuncCall:
				// kfunc call
				fmt.Fprintf(f, "Kfunc(%d)", ins.Constant)
			default:
				fmt.Fprint(f, BuiltinFunc(ins.Constant))
			}

		case Ja:
			// 32-bit jumps keep the target in Constant, 64-bit in Offset.
			if ins.OpCode.Class() == Jump32Class {
				fmt.Fprintf(f, "imm: %d", ins.Constant)
			} else {
				fmt.Fprintf(f, "off: %d", ins.Offset)
			}

		default:
			fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset)
			if op.Source() == ImmSource {
				fmt.Fprintf(f, "imm: %d", ins.Constant)
			} else {
				fmt.Fprintf(f, "src: %s", ins.Src)
			}
		}
	default:
		fmt.Fprintf(f, "%v ", op)
	}

ref:
	if ins.Reference() != "" {
		fmt.Fprintf(f, " <%s>", ins.Reference())
	}
}
// equal reports whether both instructions encode to the same fields,
// ignoring Metadata.
func (ins Instruction) equal(other Instruction) bool {
	if ins.OpCode != other.OpCode || ins.Dst != other.Dst || ins.Src != other.Src {
		return false
	}
	return ins.Offset == other.Offset && ins.Constant == other.Constant
}
// Size returns the amount of bytes ins would occupy in binary form.
func (ins Instruction) Size() uint64 {
	raw := ins.OpCode.rawInstructions()
	return uint64(InstructionSize) * uint64(raw)
}
// WithMetadata sets the given Metadata on the Instruction. e.g. to copy
// Metadata from another Instruction when replacing it.
func (ins Instruction) WithMetadata(meta Metadata) Instruction {
	out := ins
	out.Metadata = meta
	return out
}
// symbolMeta is the Metadata key under which a symbol name is stored.
type symbolMeta struct{}

// WithSymbol marks the Instruction as a Symbol, which other Instructions
// can point to using corresponding calls to WithReference.
func (ins Instruction) WithSymbol(name string) Instruction {
	out := ins
	out.Metadata.Set(symbolMeta{}, name)
	return out
}
// Sym creates a symbol.
//
// Deprecated: use WithSymbol instead.
func (ins Instruction) Sym(name string) Instruction {
	// Thin backward-compatibility wrapper around WithSymbol.
	return ins.WithSymbol(name)
}
// Symbol returns the value ins has been marked with using WithSymbol,
// otherwise returns an empty string. A symbol is often an Instruction
// at the start of a function body.
func (ins Instruction) Symbol() string {
	if sym, ok := ins.Metadata.Get(symbolMeta{}).(string); ok {
		return sym
	}
	return ""
}
// referenceMeta is the Metadata key under which a reference name is stored.
type referenceMeta struct{}

// WithReference makes ins reference another Symbol or map by name.
func (ins Instruction) WithReference(ref string) Instruction {
	out := ins
	out.Metadata.Set(referenceMeta{}, ref)
	return out
}
// Reference returns the Symbol or map name referenced by ins, if any.
func (ins Instruction) Reference() string {
	if ref, ok := ins.Metadata.Get(referenceMeta{}).(string); ok {
		return ref
	}
	return ""
}
// mapMeta is the Metadata key under which an associated map is stored.
type mapMeta struct{}

// Map returns the Map referenced by ins, if any.
// An Instruction will contain a Map if e.g. it references an existing,
// pinned map that was opened during ELF loading.
func (ins Instruction) Map() FDer {
	if m, ok := ins.Metadata.Get(mapMeta{}).(FDer); ok {
		return m
	}
	return nil
}
// sourceMeta is the Metadata key under which source information is stored.
type sourceMeta struct{}

// WithSource adds source information about the Instruction.
func (ins Instruction) WithSource(src fmt.Stringer) Instruction {
	out := ins
	out.Metadata.Set(sourceMeta{}, src)
	return out
}
// Source returns source information about the Instruction. The field is
// present when the compiler emits BTF line info about the Instruction and
// usually contains the line of source code responsible for it.
func (ins Instruction) Source() fmt.Stringer {
	if src, ok := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer); ok {
		return src
	}
	return nil
}
// A Comment can be passed to Instruction.WithSource to add a comment
// to an instruction.
type Comment string

// String returns the comment text, satisfying fmt.Stringer.
func (s Comment) String() string {
	return string(s)
}
// FDer represents a resource tied to an underlying file descriptor.
// Used as a stand-in for e.g. ebpf.Map since that type cannot be
// imported here and FD() is the only method we rely on.
type FDer interface {
	// FD returns the underlying file descriptor.
	FD() int
}
// Instructions is an eBPF program.
//
// The zero value (a nil slice) is a valid, empty program.
type Instructions []Instruction
// AppendInstructions decodes [Instruction] from r and appends them to insns.
//
// Decoding stops at io.EOF; any other error is returned annotated with the
// raw instruction offset at which it occurred.
func AppendInstructions(insns Instructions, r io.Reader, bo binary.ByteOrder, platform string) (Instructions, error) {
	offset := uint64(0)
	for {
		var ins Instruction
		if err := ins.Unmarshal(r, bo, platform); err != nil {
			if errors.Is(err, io.EOF) {
				return insns, nil
			}
			return nil, fmt.Errorf("offset %d: %w", offset, err)
		}

		insns = append(insns, ins)
		offset += ins.Size()
	}
}
// Name returns the name of the function insns belongs to, if any.
func (insns Instructions) Name() string {
	if len(insns) > 0 {
		return insns[0].Symbol()
	}
	return ""
}
// String renders the program using the default %v formatting.
func (insns Instructions) String() string {
	return fmt.Sprintf("%v", insns)
}
// Size returns the amount of bytes insns would occupy in binary form.
func (insns Instructions) Size() uint64 {
	var total uint64
	for i := range insns {
		total += insns[i].Size()
	}
	return total
}
// AssociateMap updates all Instructions that Reference the given symbol
// to point to an existing Map m instead.
//
// Returns ErrUnreferencedSymbol error if no references to symbol are found
// in insns. If symbol is anything else than the symbol name of map (e.g.
// a bpf2bpf subprogram), an error is returned.
func (insns Instructions) AssociateMap(symbol string, m FDer) error {
	if symbol == "" {
		return errors.New("empty symbol")
	}

	found := false
	for i := range insns {
		if insns[i].Reference() != symbol {
			continue
		}

		if err := insns[i].AssociateMap(m); err != nil {
			return err
		}
		found = true
	}

	if found {
		return nil
	}
	return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol)
}
// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd.
//
// Returns ErrUnreferencedSymbol if the symbol isn't used.
//
// Deprecated: use AssociateMap instead.
func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
	if symbol == "" {
		return errors.New("empty symbol")
	}

	found := false
	for i := range insns {
		ins := &insns[i]
		if ins.Reference() != symbol {
			continue
		}

		if !ins.IsLoadFromMap() {
			return errors.New("not a load from a map")
		}
		ins.encodeMapFD(fd)
		found = true
	}

	if found {
		return nil
	}
	return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol)
}
// SymbolOffsets returns the set of symbols and their offset in
// the instructions.
func (insns Instructions) SymbolOffsets() (map[string]int, error) {
	offsets := make(map[string]int)

	for i, ins := range insns {
		sym := ins.Symbol()
		if sym == "" {
			continue
		}

		if _, dup := offsets[sym]; dup {
			return nil, fmt.Errorf("duplicate symbol %s", sym)
		}
		offsets[sym] = i
	}

	return offsets, nil
}
// FunctionReferences returns a set of symbol names these Instructions make
// bpf-to-bpf calls to, sorted alphabetically.
func (insns Instructions) FunctionReferences() []string {
	seen := make(map[string]struct{})
	for _, ins := range insns {
		// BPF-to-BPF calls have -1 constants.
		if ins.Constant != -1 || !ins.IsFunctionReference() {
			continue
		}

		if ref := ins.Reference(); ref != "" {
			seen[ref] = struct{}{}
		}
	}

	result := make([]string, 0, len(seen))
	for ref := range seen {
		result = append(result, ref)
	}
	sort.Strings(result)

	return result
}
// ReferenceOffsets returns the set of references and their offset in
// the instructions.
func (insns Instructions) ReferenceOffsets() map[string][]int {
	offsets := make(map[string][]int)

	for i, ins := range insns {
		ref := ins.Reference()
		if ref != "" {
			offsets[ref] = append(offsets[ref], i)
		}
	}

	return offsets
}
// Format implements fmt.Formatter.
//
// You can control indentation of symbols by
// specifying a width. Setting a precision controls the indentation of
// instructions.
// The default character is a tab, which can be overridden by specifying
// the ' ' space flag.
func (insns Instructions) Format(f fmt.State, c rune) {
	if c != 's' && c != 'v' {
		fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c)
		return
	}

	// Precision is better in this case, because it allows
	// specifying 0 padding easily.
	padding, ok := f.Precision()
	if !ok {
		padding = 1
	}

	indent := strings.Repeat("\t", padding)
	if f.Flag(' ') {
		indent = strings.Repeat(" ", padding)
	}

	// Symbols are indented one level less than instructions by default.
	symPadding, ok := f.Width()
	if !ok {
		symPadding = padding - 1
	}
	if symPadding < 0 {
		symPadding = 0
	}

	symIndent := strings.Repeat("\t", symPadding)
	if f.Flag(' ') {
		symIndent = strings.Repeat(" ", symPadding)
	}

	// Guess how many digits we need at most, by assuming that all instructions
	// are double wide.
	// NOTE(review): for empty insns this takes Log10(0) == -Inf; the loop
	// below then never runs so offsetWidth is unused — confirm.
	highestOffset := len(insns) * 2
	offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset))))

	iter := insns.Iterate()
	for iter.Next() {
		if iter.Ins.Symbol() != "" {
			fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol())
		}
		// Print the originating source line, if BTF line info attached one.
		if src := iter.Ins.Source(); src != nil {
			line := strings.TrimSpace(src.String())
			if line != "" {
				fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line)
			}
		}
		fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins)
	}
}
// Marshal encodes a BPF program into the kernel format.
//
// insns may be modified if there are unresolved jumps or bpf2bpf calls.
//
// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction
// without a matching Symbol Instruction within insns.
func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
	// Resolve symbolic jump targets and map references before emitting.
	if err := insns.encodeFunctionReferences(); err != nil {
		return err
	}
	if err := insns.encodeMapPointers(); err != nil {
		return err
	}

	for i := range insns {
		if _, err := insns[i].Marshal(w, bo); err != nil {
			return fmt.Errorf("instruction %d: %w", i, err)
		}
	}
	return nil
}
// Tag calculates the kernel tag for a series of instructions.
//
// It mirrors bpf_prog_calc_tag in the kernel and so can be compared
// to ProgramInfo.Tag to figure out whether a loaded program matches
// certain instructions.
func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
	h := sha1.New()
	for i, ins := range insns {
		// Zero out map load constants: ins is a copy, so this does not
		// modify insns. Presumably this is because the constant holds a
		// process-local fd/offset — TODO confirm against the kernel.
		if ins.IsLoadFromMap() {
			ins.Constant = 0
		}
		_, err := ins.Marshal(h, bo)
		if err != nil {
			return "", fmt.Errorf("instruction %d: %w", i, err)
		}
	}
	return hex.EncodeToString(h.Sum(nil)[:sys.BPF_TAG_SIZE]), nil
}
// encodeFunctionReferences populates the Offset (or Constant, depending on
// the instruction type) field of instructions with a Reference field to point
// to the offset of the corresponding instruction with a matching Symbol field.
//
// Only Reference Instructions that are either jumps or BPF function references
// (calls or function pointer loads) are populated.
//
// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction
// without at least one corresponding Symbol Instruction within insns.
func (insns Instructions) encodeFunctionReferences() error {
	// Index the offsets of instructions tagged as a symbol.
	symbolOffsets := make(map[string]RawInstructionOffset)
	iter := insns.Iterate()
	for iter.Next() {
		ins := iter.Ins

		if ins.Symbol() == "" {
			continue
		}

		if _, ok := symbolOffsets[ins.Symbol()]; ok {
			return fmt.Errorf("duplicate symbol %s", ins.Symbol())
		}

		symbolOffsets[ins.Symbol()] = iter.Offset
	}

	// Find all instructions tagged as references to other symbols.
	// Depending on the instruction type, populate their constant or offset
	// fields to point to the symbol they refer to within the insn stream.
	iter = insns.Iterate()
	for iter.Next() {
		i := iter.Index
		offset := iter.Offset
		ins := iter.Ins

		if ins.Reference() == "" {
			continue
		}

		switch {
		// Calls, function pointer loads and 32-bit long jumps keep the
		// relative target in Constant; -1 marks it as unresolved.
		case ins.IsFunctionReference() && ins.Constant == -1,
			ins.OpCode == Ja.opCode(Jump32Class, ImmSource) && ins.Constant == -1:
			symOffset, ok := symbolOffsets[ins.Reference()]
			if !ok {
				return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference)
			}

			// Target is relative to the instruction following this one.
			ins.Constant = int64(symOffset - offset - 1)

		// Other jumps keep the relative target in Offset; -1 marks it as
		// unresolved.
		case ins.OpCode.Class().IsJump() && ins.Offset == -1:
			symOffset, ok := symbolOffsets[ins.Reference()]
			if !ok {
				return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference)
			}

			ins.Offset = int16(symOffset - offset - 1)
		}
	}

	return nil
}
// encodeMapPointers finds all Map Instructions and encodes their FDs
// into their Constant fields.
//
// Returns sys.ErrClosedFd if an associated map has been closed.
func (insns Instructions) encodeMapPointers() error {
	iter := insns.Iterate()
	for iter.Next() {
		ins := iter.Ins

		if !ins.IsLoadFromMap() {
			continue
		}

		m := ins.Map()
		if m == nil {
			continue
		}

		fd := m.FD()
		if fd < 0 {
			return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd)
		}

		// Reuse the fd we just validated instead of calling m.FD() a
		// second time: a repeated call could observe the map being closed
		// concurrently and bypass the check above.
		ins.encodeMapFD(fd)
	}

	return nil
}
// Iterate allows iterating a BPF program while keeping track of
// various offsets.
//
// Modifying the instruction slice will lead to undefined behaviour.
func (insns Instructions) Iterate() *InstructionIterator {
	iter := new(InstructionIterator)
	iter.insns = insns
	return iter
}
// InstructionIterator iterates over a BPF program.
//
// Use Instructions.Iterate to obtain one; call Next before accessing
// the exported fields.
type InstructionIterator struct {
	insns Instructions
	// The instruction in question.
	Ins *Instruction
	// The index of the instruction in the original instruction slice.
	Index int
	// The offset of the instruction in raw BPF instructions. This accounts
	// for double-wide instructions.
	Offset RawInstructionOffset
}
// Next returns true as long as there are any instructions remaining.
func (iter *InstructionIterator) Next() bool {
	if len(iter.insns) == 0 {
		return false
	}

	// Advance Index and Offset based on the instruction handed out on the
	// previous call, accounting for double-wide instructions.
	if prev := iter.Ins; prev != nil {
		iter.Index++
		iter.Offset += RawInstructionOffset(prev.OpCode.rawInstructions())
	}

	iter.Ins, iter.insns = &iter.insns[0], iter.insns[1:]
	return true
}
// bpfRegisters is the packed dst/src register byte of the wire format.
type bpfRegisters uint8

// newBPFRegisters packs dst and src into a single byte. The nibble order
// depends on the endianness of the target program.
func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) {
	switch bo {
	case binary.LittleEndian:
		return bpfRegisters(src<<4 | dst&0xf), nil
	case binary.BigEndian:
		return bpfRegisters(dst<<4 | src&0xf), nil
	}
	return 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
}
// IsUnreferencedSymbol returns true if err was caused by
// an unreferenced symbol.
//
// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol).
func IsUnreferencedSymbol(err error) bool {
	// Kept for backward compatibility; unwraps via errors.Is.
	return errors.Is(err, ErrUnreferencedSymbol)
}

135
go/vendor/github.com/cilium/ebpf/asm/jump.go generated vendored Normal file
View File

@@ -0,0 +1,135 @@
package asm
//go:generate go run golang.org/x/tools/cmd/stringer@latest -output jump_string.go -type=JumpOp
// JumpOp affect control flow.
//
//	msb      lsb
//	+----+-+---+
//	|OP  |s|cls|
//	+----+-+---+
type JumpOp uint8

// jumpMask selects the jump operation bits within an OpCode.
const jumpMask OpCode = 0xf0
// Jump operations. The values occupy the upper nibble of the opcode byte;
// presumably they match the kernel's BPF_J* encodings — TODO confirm.
const (
	// InvalidJumpOp is returned by getters when invoked
	// on non branch OpCodes
	InvalidJumpOp JumpOp = 0xff
	// Ja jumps by offset unconditionally
	Ja JumpOp = 0x00
	// JEq jumps by offset if r == imm
	JEq JumpOp = 0x10
	// JGT jumps by offset if r > imm
	JGT JumpOp = 0x20
	// JGE jumps by offset if r >= imm
	JGE JumpOp = 0x30
	// JSet jumps by offset if r & imm
	JSet JumpOp = 0x40
	// JNE jumps by offset if r != imm
	JNE JumpOp = 0x50
	// JSGT jumps by offset if signed r > signed imm
	JSGT JumpOp = 0x60
	// JSGE jumps by offset if signed r >= signed imm
	JSGE JumpOp = 0x70
	// Call builtin or user defined function from imm
	Call JumpOp = 0x80
	// Exit ends execution, with value in r0
	Exit JumpOp = 0x90
	// JLT jumps by offset if r < imm
	JLT JumpOp = 0xa0
	// JLE jumps by offset if r <= imm
	JLE JumpOp = 0xb0
	// JSLT jumps by offset if signed r < signed imm
	JSLT JumpOp = 0xc0
	// JSLE jumps by offset if signed r <= signed imm
	JSLE JumpOp = 0xd0
)
// Return emits an exit instruction.
//
// Requires a return value in R0.
func Return() Instruction {
	var ins Instruction
	ins.OpCode = OpCode(JumpClass).SetJumpOp(Exit)
	return ins
}
// Op returns the OpCode for a given jump source.
func (op JumpOp) Op(source Source) OpCode {
	base := OpCode(JumpClass).SetJumpOp(op)
	return base.SetSource(source)
}
// Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled.
func (op JumpOp) Imm(dst Register, value int32, label string) Instruction {
	// Offset -1 marks the jump target as unresolved until label resolution.
	ins := Instruction{
		OpCode:   op.opCode(JumpClass, ImmSource),
		Dst:      dst,
		Constant: int64(value),
		Offset:   -1,
	}
	return ins.WithReference(label)
}
// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled.
// Requires kernel 5.1.
func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction {
	// Offset -1 marks the jump target as unresolved until label resolution.
	ins := Instruction{
		OpCode:   op.opCode(Jump32Class, ImmSource),
		Dst:      dst,
		Constant: int64(value),
		Offset:   -1,
	}
	return ins.WithReference(label)
}
// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled.
func (op JumpOp) Reg(dst, src Register, label string) Instruction {
	// Offset -1 marks the jump target as unresolved until label resolution.
	ins := Instruction{
		OpCode: op.opCode(JumpClass, RegSource),
		Dst:    dst,
		Src:    src,
		Offset: -1,
	}
	return ins.WithReference(label)
}
// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled.
// Requires kernel 5.1.
func (op JumpOp) Reg32(dst, src Register, label string) Instruction {
	// Offset -1 marks the jump target as unresolved until label resolution.
	ins := Instruction{
		OpCode: op.opCode(Jump32Class, RegSource),
		Dst:    dst,
		Src:    src,
		Offset: -1,
	}
	return ins.WithReference(label)
}
// opCode builds the OpCode for op in the given class with the given source.
// Exit and Call have no class/source variants and yield InvalidOpCode.
func (op JumpOp) opCode(class Class, source Source) OpCode {
	switch op {
	case Exit, Call:
		return InvalidOpCode
	}
	return OpCode(class).SetJumpOp(op).SetSource(source)
}
// LongJump returns a jump always instruction with a range of [-2^31, 2^31 - 1].
func LongJump(label string) Instruction {
	// Constant -1 marks the 32-bit jump target as unresolved.
	ins := Instruction{
		OpCode:   Ja.opCode(Jump32Class, ImmSource),
		Constant: -1,
	}
	return ins.WithReference(label)
}
// Label adjusts PC to the address of the label.
func (op JumpOp) Label(label string) Instruction {
	var ins Instruction
	if op == Call {
		// Calls keep the unresolved target (-1) in Constant.
		ins = Instruction{
			OpCode:   OpCode(JumpClass).SetJumpOp(Call),
			Src:      PseudoCall,
			Constant: -1,
		}
	} else {
		// Jumps keep the unresolved target (-1) in Offset.
		ins = Instruction{
			OpCode: OpCode(JumpClass).SetJumpOp(op),
			Offset: -1,
		}
	}
	return ins.WithReference(label)
}

53
go/vendor/github.com/cilium/ebpf/asm/jump_string.go generated vendored Normal file
View File

@@ -0,0 +1,53 @@
// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT.
package asm
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidJumpOp-255]
_ = x[Ja-0]
_ = x[JEq-16]
_ = x[JGT-32]
_ = x[JGE-48]
_ = x[JSet-64]
_ = x[JNE-80]
_ = x[JSGT-96]
_ = x[JSGE-112]
_ = x[Call-128]
_ = x[Exit-144]
_ = x[JLT-160]
_ = x[JLE-176]
_ = x[JSLT-192]
_ = x[JSLE-208]
}
const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp"
var _JumpOp_map = map[JumpOp]string{
0: _JumpOp_name[0:2],
16: _JumpOp_name[2:5],
32: _JumpOp_name[5:8],
48: _JumpOp_name[8:11],
64: _JumpOp_name[11:15],
80: _JumpOp_name[15:18],
96: _JumpOp_name[18:22],
112: _JumpOp_name[22:26],
128: _JumpOp_name[26:30],
144: _JumpOp_name[30:34],
160: _JumpOp_name[34:37],
176: _JumpOp_name[37:40],
192: _JumpOp_name[40:44],
208: _JumpOp_name[44:48],
255: _JumpOp_name[48:61],
}
// String implements fmt.Stringer. Note: this file is generated by
// stringer; edits here will be lost on regeneration.
func (i JumpOp) String() string {
	if str, ok := _JumpOp_map[i]; ok {
		return str
	}
	return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")"
}

336
go/vendor/github.com/cilium/ebpf/asm/load_store.go generated vendored Normal file
View File

@@ -0,0 +1,336 @@
package asm
import "fmt"
//go:generate go run golang.org/x/tools/cmd/stringer@latest -output load_store_string.go -type=Mode,Size
// Mode for load and store operations
//
//	msb      lsb
//	+---+--+---+
//	|MDE|sz|cls|
//	+---+--+---+
type Mode uint8

// modeMask selects the mode bits within an OpCode.
const modeMask OpCode = 0xe0

// Addressing modes for load/store opcodes (upper three bits).
const (
	// InvalidMode is returned by getters when invoked
	// on non load / store OpCodes
	InvalidMode Mode = 0xff
	// ImmMode - immediate value
	ImmMode Mode = 0x00
	// AbsMode - immediate value + offset
	AbsMode Mode = 0x20
	// IndMode - indirect (imm+src)
	IndMode Mode = 0x40
	// MemMode - load from memory
	MemMode Mode = 0x60
	// MemSXMode - load from memory, sign extension
	MemSXMode Mode = 0x80
	// AtomicMode - add atomically across processors.
	AtomicMode Mode = 0xc0
)
// atomicMask selects the atomic-operation bits folded into an OpCode.
const atomicMask OpCode = 0x0001_ff00

// AtomicOp is an atomic memory operation, stored shifted left by 8 bits
// so it can be ORed into an OpCode (see Instruction.Unmarshal/Marshal).
type AtomicOp uint32

const (
	InvalidAtomic AtomicOp = 0xffff_ffff
	// AddAtomic - add src to memory address dst atomically
	AddAtomic AtomicOp = AtomicOp(Add) << 8
	// FetchAdd - add src to memory address dst atomically, store result in src
	FetchAdd AtomicOp = AddAtomic | fetch
	// AndAtomic - bitwise AND src with memory address at dst atomically
	AndAtomic AtomicOp = AtomicOp(And) << 8
	// FetchAnd - bitwise AND src with memory address at dst atomically, store result in src
	FetchAnd AtomicOp = AndAtomic | fetch
	// OrAtomic - bitwise OR src with memory address at dst atomically
	OrAtomic AtomicOp = AtomicOp(Or) << 8
	// FetchOr - bitwise OR src with memory address at dst atomically, store result in src
	FetchOr AtomicOp = OrAtomic | fetch
	// XorAtomic - bitwise XOR src with memory address at dst atomically
	XorAtomic AtomicOp = AtomicOp(Xor) << 8
	// FetchXor - bitwise XOR src with memory address at dst atomically, store result in src
	FetchXor AtomicOp = XorAtomic | fetch
	// Xchg - atomically exchange the old value with the new value
	//
	// src gets populated with the old value of *(size *)(dst + offset).
	Xchg AtomicOp = 0x0000_e000 | fetch
	// CmpXchg - atomically compare and exchange the old value with the new value
	//
	// Compares R0 and *(size *)(dst + offset), writes src to *(size *)(dst + offset) on match.
	// R0 gets populated with the old value of *(size *)(dst + offset), even if no exchange occurs.
	CmpXchg AtomicOp = 0x0000_f000 | fetch
	// fetch modifier for copy-modify-write atomics
	fetch AtomicOp = 0x0000_0100
	// loadAcquire - atomically load with acquire semantics
	loadAcquire AtomicOp = 0x0001_0000
	// storeRelease - atomically store with release semantics
	storeRelease AtomicOp = 0x0001_1000
)
// String returns a human-readable name for the atomic operation.
func (op AtomicOp) String() string {
	switch op {
	case AddAtomic, AndAtomic, OrAtomic, XorAtomic:
		// Plain atomics reuse the name of the underlying ALU op.
		return ALUOp(op >> 8).String()
	case FetchAdd, FetchAnd, FetchOr, FetchXor:
		// Fetch variants strip the fetch bit before decoding the ALU op.
		return "Fetch" + ALUOp((op^fetch)>>8).String()
	case Xchg:
		return "Xchg"
	case CmpXchg:
		return "CmpXchg"
	case loadAcquire:
		return "LdAcq"
	case storeRelease:
		return "StRel"
	}
	return fmt.Sprintf("AtomicOp(%#x)", uint32(op))
}
// OpCode returns the OpCode for the atomic operation at the given size,
// or InvalidOpCode for unsupported size/op combinations.
func (op AtomicOp) OpCode(size Size) OpCode {
	if size == Byte || size == Half {
		switch op {
		case AddAtomic, AndAtomic, OrAtomic, XorAtomic,
			FetchAdd, FetchAnd, FetchOr, FetchXor,
			Xchg, CmpXchg:
			// 8-bit and 16-bit atomic copy-modify-write atomics are not supported
			return InvalidOpCode
		}
	}

	return OpCode(StXClass).SetMode(AtomicMode).SetSize(size).SetAtomicOp(op)
}
// Mem emits `*(size *)(dst + offset) (op) src`.
//
// The resulting OpCode is invalid for unsupported op/size combinations,
// see AtomicOp.OpCode.
func (op AtomicOp) Mem(dst, src Register, size Size, offset int16) Instruction {
	return Instruction{
		OpCode: op.OpCode(size),
		Dst:    dst,
		Src:    src,
		Offset: offset,
	}
}
// LoadAcquire emits `lock-acquire dst = *(size *)(src + offset)`.
func LoadAcquire(dst, src Register, size Size, offset int16) Instruction {
	return Instruction{
		OpCode: loadAcquire.OpCode(size),
		Dst:    dst,
		Src:    src,
		Offset: offset,
	}
}
// StoreRelease emits `lock-release *(size *)(dst + offset) = src`.
func StoreRelease(dst, src Register, size Size, offset int16) Instruction {
	return Instruction{
		OpCode: storeRelease.OpCode(size),
		Dst:    dst,
		Src:    src,
		Offset: offset,
	}
}
// Size of load and store operations
//
//	msb      lsb
//	+---+--+---+
//	|mde|SZ|cls|
//	+---+--+---+
type Size uint8

// sizeMask selects the size bits within an OpCode.
const sizeMask OpCode = 0x18

// Operand sizes for load/store opcodes (bits 3-4).
const (
	// InvalidSize is returned by getters when invoked
	// on non load / store OpCodes
	InvalidSize Size = 0xff
	// DWord - double word; 64 bits
	DWord Size = 0x18
	// Word - word; 32 bits
	Word Size = 0x00
	// Half - half-word; 16 bits
	Half Size = 0x08
	// Byte - byte; 8 bits
	Byte Size = 0x10
)
// Sizeof returns the size in bytes, or -1 for invalid sizes.
func (s Size) Sizeof() int {
	switch s {
	case Byte:
		return 1
	case Half:
		return 2
	case Word:
		return 4
	case DWord:
		return 8
	}
	return -1
}
// LoadMemOp returns the OpCode to load a value of given size from memory.
func LoadMemOp(size Size) OpCode {
	op := OpCode(LdXClass).SetMode(MemMode)
	return op.SetSize(size)
}
// LoadMemSXOp returns the OpCode to load a value of given size from memory sign extended.
func LoadMemSXOp(size Size) OpCode {
	op := OpCode(LdXClass).SetMode(MemSXMode)
	return op.SetSize(size)
}
// LoadMem emits `dst = *(size *)(src + offset)`.
func LoadMem(dst, src Register, offset int16, size Size) Instruction {
	ins := Instruction{
		OpCode: LoadMemOp(size),
		Dst:    dst,
		Src:    src,
		Offset: offset,
	}
	return ins
}
// LoadMemSX emits `dst = *(size *)(src + offset)` but sign extends dst.
func LoadMemSX(dst, src Register, offset int16, size Size) Instruction {
	// Sign extension of a full 64-bit load is not a valid encoding.
	if size == DWord {
		return Instruction{OpCode: InvalidOpCode}
	}

	ins := Instruction{
		OpCode: LoadMemSXOp(size),
		Dst:    dst,
		Src:    src,
		Offset: offset,
	}
	return ins
}
// LoadImmOp returns the OpCode to load an immediate of given size.
//
// As of kernel 4.20, only DWord size is accepted.
func LoadImmOp(size Size) OpCode {
	return OpCode(LdClass).SetMode(ImmMode).SetSize(size)
}

// LoadImm emits `dst = (size)value`.
//
// As of kernel 4.20, only DWord size is accepted.
func LoadImm(dst Register, value int64, size Size) Instruction {
	return Instruction{
		OpCode:   LoadImmOp(size),
		Dst:      dst,
		Constant: value,
	}
}

// LoadMapPtr stores a pointer to a map in dst.
//
// The map's file descriptor is carried in the lower 32 bits of Constant,
// and Src is set to PseudoMapFD (BPF_PSEUDO_MAP_FD) to mark the load.
func LoadMapPtr(dst Register, fd int) Instruction {
	if fd < 0 {
		return Instruction{OpCode: InvalidOpCode}
	}

	return Instruction{
		OpCode:   LoadImmOp(DWord),
		Dst:      dst,
		Src:      PseudoMapFD,
		Constant: int64(uint32(fd)),
	}
}

// LoadMapValue stores a pointer to the value at a certain offset of a map.
//
// The upper 32 bits of Constant hold the offset, the lower 32 bits the
// map's file descriptor; Src is set to PseudoMapValue (BPF_PSEUDO_MAP_VALUE).
func LoadMapValue(dst Register, fd int, offset uint32) Instruction {
	if fd < 0 {
		return Instruction{OpCode: InvalidOpCode}
	}

	fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd))
	return Instruction{
		OpCode:   LoadImmOp(DWord),
		Dst:      dst,
		Src:      PseudoMapValue,
		Constant: int64(fdAndOffset),
	}
}
// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff.
func LoadIndOp(size Size) OpCode {
	return OpCode(LdClass).SetMode(IndMode).SetSize(size)
}

// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`.
//
// Note that the offset is carried in the Constant field, not Offset.
func LoadInd(dst, src Register, offset int32, size Size) Instruction {
	return Instruction{
		OpCode:   LoadIndOp(size),
		Dst:      dst,
		Src:      src,
		Constant: int64(offset),
	}
}

// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff.
func LoadAbsOp(size Size) OpCode {
	return OpCode(LdClass).SetMode(AbsMode).SetSize(size)
}

// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`.
//
// The result is always written to R0; the offset is carried in Constant.
func LoadAbs(offset int32, size Size) Instruction {
	return Instruction{
		OpCode:   LoadAbsOp(size),
		Dst:      R0,
		Constant: int64(offset),
	}
}
// StoreMemOp returns the OpCode for storing a register of given size in memory.
func StoreMemOp(size Size) OpCode {
	return OpCode(StXClass).SetMode(MemMode).SetSize(size)
}

// StoreMem emits `*(size *)(dst + offset) = src`
func StoreMem(dst Register, offset int16, src Register, size Size) Instruction {
	return Instruction{
		OpCode: StoreMemOp(size),
		Dst:    dst,
		Src:    src,
		Offset: offset,
	}
}

// StoreImmOp returns the OpCode for storing an immediate of given size in memory.
func StoreImmOp(size Size) OpCode {
	return OpCode(StClass).SetMode(MemMode).SetSize(size)
}

// StoreImm emits `*(size *)(dst + offset) = value`.
//
// DWord is rejected: the eBPF immediate field is only 32 bits wide, so a
// 64-bit store immediate cannot be encoded in a single instruction.
func StoreImm(dst Register, offset int16, value int64, size Size) Instruction {
	if size == DWord {
		return Instruction{OpCode: InvalidOpCode}
	}

	return Instruction{
		OpCode:   StoreImmOp(size),
		Dst:      dst,
		Offset:   offset,
		Constant: value,
	}
}

// StoreXAddOp returns the OpCode to atomically add a register to a value in memory.
func StoreXAddOp(size Size) OpCode {
	return AddAtomic.OpCode(size)
}

// StoreXAdd atomically adds src to *dst.
func StoreXAdd(dst, src Register, size Size) Instruction {
	return AddAtomic.Mem(dst, src, size, 0)
}

View File

@@ -0,0 +1,84 @@
// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT.
package asm
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidMode-255]
_ = x[ImmMode-0]
_ = x[AbsMode-32]
_ = x[IndMode-64]
_ = x[MemMode-96]
_ = x[MemSXMode-128]
_ = x[AtomicMode-192]
}
const (
_Mode_name_0 = "ImmMode"
_Mode_name_1 = "AbsMode"
_Mode_name_2 = "IndMode"
_Mode_name_3 = "MemMode"
_Mode_name_4 = "MemSXMode"
_Mode_name_5 = "AtomicMode"
_Mode_name_6 = "InvalidMode"
)
func (i Mode) String() string {
switch {
case i == 0:
return _Mode_name_0
case i == 32:
return _Mode_name_1
case i == 64:
return _Mode_name_2
case i == 96:
return _Mode_name_3
case i == 128:
return _Mode_name_4
case i == 192:
return _Mode_name_5
case i == 255:
return _Mode_name_6
default:
return "Mode(" + strconv.FormatInt(int64(i), 10) + ")"
}
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidSize-255]
_ = x[DWord-24]
_ = x[Word-0]
_ = x[Half-8]
_ = x[Byte-16]
}
const (
_Size_name_0 = "Word"
_Size_name_1 = "Half"
_Size_name_2 = "Byte"
_Size_name_3 = "DWord"
_Size_name_4 = "InvalidSize"
)
func (i Size) String() string {
switch {
case i == 0:
return _Size_name_0
case i == 8:
return _Size_name_1
case i == 16:
return _Size_name_2
case i == 24:
return _Size_name_3
case i == 255:
return _Size_name_4
default:
return "Size(" + strconv.FormatInt(int64(i), 10) + ")"
}
}

80
go/vendor/github.com/cilium/ebpf/asm/metadata.go generated vendored Normal file
View File

@@ -0,0 +1,80 @@
package asm
// Metadata contains metadata about an instruction.
//
// It is a persistent singly linked list of key/value pairs: mutation
// copies nodes where necessary (see remove), so Metadata values that
// were copied from one another can safely share their tails.
type Metadata struct {
	head *metaElement
}

// metaElement is a single node of the metadata list.
type metaElement struct {
	next       *metaElement
	key, value interface{}
}

// Find the element containing key.
//
// Returns nil if there is no such element.
func (m *Metadata) find(key interface{}) *metaElement {
	for e := m.head; e != nil; e = e.next {
		if e.key == key {
			return e
		}
	}
	return nil
}
// Remove an element from the linked list.
//
// Copies as many elements of the list as necessary to remove r, but doesn't
// perform a full copy: nodes in front of r are duplicated so that other
// Metadata values sharing the original list are never mutated, while the
// tail after r is shared as-is.
func (m *Metadata) remove(r *metaElement) {
	// current points at the link (head or a copied node's next) that
	// will receive the next surviving element.
	current := &m.head
	for e := m.head; e != nil; e = e.next {
		if e == r {
			// We've found the element we want to remove.
			*current = e.next

			// No need to copy the tail.
			return
		}

		// There is another element in front of the one we want to remove.
		// We have to copy it to be able to change metaElement.next.
		cpy := &metaElement{key: e.key, value: e.value}
		*current = cpy
		current = &cpy.next
	}
}
// Set a key to a value.
//
// If value is nil, the key is removed. Avoids modifying old metadata by
// copying if necessary.
func (m *Metadata) Set(key, value interface{}) {
	if e := m.find(key); e != nil {
		if e.value == value {
			// Key is present and the value is the same. Nothing to do.
			return
		}

		// Key is present with a different value. Create a copy of the list
		// which doesn't have the element in it.
		m.remove(e)
	}

	// m.head is now a linked list that doesn't contain key.
	if value == nil {
		return
	}

	// Prepend the new element so find sees the fresh value first.
	m.head = &metaElement{key: key, value: value, next: m.head}
}
// Get the value of a key.
//
// Returns nil if no value with the given key is present.
func (m *Metadata) Get(key interface{}) interface{} {
	e := m.find(key)
	if e == nil {
		return nil
	}
	return e.value
}

327
go/vendor/github.com/cilium/ebpf/asm/opcode.go generated vendored Normal file
View File

@@ -0,0 +1,327 @@
package asm
import (
"fmt"
"strings"
)
//go:generate go run golang.org/x/tools/cmd/stringer@latest -output opcode_string.go -type=Class
// Class of operations
//
//	msb      lsb
//	+---+--+---+
//	| ??  |CLS|
//	+---+--+---+
type Class uint8

// classMask selects the three class bits of an opcode.
const classMask OpCode = 0x07

const (
	// LdClass loads immediate values into registers.
	// Also used for non-standard load operations from cBPF.
	LdClass Class = 0x00
	// LdXClass loads memory into registers.
	LdXClass Class = 0x01
	// StClass stores immediate values to memory.
	StClass Class = 0x02
	// StXClass stores registers to memory.
	StXClass Class = 0x03
	// ALUClass describes arithmetic operators.
	ALUClass Class = 0x04
	// JumpClass describes jump operators.
	JumpClass Class = 0x05
	// Jump32Class describes jump operators with 32-bit comparisons.
	// Requires kernel 5.1.
	Jump32Class Class = 0x06
	// ALU64Class describes arithmetic operators in 64-bit mode.
	ALU64Class Class = 0x07
)

// IsLoad checks if this is either LdClass or LdXClass.
func (cls Class) IsLoad() bool {
	return cls == LdClass || cls == LdXClass
}

// IsStore checks if this is either StClass or StXClass.
func (cls Class) IsStore() bool {
	return cls == StClass || cls == StXClass
}

// isLoadOrStore checks if this is any of the four load/store classes.
func (cls Class) isLoadOrStore() bool {
	return cls.IsLoad() || cls.IsStore()
}

// IsALU checks if this is either ALUClass or ALU64Class.
func (cls Class) IsALU() bool {
	return cls == ALUClass || cls == ALU64Class
}

// IsJump checks if this is either JumpClass or Jump32Class.
func (cls Class) IsJump() bool {
	return cls == JumpClass || cls == Jump32Class
}

// isJumpOrALU checks if this is any of the jump or ALU classes.
func (cls Class) isJumpOrALU() bool {
	return cls.IsJump() || cls.IsALU()
}
// OpCode represents a single operation.
// It is not a 1:1 mapping to real eBPF opcodes.
//
// The encoding varies based on a 3-bit Class:
//
//	7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0
//	                                                          ??? | CLS
//
// For ALUClass and ALU64Class:
//
//	7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0
//	                                                  0 |  OPC  |S| CLS
//
// For LdClass, LdXClass, StClass and StXClass:
//
//	7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0
//	                                                  0 | MDE |SIZ| CLS
//
// For StXClass where MDE == AtomicMode:
//
//	7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0
//	                   0 | ATOMIC OP |                    MDE |SIZ| CLS
//
// For JumpClass, Jump32Class:
//
//	7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0  7 6 5 4 3 2 1 0
//	                                                  0 |  OPC  |S| CLS
type OpCode uint32

// InvalidOpCode is returned by setters on OpCode
const InvalidOpCode OpCode = 0xffff
// bpfOpCode returns the actual BPF opcode.
//
// Only the low 8 bits of an OpCode are valid in the wire encoding;
// anything outside that range is an error.
func (op OpCode) bpfOpCode() (byte, error) {
	const opCodeMask = 0xff

	if !valid(op, opCodeMask) {
		return 0, fmt.Errorf("invalid opcode %x", op)
	}

	return byte(op & opCodeMask), nil
}

// rawInstructions returns the number of BPF instructions required
// to encode this opcode.
func (op OpCode) rawInstructions() int {
	if op.IsDWordLoad() {
		return 2
	}
	return 1
}

// IsDWordLoad returns true if op is a 64-bit immediate load, which
// occupies two raw BPF instructions.
func (op OpCode) IsDWordLoad() bool {
	return op == LoadImmOp(DWord)
}
// Class returns the class of operation.
func (op OpCode) Class() Class {
	return Class(op & classMask)
}

// Mode returns the mode for load and store operations.
func (op OpCode) Mode() Mode {
	if !op.Class().isLoadOrStore() {
		return InvalidMode
	}
	return Mode(op & modeMask)
}

// Size returns the size for load and store operations.
func (op OpCode) Size() Size {
	if !op.Class().isLoadOrStore() {
		return InvalidSize
	}
	return Size(op & sizeMask)
}

// AtomicOp returns the type of atomic operation.
func (op OpCode) AtomicOp() AtomicOp {
	if op.Class() != StXClass || op.Mode() != AtomicMode {
		return InvalidAtomic
	}
	return AtomicOp(op & atomicMask)
}

// Source returns the source for branch and ALU operations.
func (op OpCode) Source() Source {
	// Swap has no meaningful Source; its variant is exposed via Endianness.
	if !op.Class().isJumpOrALU() || op.ALUOp() == Swap {
		return InvalidSource
	}
	return Source(op & sourceMask)
}

// ALUOp returns the ALUOp.
func (op OpCode) ALUOp() ALUOp {
	if !op.Class().IsALU() {
		return InvalidALUOp
	}
	return ALUOp(op & aluMask)
}

// Endianness returns the Endianness for a byte swap instruction.
func (op OpCode) Endianness() Endianness {
	if op.ALUOp() != Swap {
		return InvalidEndian
	}
	return Endianness(op & endianMask)
}

// JumpOp returns the JumpOp.
// Returns InvalidJumpOp if it doesn't encode a jump.
func (op OpCode) JumpOp() JumpOp {
	if !op.Class().IsJump() {
		return InvalidJumpOp
	}

	jumpOp := JumpOp(op & jumpMask)

	// Some JumpOps are only supported by JumpClass, not Jump32Class.
	if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call) {
		return InvalidJumpOp
	}

	return jumpOp
}
// SetMode sets the mode on load and store operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetMode(mode Mode) OpCode {
	if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) {
		return InvalidOpCode
	}
	return (op & ^modeMask) | OpCode(mode)
}

// SetSize sets the size on load and store operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetSize(size Size) OpCode {
	if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) {
		return InvalidOpCode
	}
	return (op & ^sizeMask) | OpCode(size)
}

// SetAtomicOp sets the atomic operation on a StXClass opcode in AtomicMode.
//
// Returns InvalidOpCode if op is of the wrong class or mode.
func (op OpCode) SetAtomicOp(atomic AtomicOp) OpCode {
	if op.Class() != StXClass || op.Mode() != AtomicMode || !valid(OpCode(atomic), atomicMask) {
		return InvalidOpCode
	}
	return (op & ^atomicMask) | OpCode(atomic)
}

// SetSource sets the source on jump and ALU operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetSource(source Source) OpCode {
	if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) {
		return InvalidOpCode
	}
	return (op & ^sourceMask) | OpCode(source)
}

// SetALUOp sets the ALUOp on ALU operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetALUOp(alu ALUOp) OpCode {
	if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) {
		return InvalidOpCode
	}
	return (op & ^aluMask) | OpCode(alu)
}

// SetJumpOp sets the JumpOp on jump operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetJumpOp(jump JumpOp) OpCode {
	if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) {
		return InvalidOpCode
	}

	newOp := (op & ^jumpMask) | OpCode(jump)

	// Check newOp is legal, e.g. an Exit/Call wasn't set on Jump32Class.
	if newOp.JumpOp() == InvalidJumpOp {
		return InvalidOpCode
	}

	return newOp
}
// String returns a human readable name for the operation, assembled
// from its class, mode/size or ALU/jump components.
func (op OpCode) String() string {
	var f strings.Builder

	switch class := op.Class(); {
	case class.isLoadOrStore():
		f.WriteString(strings.TrimSuffix(class.String(), "Class"))

		mode := op.Mode()
		f.WriteString(strings.TrimSuffix(mode.String(), "Mode"))

		if atomic := op.AtomicOp(); atomic != InvalidAtomic {
			f.WriteString(strings.TrimSuffix(atomic.String(), "Atomic"))
		}

		switch op.Size() {
		case DWord:
			f.WriteString("DW")
		case Word:
			f.WriteString("W")
		case Half:
			f.WriteString("H")
		case Byte:
			f.WriteString("B")
		}

	case class.IsALU():
		if op.ALUOp() == Swap && op.Class() == ALU64Class {
			// B to make BSwap, unconditional byte swap
			f.WriteString("B")
		}

		f.WriteString(op.ALUOp().String())

		if op.ALUOp() == Swap {
			if op.Class() == ALUClass {
				// Width for Endian is controlled by Constant
				f.WriteString(op.Endianness().String())
			}
		} else {
			f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))

			if class == ALUClass {
				f.WriteString("32")
			}
		}

	case class.IsJump():
		f.WriteString(op.JumpOp().String())

		if class == Jump32Class {
			f.WriteString("32")
		}

		if jop := op.JumpOp(); jop != Exit && jop != Call && jop != Ja {
			f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
		}

	default:
		fmt.Fprintf(&f, "OpCode(%#x)", uint8(op))
	}

	return f.String()
}
// valid reports whether value only has bits set that are covered by mask.
func valid(value, mask OpCode) bool {
	return value&^mask == 0
}

30
go/vendor/github.com/cilium/ebpf/asm/opcode_string.go generated vendored Normal file
View File

@@ -0,0 +1,30 @@
// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT.
package asm
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[LdClass-0]
_ = x[LdXClass-1]
_ = x[StClass-2]
_ = x[StXClass-3]
_ = x[ALUClass-4]
_ = x[JumpClass-5]
_ = x[Jump32Class-6]
_ = x[ALU64Class-7]
}
const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class"
var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68}
func (i Class) String() string {
if i >= Class(len(_Class_index)-1) {
return "Class(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Class_name[_Class_index[i]:_Class_index[i+1]]
}

51
go/vendor/github.com/cilium/ebpf/asm/register.go generated vendored Normal file
View File

@@ -0,0 +1,51 @@
package asm
import (
"fmt"
)
// Register is the source or destination of most operations.
type Register uint8

// R0 contains return values.
const R0 Register = 0

// Registers for function arguments.
const (
	R1 Register = R0 + 1 + iota
	R2
	R3
	R4
	R5
)

// Callee saved registers preserved by function calls.
const (
	R6 Register = R5 + 1 + iota
	R7
	R8
	R9
)

// Read-only frame pointer to access stack.
const (
	R10 Register = R9 + 1
	RFP = R10
)

// Pseudo registers used by 64bit loads and jumps.
//
// They share numbers with the real registers; the meaning depends on the
// instruction they appear in (see e.g. LoadMapPtr, LoadMapValue, which
// place them in Src).
const (
	PseudoMapFD     = R1 // BPF_PSEUDO_MAP_FD
	PseudoMapValue  = R2 // BPF_PSEUDO_MAP_VALUE
	PseudoCall      = R1 // BPF_PSEUDO_CALL
	PseudoFunc      = R4 // BPF_PSEUDO_FUNC
	PseudoKfuncCall = R2 // BPF_PSEUDO_KFUNC_CALL
)
// String returns the assembler name of the register, e.g. "r3" or "rfp".
func (r Register) String() string {
	if r == R10 {
		return "rfp"
	}
	return fmt.Sprintf("r%d", uint8(r))
}

100
go/vendor/github.com/cilium/ebpf/attachtype_string.go generated vendored Normal file
View File

@@ -0,0 +1,100 @@
// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT.
package ebpf
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[AttachNone-0]
_ = x[AttachCGroupInetIngress-0]
_ = x[AttachCGroupInetEgress-1]
_ = x[AttachCGroupInetSockCreate-2]
_ = x[AttachCGroupSockOps-3]
_ = x[AttachSkSKBStreamParser-4]
_ = x[AttachSkSKBStreamVerdict-5]
_ = x[AttachCGroupDevice-6]
_ = x[AttachSkMsgVerdict-7]
_ = x[AttachCGroupInet4Bind-8]
_ = x[AttachCGroupInet6Bind-9]
_ = x[AttachCGroupInet4Connect-10]
_ = x[AttachCGroupInet6Connect-11]
_ = x[AttachCGroupInet4PostBind-12]
_ = x[AttachCGroupInet6PostBind-13]
_ = x[AttachCGroupUDP4Sendmsg-14]
_ = x[AttachCGroupUDP6Sendmsg-15]
_ = x[AttachLircMode2-16]
_ = x[AttachFlowDissector-17]
_ = x[AttachCGroupSysctl-18]
_ = x[AttachCGroupUDP4Recvmsg-19]
_ = x[AttachCGroupUDP6Recvmsg-20]
_ = x[AttachCGroupGetsockopt-21]
_ = x[AttachCGroupSetsockopt-22]
_ = x[AttachTraceRawTp-23]
_ = x[AttachTraceFEntry-24]
_ = x[AttachTraceFExit-25]
_ = x[AttachModifyReturn-26]
_ = x[AttachLSMMac-27]
_ = x[AttachTraceIter-28]
_ = x[AttachCgroupInet4GetPeername-29]
_ = x[AttachCgroupInet6GetPeername-30]
_ = x[AttachCgroupInet4GetSockname-31]
_ = x[AttachCgroupInet6GetSockname-32]
_ = x[AttachXDPDevMap-33]
_ = x[AttachCgroupInetSockRelease-34]
_ = x[AttachXDPCPUMap-35]
_ = x[AttachSkLookup-36]
_ = x[AttachXDP-37]
_ = x[AttachSkSKBVerdict-38]
_ = x[AttachSkReuseportSelect-39]
_ = x[AttachSkReuseportSelectOrMigrate-40]
_ = x[AttachPerfEvent-41]
_ = x[AttachTraceKprobeMulti-42]
_ = x[AttachTraceKprobeSession-56]
_ = x[AttachLSMCgroup-43]
_ = x[AttachStructOps-44]
_ = x[AttachNetfilter-45]
_ = x[AttachTCXIngress-46]
_ = x[AttachTCXEgress-47]
_ = x[AttachTraceUprobeMulti-48]
_ = x[AttachCgroupUnixConnect-49]
_ = x[AttachCgroupUnixSendmsg-50]
_ = x[AttachCgroupUnixRecvmsg-51]
_ = x[AttachCgroupUnixGetpeername-52]
_ = x[AttachCgroupUnixGetsockname-53]
_ = x[AttachNetkitPrimary-54]
_ = x[AttachNetkitPeer-55]
_ = x[AttachWindowsXDP-268435457]
_ = x[AttachWindowsBind-268435458]
_ = x[AttachWindowsCGroupInet4Connect-268435459]
_ = x[AttachWindowsCGroupInet6Connect-268435460]
_ = x[AttachWindowsCgroupInet4RecvAccept-268435461]
_ = x[AttachWindowsCgroupInet6RecvAccept-268435462]
_ = x[AttachWindowsCGroupSockOps-268435463]
_ = x[AttachWindowsSample-268435464]
_ = x[AttachWindowsXDPTest-268435465]
}
const (
_AttachType_name_0 = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMultiCgroupUnixConnectCgroupUnixSendmsgCgroupUnixRecvmsgCgroupUnixGetpeernameCgroupUnixGetsocknameNetkitPrimaryNetkitPeerTraceKprobeSession"
_AttachType_name_1 = "WindowsXDPWindowsBindWindowsCGroupInet4ConnectWindowsCGroupInet6ConnectWindowsCgroupInet4RecvAcceptWindowsCgroupInet6RecvAcceptWindowsCGroupSockOpsWindowsSampleWindowsXDPTest"
)
var (
_AttachType_index_0 = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688, 705, 722, 739, 760, 781, 794, 804, 822}
_AttachType_index_1 = [...]uint8{0, 10, 21, 46, 71, 99, 127, 147, 160, 174}
)
func (i AttachType) String() string {
switch {
case i <= 56:
return _AttachType_name_0[_AttachType_index_0[i]:_AttachType_index_0[i+1]]
case 268435457 <= i && i <= 268435465:
i -= 268435457
return _AttachType_name_1[_AttachType_index_1[i]:_AttachType_index_1[i+1]]
default:
return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")"
}
}

535
go/vendor/github.com/cilium/ebpf/btf/btf.go generated vendored Normal file
View File

@@ -0,0 +1,535 @@
package btf
import (
"bufio"
"debug/elf"
"encoding/binary"
"errors"
"fmt"
"io"
"iter"
"maps"
"math"
"os"
"reflect"
"slices"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
)
// btfMagic is the magic number identifying a raw BTF blob.
const btfMagic = 0xeB9F

// Errors returned by BTF functions.
var (
	ErrNotSupported    = internal.ErrNotSupported
	ErrNotFound        = errors.New("not found")
	ErrNoExtendedInfo  = errors.New("no extended info")
	ErrMultipleMatches = errors.New("multiple matching types")
)

// ID represents the unique ID of a BTF object.
type ID = sys.BTFID

// elfData is auxiliary information gathered from an ELF file, used to
// patch up Datasec types whose sizes and offsets are missing from raw BTF.
type elfData struct {
	sectionSizes  map[string]uint32
	symbolOffsets map[elfSymbol]uint32
	fixups        map[Type]bool
}

// elfSymbol identifies an ELF symbol by its section and name.
type elfSymbol struct {
	section string
	name    string
}

// Spec allows querying a set of Types and loading the set into the
// kernel.
type Spec struct {
	*decoder

	// Additional data from ELF, may be nil.
	elf *elfData
}
// LoadSpec opens file and calls LoadSpecFromReader on it.
func LoadSpec(file string) (*Spec, error) {
	fh, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer fh.Close()

	return LoadSpecFromReader(fh)
}

// LoadSpecFromReader reads from an ELF or a raw BTF blob.
//
// Returns ErrNotFound if reading from an ELF which contains no BTF. ExtInfos
// may be nil.
func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
	file, err := internal.NewSafeELFFile(rd)
	if err != nil {
		// Not an ELF: fall back to treating rd as a raw BTF blob,
		// guessing its byte order from the header.
		if bo := guessRawBTFByteOrder(rd); bo != nil {
			return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil)
		}

		return nil, err
	}

	return loadSpecFromELF(file)
}
// LoadSpecAndExtInfosFromReader reads from an ELF.
//
// ExtInfos may be nil if the ELF doesn't contain section metadata.
// Returns ErrNotFound if the ELF contains no BTF.
func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
	file, err := internal.NewSafeELFFile(rd)
	if err != nil {
		return nil, nil, err
	}

	spec, err := loadSpecFromELF(file)
	if err != nil {
		return nil, nil, err
	}

	// Missing ext infos are tolerated: the caller receives a nil *ExtInfos.
	extInfos, err := loadExtInfosFromELF(file, spec)
	if err != nil && !errors.Is(err, ErrNotFound) {
		return nil, nil, err
	}

	return spec, extInfos, nil
}
// symbolOffsets extracts all symbols offsets from an ELF and indexes them by
// section and variable name.
//
// References to variables in BTF data sections carry unsigned 32-bit offsets.
// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
// beyond this range. Since these symbols cannot be described by BTF info,
// ignore them here.
func symbolOffsets(file *internal.SafeELFFile) (map[elfSymbol]uint32, error) {
	symbols, err := file.Symbols()
	if err != nil {
		return nil, fmt.Errorf("can't read symbols: %v", err)
	}

	offsets := make(map[elfSymbol]uint32)
	for _, sym := range symbols {
		if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
			// Ignore things like SHN_ABS
			continue
		}

		if sym.Value > math.MaxUint32 {
			// VarSecinfo offset is u32, cannot reference symbols in higher regions.
			continue
		}

		if int(sym.Section) >= len(file.Sections) {
			return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section)
		}

		secName := file.Sections[sym.Section].Name
		offsets[elfSymbol{secName, sym.Name}] = uint32(sym.Value)
	}

	return offsets, nil
}
// loadSpecFromELF parses the .BTF section of an ELF and records section
// sizes and symbol offsets so Datasec types can be fixed up later.
func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
	var (
		btfSection   *elf.Section
		sectionSizes = make(map[string]uint32)
	)

	for _, sec := range file.Sections {
		switch sec.Name {
		case ".BTF":
			btfSection = sec
		default:
			// Only allocated sections can back a Datasec.
			if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
				break
			}

			if sec.Size > math.MaxUint32 {
				return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
			}

			sectionSizes[sec.Name] = uint32(sec.Size)
		}
	}

	if btfSection == nil {
		return nil, fmt.Errorf("btf: %w", ErrNotFound)
	}

	offsets, err := symbolOffsets(file)
	if err != nil {
		return nil, err
	}

	if btfSection.ReaderAt == nil {
		return nil, fmt.Errorf("compressed BTF is not supported")
	}

	spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil)
	if err != nil {
		return nil, err
	}

	spec.elf = &elfData{
		sectionSizes,
		offsets,
		make(map[Type]bool),
	}

	return spec, nil
}
// loadRawSpec parses a raw BTF blob, optionally resolving references
// against the types and strings of base (split BTF).
func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error) {
	var (
		baseDecoder *decoder
		baseStrings *stringTable
		err         error
	)

	if base != nil {
		baseDecoder = base.decoder
		baseStrings = base.strings
	}

	buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64)
	header, err := parseBTFHeader(buf, bo)
	if err != nil {
		return nil, fmt.Errorf("parsing .BTF header: %v", err)
	}

	// The string table has to be read before the types which reference it.
	stringsSection := io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen))
	rawStrings, err := readStringTable(stringsSection, baseStrings)
	if err != nil {
		return nil, fmt.Errorf("read string section: %w", err)
	}

	typesSection := io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen))
	rawTypes := make([]byte, header.TypeLen)
	if _, err := io.ReadFull(typesSection, rawTypes); err != nil {
		return nil, fmt.Errorf("read type section: %w", err)
	}

	decoder, err := newDecoder(rawTypes, bo, rawStrings, baseDecoder)
	if err != nil {
		return nil, err
	}

	return &Spec{decoder, nil}, nil
}
// guessRawBTFByteOrder detects the byte order of a raw BTF blob by trying
// to parse its header in both endiannesses.
//
// Returns nil if the header parses in neither byte order.
func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder {
	buf := new(bufio.Reader)
	for _, bo := range []binary.ByteOrder{
		binary.LittleEndian,
		binary.BigEndian,
	} {
		buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64))
		if _, err := parseBTFHeader(buf, bo); err == nil {
			return bo
		}
	}

	return nil
}
// fixupDatasec attempts to patch up missing info in Datasecs and its members by
// supplementing them with information from the ELF headers and symbol table.
func (elf *elfData) fixupDatasec(typ Type) error {
	if elf == nil {
		return nil
	}

	if ds, ok := typ.(*Datasec); ok {
		// Only fix up each Datasec once.
		if elf.fixups[ds] {
			return nil
		}
		elf.fixups[ds] = true

		name := ds.Name

		// Some Datasecs are virtual and don't have corresponding ELF sections.
		switch name {
		case ".ksyms":
			// .ksyms describes forward declarations of kfunc signatures, as well as
			// references to kernel symbols.
			// Nothing to fix up, all sizes and offsets are 0.
			for _, vsi := range ds.Vars {
				switch t := vsi.Type.(type) {
				case *Func:
					continue
				case *Var:
					if _, ok := t.Type.(*Void); !ok {
						return fmt.Errorf("data section %s: expected %s to be *Void, not %T: %w", name, vsi.Type.TypeName(), vsi.Type, ErrNotSupported)
					}
				default:
					return fmt.Errorf("data section %s: expected to be either *btf.Func or *btf.Var, not %T: %w", name, vsi.Type, ErrNotSupported)
				}
			}

			return nil
		case ".kconfig":
			// .kconfig has a size of 0 and has all members' offsets set to 0.
			// Fix up all offsets and set the Datasec's size.
			if err := fixupDatasecLayout(ds); err != nil {
				return err
			}

			// Fix up extern to global linkage to avoid a BTF verifier error.
			for _, vsi := range ds.Vars {
				vsi.Type.(*Var).Linkage = GlobalVar
			}

			return nil
		}

		// A non-zero size means this Datasec is already complete.
		if ds.Size != 0 {
			return nil
		}

		ds.Size, ok = elf.sectionSizes[name]
		if !ok {
			return fmt.Errorf("data section %s: missing size", name)
		}

		for i := range ds.Vars {
			symName := ds.Vars[i].Type.TypeName()
			ds.Vars[i].Offset, ok = elf.symbolOffsets[elfSymbol{name, symName}]
			if !ok {
				return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName)
			}
		}
	}

	return nil
}
// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and
// alignment. Calculate and set ds.Size.
func fixupDatasecLayout(ds *Datasec) error {
	var off uint32

	for i, vsi := range ds.Vars {
		v, ok := vsi.Type.(*Var)
		if !ok {
			return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type)
		}

		size, err := Sizeof(v.Type)
		if err != nil {
			return fmt.Errorf("variable %s: getting size: %w", v.Name, err)
		}
		align, err := alignof(v.Type)
		if err != nil {
			return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err)
		}

		// Align the current member based on the offset of the end of the previous
		// member and the alignment of the current member.
		off = internal.Align(off, uint32(align))

		ds.Vars[i].Offset = off

		off += uint32(size)
	}

	// The section ends after the last (aligned) member.
	ds.Size = off

	return nil
}
// Copy a Spec.
//
// All contained types are duplicated while preserving any modifications made
// to them.
func (s *Spec) Copy() *Spec {
	if s == nil {
		return nil
	}

	var elf *elfData
	if s.elf != nil {
		// Section sizes and symbol offsets are read-only and can be
		// shared; the fixup bookkeeping is per-Spec and must be cloned.
		elf = &elfData{
			sectionSizes:  s.elf.sectionSizes,
			symbolOffsets: s.elf.symbolOffsets,
			fixups:        maps.Clone(s.elf.fixups),
		}
	}

	return &Spec{s.decoder.Copy(), elf}
}
// TypeByID returns the BTF Type with the given type ID.
//
// Returns an error wrapping ErrNotFound if a Type with the given ID
// does not exist in the Spec.
func (s *Spec) TypeByID(id TypeID) (Type, error) {
	typ, err := s.decoder.TypeByID(id)
	if err != nil {
		return nil, fmt.Errorf("inflate type: %w", err)
	}

	// Datasecs decoded from ELF may need sizes and offsets patched in.
	if err := s.elf.fixupDatasec(typ); err != nil {
		return nil, err
	}

	return typ, nil
}

// TypeID returns the ID for a given Type.
//
// Returns an error wrapping [ErrNotFound] if the type isn't part of the Spec.
func (s *Spec) TypeID(typ Type) (TypeID, error) {
	return s.decoder.TypeID(typ)
}
// AnyTypesByName returns a list of BTF Types with the given name.
//
// If the BTF blob describes multiple compilation units like vmlinux, multiple
// Types with the same name and kind can exist, but might not describe the same
// data structure.
//
// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
	types, err := s.TypesByName(newEssentialName(name))
	if err != nil {
		return nil, err
	}

	// Advance i manually: after slices.Delete the next candidate shifts
	// into index i, so incrementing unconditionally (via the for post
	// statement) would skip it and could leave non-matching types in the
	// result, or return matches without their Datasec fixup applied.
	for i := 0; i < len(types); {
		// Match against the full name, not just the essential one
		// in case the type being looked up is a struct flavor.
		if types[i].TypeName() != name {
			types = slices.Delete(types, i, i+1)
			continue
		}
		if err := s.elf.fixupDatasec(types[i]); err != nil {
			return nil, err
		}
		i++
	}

	return types, nil
}

// AnyTypeByName returns a Type with the given name.
//
// Returns an error if multiple types of that name exist.
func (s *Spec) AnyTypeByName(name string) (Type, error) {
	types, err := s.AnyTypesByName(name)
	if err != nil {
		return nil, err
	}

	if len(types) > 1 {
		return nil, fmt.Errorf("found multiple types: %v", types)
	}

	return types[0], nil
}
// TypeByName searches for a Type with a specific name. Since multiple Types
// with the same name can exist, the parameter typ is taken to narrow down the
// search in case of a clash.
//
// typ must be a non-nil pointer to an implementation of a Type. On success, the
// address of the found Type will be copied to typ.
//
// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
// Returns an error wrapping ErrMultipleTypes if multiple candidates are found.
func (s *Spec) TypeByName(name string, typ interface{}) error {
	typeInterface := reflect.TypeOf((*Type)(nil)).Elem()

	// typ may be **T or *Type
	typValue := reflect.ValueOf(typ)
	if typValue.Kind() != reflect.Ptr {
		return fmt.Errorf("%T is not a pointer", typ)
	}

	typPtr := typValue.Elem()
	if !typPtr.CanSet() {
		return fmt.Errorf("%T cannot be set", typ)
	}

	wanted := typPtr.Type()
	if wanted == typeInterface {
		// This is *Type. Unwrap the value's type.
		wanted = typPtr.Elem().Type()
	}

	if !wanted.AssignableTo(typeInterface) {
		return fmt.Errorf("%T does not satisfy Type interface", typ)
	}

	types, err := s.AnyTypesByName(name)
	if err != nil {
		return err
	}

	// Keep only candidates of the concrete type the caller asked for,
	// and reject ambiguity.
	var candidate Type
	for _, typ := range types {
		if reflect.TypeOf(typ) != wanted {
			continue
		}

		if candidate != nil {
			return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches)
		}

		candidate = typ
	}

	if candidate == nil {
		return fmt.Errorf("%s %s: %w", wanted, name, ErrNotFound)
	}

	typPtr.Set(reflect.ValueOf(candidate))

	return nil
}
// LoadSplitSpecFromReader loads split BTF from a reader.
//
// Types from base are used to resolve references in the split BTF.
// The returned Spec only contains types from the split BTF, not from the base.
//
// The data is decoded using the machine's native endianness.
func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
	return loadRawSpec(r, internal.NativeEndian, base)
}
// All iterates over all types.
func (s *Spec) All() iter.Seq2[Type, error] {
	return func(yield func(Type, error) bool) {
		for id := s.firstTypeID; ; id++ {
			typ, err := s.TypeByID(id)
			if err != nil {
				// Walking off the end of the ID space terminates the
				// iteration; any other error is surfaced to the caller.
				if !errors.Is(err, ErrNotFound) {
					yield(nil, err)
				}
				return
			}

			// Skip declTags, during unmarshaling declTags become `Tags` fields of other types.
			// We keep them in the spec to avoid holes in the ID space, but for the purposes of
			// iteration, they are not useful to the user.
			if _, isDeclTag := typ.(*declTag); isDeclTag {
				continue
			}

			if !yield(typ, nil) {
				return
			}
		}
	}
}

513
go/vendor/github.com/cilium/ebpf/btf/btf_types.go generated vendored Normal file
View File

@@ -0,0 +1,513 @@
package btf
import (
"encoding/binary"
"errors"
"fmt"
"io"
"unsafe"
"github.com/cilium/ebpf/internal"
)
//go:generate go run golang.org/x/tools/cmd/stringer@latest -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind

// btfKind describes a Type.
type btfKind uint8

// Equivalents of the BTF_KIND_* constants.
//
// NOTE: the trailing line comments are consumed by stringer -linecomment
// (see the go:generate directive above) and become the String() output.
const (
	kindUnknown  btfKind = iota // Unknown
	kindInt                     // Int
	kindPointer                 // Pointer
	kindArray                   // Array
	kindStruct                  // Struct
	kindUnion                   // Union
	kindEnum                    // Enum
	kindForward                 // Forward
	kindTypedef                 // Typedef
	kindVolatile                // Volatile
	kindConst                   // Const
	kindRestrict                // Restrict
	// Added ~4.20
	kindFunc      // Func
	kindFuncProto // FuncProto
	// Added ~5.1
	kindVar     // Var
	kindDatasec // Datasec
	// Added ~5.13
	kindFloat // Float
	// Added 5.16
	kindDeclTag // DeclTag
	// Added 5.17
	kindTypeTag // TypeTag
	// Added 6.0
	kindEnum64 // Enum64
)

// FuncLinkage describes BTF function linkage metadata.
type FuncLinkage int

// Equivalent of enum btf_func_linkage.
const (
	StaticFunc FuncLinkage = iota // static
	GlobalFunc                    // global
	ExternFunc                    // extern
)

// VarLinkage describes BTF variable linkage metadata.
type VarLinkage int

// Variable linkage values, mirroring the function linkage values above.
const (
	StaticVar VarLinkage = iota // static
	GlobalVar                   // global
	ExternVar                   // extern
)

// Field widths and shifts for the bit-packed btfType.Info word.
// Despite its name, btfTypeVlenMask is a bit width (16 bits), not a mask;
// see readBits/writeBits below for how these constants are consumed.
const (
	btfTypeKindShift     = 24
	btfTypeKindLen       = 5
	btfTypeVlenShift     = 0
	btfTypeVlenMask      = 16
	btfTypeKindFlagShift = 31
	btfTypeKindFlagMask  = 1
)

// btfHeaderLen is the encoded size of btfHeader in bytes.
var btfHeaderLen = binary.Size(&btfHeader{})

// btfHeader is the on-disk header at the start of the .BTF section.
type btfHeader struct {
	Magic   uint16
	Version uint8
	Flags   uint8
	HdrLen  uint32

	TypeOff   uint32
	TypeLen   uint32
	StringOff uint32
	StringLen uint32
}

// typeStart returns the offset from the beginning of the .BTF section
// to the start of its type entries.
func (h *btfHeader) typeStart() int64 {
	return int64(h.HdrLen + h.TypeOff)
}

// stringStart returns the offset from the beginning of the .BTF section
// to the start of its string table.
func (h *btfHeader) stringStart() int64 {
	return int64(h.HdrLen + h.StringOff)
}
// parseBTFHeader parses the header of the .BTF section.
func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) {
	var header btfHeader
	if err := binary.Read(r, bo, &header); err != nil {
		return nil, fmt.Errorf("can't read header: %v", err)
	}

	switch {
	case header.Magic != btfMagic:
		return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
	case header.Version != 1:
		return nil, fmt.Errorf("unexpected version %v", header.Version)
	case header.Flags != 0:
		return nil, fmt.Errorf("unsupported flags %v", header.Flags)
	}

	// The header may be followed by padding up to HdrLen; skip it, but
	// insist that it is all zeroes.
	padding := int64(header.HdrLen) - int64(binary.Size(&header))
	if padding < 0 {
		return nil, errors.New("header length shorter than btfHeader size")
	}
	if _, err := io.CopyN(internal.DiscardZeroes{}, r, padding); err != nil {
		return nil, fmt.Errorf("header padding: %v", err)
	}

	return &header, nil
}
// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
type btfType struct {
	// NameOff is an offset into the BTF string table.
	NameOff uint32
	/* "info" bits arrangement
	 * bits 0-15: vlen (e.g. # of struct's members), linkage
	 * bits 16-23: unused
	 * bits 24-28: kind (e.g. int, ptr, array...etc)
	 * bits 29-30: unused
	 * bit 31: kind_flag, currently used by
	 *	struct, union and fwd
	 */
	Info uint32
	/* "size" is used by INT, ENUM, STRUCT and UNION.
	 * "size" tells the size of the type it is describing.
	 *
	 * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
	 * FUNC and FUNC_PROTO.
	 * "type" is a type_id referring to another type.
	 */
	SizeType uint32
}

// btfTypeSize is the encoded size of btfType on the wire.
var btfTypeSize = int(unsafe.Sizeof(btfType{}))
// unmarshalBtfType decodes a single btfType from the start of b and
// reports how many bytes were consumed.
func unmarshalBtfType(bt *btfType, b []byte, bo binary.ByteOrder) (int, error) {
	if len(b) < btfTypeSize {
		return 0, fmt.Errorf("not enough bytes to unmarshal btfType")
	}

	*bt = btfType{
		NameOff:  bo.Uint32(b[0:4]),
		Info:     bo.Uint32(b[4:8]),
		SizeType: bo.Uint32(b[8:12]),
	}
	return btfTypeSize, nil
}
// mask returns a value with the low len bits set.
func mask(len uint32) uint32 {
	return (1 << len) - 1
}

// readBits extracts a len-bit wide field located shift bits from the
// least significant end of value.
func readBits(value, len, shift uint32) uint32 {
	field := value >> shift
	return field & mask(len)
}

// writeBits returns value with its len-bit field at shift replaced by new.
func writeBits(value, len, shift, new uint32) uint32 {
	cleared := value &^ (mask(len) << shift)
	return cleared | (new&mask(len))<<shift
}
// info extracts a bit field of the given width at shift from Info.
func (bt *btfType) info(len, shift uint32) uint32 {
	return readBits(bt.Info, len, shift)
}

// setInfo stores value into the bit field of the given width at shift.
func (bt *btfType) setInfo(value, len, shift uint32) {
	bt.Info = writeBits(bt.Info, len, shift, value)
}

// Kind returns the BTF kind encoded in bits 24-28 of Info.
func (bt *btfType) Kind() btfKind {
	return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift))
}

func (bt *btfType) SetKind(kind btfKind) {
	bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift)
}

// Vlen returns the number of kind-specific records that follow the type
// (e.g. struct members). btfTypeVlenMask is a bit width despite its name.
func (bt *btfType) Vlen() int {
	return int(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}

func (bt *btfType) SetVlen(vlen int) {
	bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}

// kindFlagBool reads bit 31 (kind_flag) of Info as a bool.
func (bt *btfType) kindFlagBool() bool {
	return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}

func (bt *btfType) setKindFlagBool(set bool) {
	var value uint32
	if set {
		value = 1
	}
	bt.setInfo(value, btfTypeKindFlagMask, btfTypeKindFlagShift)
}

// Bitfield returns true if the struct or union contain a bitfield.
func (bt *btfType) Bitfield() bool {
	return bt.kindFlagBool()
}

func (bt *btfType) SetBitfield(isBitfield bool) {
	bt.setKindFlagBool(isBitfield)
}

// FwdKind reinterprets the kind_flag bit for forward declarations.
func (bt *btfType) FwdKind() FwdKind {
	return FwdKind(bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift))
}

func (bt *btfType) SetFwdKind(kind FwdKind) {
	bt.setInfo(uint32(kind), btfTypeKindFlagMask, btfTypeKindFlagShift)
}

// Signed reads the kind_flag bit.
func (bt *btfType) Signed() bool {
	return bt.kindFlagBool()
}

func (bt *btfType) SetSigned(signed bool) {
	bt.setKindFlagBool(signed)
}

// Linkage reinterprets the vlen bits as function linkage.
func (bt *btfType) Linkage() FuncLinkage {
	return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}

func (bt *btfType) SetLinkage(linkage FuncLinkage) {
	bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
}

// Type interprets SizeType as a reference to another type.
func (bt *btfType) Type() TypeID {
	// TODO: Panic here if wrong kind?
	return TypeID(bt.SizeType)
}

func (bt *btfType) SetType(id TypeID) {
	bt.SizeType = uint32(id)
}

// Size interprets SizeType as a size in bytes.
func (bt *btfType) Size() uint32 {
	// TODO: Panic here if wrong kind?
	return bt.SizeType
}

func (bt *btfType) SetSize(size uint32) {
	bt.SizeType = size
}
// Encode writes the btfType to buf in the given byte order and reports
// how many bytes were written.
func (bt *btfType) Encode(buf []byte, bo binary.ByteOrder) (int, error) {
	if len(buf) < btfTypeSize {
		return 0, fmt.Errorf("not enough bytes to marshal btfType")
	}

	// Serialize the three 32-bit words back to back.
	for i, word := range [...]uint32{bt.NameOff, bt.Info, bt.SizeType} {
		bo.PutUint32(buf[4*i:], word)
	}
	return btfTypeSize, nil
}
// DataLen returns the length of additional type specific data in bytes.
//
// Kinds whose case below has an empty body (pointer, forward, typedef,
// qualifiers, func, float, type tag) carry no additional data; their cases
// break out of the switch (Go cases do not fall through implicitly) and
// reach the final "return 0, nil".
func (bt *btfType) DataLen() (int, error) {
	switch bt.Kind() {
	case kindInt:
		return int(unsafe.Sizeof(btfInt{})), nil
	case kindPointer:
	case kindArray:
		return int(unsafe.Sizeof(btfArray{})), nil
	case kindStruct:
		fallthrough
	case kindUnion:
		// One btfMember per field.
		return int(unsafe.Sizeof(btfMember{})) * bt.Vlen(), nil
	case kindEnum:
		return int(unsafe.Sizeof(btfEnum{})) * bt.Vlen(), nil
	case kindForward:
	case kindTypedef:
	case kindVolatile:
	case kindConst:
	case kindRestrict:
	case kindFunc:
	case kindFuncProto:
		// One btfParam per argument.
		return int(unsafe.Sizeof(btfParam{})) * bt.Vlen(), nil
	case kindVar:
		return int(unsafe.Sizeof(btfVariable{})), nil
	case kindDatasec:
		// One btfVarSecinfo per contained variable.
		return int(unsafe.Sizeof(btfVarSecinfo{})) * bt.Vlen(), nil
	case kindFloat:
	case kindDeclTag:
		return int(unsafe.Sizeof(btfDeclTag{})), nil
	case kindTypeTag:
	case kindEnum64:
		return int(unsafe.Sizeof(btfEnum64{})) * bt.Vlen(), nil
	default:
		return 0, fmt.Errorf("unknown kind: %v", bt.Kind())
	}
	return 0, nil
}
// btfInt encodes additional data for integers.
//
//	? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b
//	? = undefined
//	e = encoding
//	o = offset (bitfields?)
//	b = bits (bitfields)
type btfInt struct {
	Raw uint32
}

// Bit-field widths and shifts for btfInt.Raw, matching the diagram above.
const (
	btfIntEncodingLen   = 4
	btfIntEncodingShift = 24
	btfIntOffsetLen     = 8
	btfIntOffsetShift   = 16
	btfIntBitsLen       = 8
	btfIntBitsShift     = 0
)

// btfIntLen is the encoded size of btfInt on the wire.
var btfIntLen = int(unsafe.Sizeof(btfInt{}))

// unmarshalBtfInt decodes a btfInt from the start of b and reports how
// many bytes were consumed.
func unmarshalBtfInt(bi *btfInt, b []byte, bo binary.ByteOrder) (int, error) {
	if len(b) < btfIntLen {
		return 0, fmt.Errorf("not enough bytes to unmarshal btfInt")
	}
	bi.Raw = bo.Uint32(b[0:])
	return btfIntLen, nil
}

// Encoding extracts the encoding field.
func (bi btfInt) Encoding() IntEncoding {
	return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift))
}

func (bi *btfInt) SetEncoding(e IntEncoding) {
	bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e))
}

// Offset extracts the offset field.
func (bi btfInt) Offset() Bits {
	return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift))
}

func (bi *btfInt) SetOffset(offset uint32) {
	bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset)
}

// Bits extracts the bits field.
func (bi btfInt) Bits() Bits {
	return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift))
}

func (bi *btfInt) SetBits(bits byte) {
	bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits))
}
// btfArray encodes additional data for array kinds.
type btfArray struct {
	Type      TypeID
	IndexType TypeID
	Nelems    uint32
}

// btfArrayLen is the encoded size of btfArray on the wire.
var btfArrayLen = int(unsafe.Sizeof(btfArray{}))

// unmarshalBtfArray decodes a btfArray from the start of b and reports
// how many bytes were consumed.
func unmarshalBtfArray(ba *btfArray, b []byte, bo binary.ByteOrder) (int, error) {
	if len(b) < btfArrayLen {
		return 0, fmt.Errorf("not enough bytes to unmarshal btfArray")
	}
	ba.Type = TypeID(bo.Uint32(b[0:]))
	ba.IndexType = TypeID(bo.Uint32(b[4:]))
	ba.Nelems = bo.Uint32(b[8:])
	return btfArrayLen, nil
}
// btfMember encodes a single struct or union member.
type btfMember struct {
	NameOff uint32
	Type    TypeID
	Offset  uint32
}

// btfMemberLen is the encoded size of btfMember on the wire.
var btfMemberLen = int(unsafe.Sizeof(btfMember{}))

// unmarshalBtfMember decodes a btfMember from the start of b and reports
// how many bytes were consumed.
func unmarshalBtfMember(bm *btfMember, b []byte, bo binary.ByteOrder) (int, error) {
	// Spelled len(b) < btfMemberLen for consistency with the other
	// unmarshal helpers in this file.
	if len(b) < btfMemberLen {
		return 0, fmt.Errorf("not enough bytes to unmarshal btfMember")
	}
	bm.NameOff = bo.Uint32(b[0:])
	bm.Type = TypeID(bo.Uint32(b[4:]))
	bm.Offset = bo.Uint32(b[8:])
	return btfMemberLen, nil
}
// btfVarSecinfo encodes additional data for datasec members.
type btfVarSecinfo struct {
	Type   TypeID
	Offset uint32
	Size   uint32
}

// btfVarSecinfoLen is the encoded size of btfVarSecinfo on the wire.
var btfVarSecinfoLen = int(unsafe.Sizeof(btfVarSecinfo{}))

// unmarshalBtfVarSecInfo decodes a btfVarSecinfo from the start of b and
// reports how many bytes were consumed.
func unmarshalBtfVarSecInfo(bvsi *btfVarSecinfo, b []byte, bo binary.ByteOrder) (int, error) {
	if len(b) < btfVarSecinfoLen {
		return 0, fmt.Errorf("not enough bytes to unmarshal btfVarSecinfo")
	}
	bvsi.Type = TypeID(bo.Uint32(b[0:]))
	bvsi.Offset = bo.Uint32(b[4:])
	bvsi.Size = bo.Uint32(b[8:])
	return btfVarSecinfoLen, nil
}
// btfVariable encodes additional data for variable kinds.
type btfVariable struct {
	Linkage uint32
}

// btfVariableLen is the encoded size of btfVariable on the wire.
var btfVariableLen = int(unsafe.Sizeof(btfVariable{}))

// unmarshalBtfVariable decodes a btfVariable from the start of b and
// reports how many bytes were consumed.
func unmarshalBtfVariable(bv *btfVariable, b []byte, bo binary.ByteOrder) (int, error) {
	if len(b) < btfVariableLen {
		return 0, fmt.Errorf("not enough bytes to unmarshal btfVariable")
	}
	bv.Linkage = bo.Uint32(b[0:])
	return btfVariableLen, nil
}
// btfEnum encodes a single 32-bit enum value.
type btfEnum struct {
	NameOff uint32
	Val     uint32
}

// btfEnumLen is the encoded size of btfEnum on the wire.
var btfEnumLen = int(unsafe.Sizeof(btfEnum{}))

// unmarshalBtfEnum decodes a btfEnum from the start of b and reports how
// many bytes were consumed.
func unmarshalBtfEnum(be *btfEnum, b []byte, bo binary.ByteOrder) (int, error) {
	// Spelled len(b) < btfEnumLen for consistency with the other
	// unmarshal helpers in this file.
	if len(b) < btfEnumLen {
		return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum")
	}
	be.NameOff = bo.Uint32(b[0:])
	be.Val = bo.Uint32(b[4:])
	return btfEnumLen, nil
}
// btfEnum64 encodes a single 64-bit enum value, split into low and high
// 32-bit halves.
type btfEnum64 struct {
	NameOff uint32
	ValLo32 uint32
	ValHi32 uint32
}

// btfEnum64Len is the encoded size of btfEnum64 on the wire.
var btfEnum64Len = int(unsafe.Sizeof(btfEnum64{}))

// unmarshalBtfEnum64 decodes a btfEnum64 from the start of b and reports
// how many bytes were consumed.
func unmarshalBtfEnum64(enum *btfEnum64, b []byte, bo binary.ByteOrder) (int, error) {
	if len(b) < btfEnum64Len {
		return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum64")
	}
	enum.NameOff = bo.Uint32(b[0:])
	enum.ValLo32 = bo.Uint32(b[4:])
	enum.ValHi32 = bo.Uint32(b[8:])
	return btfEnum64Len, nil
}

// btfParam encodes a single func proto parameter.
type btfParam struct {
	NameOff uint32
	Type    TypeID
}

// btfParamLen is the encoded size of btfParam on the wire.
var btfParamLen = int(unsafe.Sizeof(btfParam{}))

// unmarshalBtfParam decodes a btfParam from the start of b and reports
// how many bytes were consumed.
func unmarshalBtfParam(param *btfParam, b []byte, bo binary.ByteOrder) (int, error) {
	if len(b) < btfParamLen {
		return 0, fmt.Errorf("not enough bytes to unmarshal btfParam")
	}
	param.NameOff = bo.Uint32(b[0:])
	param.Type = TypeID(bo.Uint32(b[4:]))
	return btfParamLen, nil
}

// btfDeclTag encodes additional data for decl tag kinds.
type btfDeclTag struct {
	ComponentIdx uint32
}

// btfDeclTagLen is the encoded size of btfDeclTag on the wire.
var btfDeclTagLen = int(unsafe.Sizeof(btfDeclTag{}))

// unmarshalBtfDeclTag decodes a btfDeclTag from the start of b and
// reports how many bytes were consumed.
func unmarshalBtfDeclTag(bdt *btfDeclTag, b []byte, bo binary.ByteOrder) (int, error) {
	if len(b) < btfDeclTagLen {
		return 0, fmt.Errorf("not enough bytes to unmarshal btfDeclTag")
	}
	bdt.ComponentIdx = bo.Uint32(b[0:])
	return btfDeclTagLen, nil
}

View File

@@ -0,0 +1,80 @@
// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind"; DO NOT EDIT.
package btf
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[StaticFunc-0]
_ = x[GlobalFunc-1]
_ = x[ExternFunc-2]
}
const _FuncLinkage_name = "staticglobalextern"
var _FuncLinkage_index = [...]uint8{0, 6, 12, 18}
func (i FuncLinkage) String() string {
if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) {
return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]]
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[StaticVar-0]
_ = x[GlobalVar-1]
_ = x[ExternVar-2]
}
const _VarLinkage_name = "staticglobalextern"
var _VarLinkage_index = [...]uint8{0, 6, 12, 18}
func (i VarLinkage) String() string {
if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) {
return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]]
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[kindUnknown-0]
_ = x[kindInt-1]
_ = x[kindPointer-2]
_ = x[kindArray-3]
_ = x[kindStruct-4]
_ = x[kindUnion-5]
_ = x[kindEnum-6]
_ = x[kindForward-7]
_ = x[kindTypedef-8]
_ = x[kindVolatile-9]
_ = x[kindConst-10]
_ = x[kindRestrict-11]
_ = x[kindFunc-12]
_ = x[kindFuncProto-13]
_ = x[kindVar-14]
_ = x[kindDatasec-15]
_ = x[kindFloat-16]
_ = x[kindDeclTag-17]
_ = x[kindTypeTag-18]
_ = x[kindEnum64-19]
}
const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTagEnum64"
var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114, 120}
func (i btfKind) String() string {
if i >= btfKind(len(_btfKind_index)-1) {
return "btfKind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _btfKind_name[_btfKind_index[i]:_btfKind_index[i+1]]
}

1264
go/vendor/github.com/cilium/ebpf/btf/core.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

5
go/vendor/github.com/cilium/ebpf/btf/doc.go generated vendored Normal file
View File

@@ -0,0 +1,5 @@
// Package btf handles data encoded according to the BPF Type Format.
//
// The canonical documentation lives in the Linux kernel repository and is
// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
package btf

832
go/vendor/github.com/cilium/ebpf/btf/ext_info.go generated vendored Normal file
View File

@@ -0,0 +1,832 @@
package btf
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"sort"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
)
// ExtInfos contains ELF section metadata.
//
// Each map is keyed by ELF section name.
type ExtInfos struct {
	// The slices are sorted by offset in ascending order.
	funcInfos       map[string]FuncOffsets
	lineInfos       map[string]LineOffsets
	relocationInfos map[string]CORERelocationInfos
}
// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF.
//
// Returns an error wrapping ErrNotFound if no ext infos are present.
func loadExtInfosFromELF(file *internal.SafeELFFile, spec *Spec) (*ExtInfos, error) {
	section := file.Section(".BTF.ext")
	switch {
	case section == nil:
		return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound)
	case section.ReaderAt == nil:
		// A nil ReaderAt means the section data is compressed.
		return nil, fmt.Errorf("compressed ext_info is not supported")
	}

	return loadExtInfos(section.ReaderAt, file.ByteOrder, spec)
}
// loadExtInfos parses bare ext infos.
func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, error) {
	// Open unbuffered section reader. binary.Read() calls io.ReadFull on
	// the header structs, resulting in one syscall per header.
	headerRd := io.NewSectionReader(r, 0, math.MaxInt64)
	extHeader, err := parseBTFExtHeader(headerRd, bo)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF extension header: %w", err)
	}
	coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err)
	}
	// Parse the func_info sub-section and resolve each record against spec.
	buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen))
	btfFuncInfos, err := parseFuncInfos(buf, bo, spec.strings)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF function info: %w", err)
	}
	funcInfos := make(map[string]FuncOffsets, len(btfFuncInfos))
	for section, bfis := range btfFuncInfos {
		funcInfos[section], err = newFuncOffsets(bfis, spec)
		if err != nil {
			return nil, fmt.Errorf("section %s: func infos: %w", section, err)
		}
	}
	// Parse the line_info sub-section.
	buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen))
	btfLineInfos, err := parseLineInfos(buf, bo, spec.strings)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF line info: %w", err)
	}
	lineInfos := make(map[string]LineOffsets, len(btfLineInfos))
	for section, blis := range btfLineInfos {
		lineInfos[section], err = newLineInfos(blis, spec.strings)
		if err != nil {
			return nil, fmt.Errorf("section %s: line infos: %w", section, err)
		}
	}
	// CO-RE relocations are optional; only parse them if the extended
	// header advertises a non-empty sub-section.
	if coreHeader == nil || coreHeader.COREReloLen == 0 {
		return &ExtInfos{funcInfos, lineInfos, nil}, nil
	}
	var btfCORERelos map[string][]bpfCORERelo
	buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen))
	btfCORERelos, err = parseCORERelos(buf, bo, spec.strings)
	if err != nil {
		return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err)
	}
	coreRelos := make(map[string]CORERelocationInfos, len(btfCORERelos))
	for section, brs := range btfCORERelos {
		coreRelos[section], err = newRelocationInfos(brs, spec, spec.strings)
		if err != nil {
			return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err)
		}
	}
	return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil
}
// Zero-size marker types used as keys for instruction metadata.
type (
	funcInfoMeta       struct{}
	coreRelocationMeta struct{}
)

// Assign per-section metadata from BTF to a section's instructions.
func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
	// Missing map keys produce zero values, which are treated as empty by
	// AssignMetadataToInstructions.
	AssignMetadataToInstructions(insns, ei.funcInfos[section], ei.lineInfos[section], ei.relocationInfos[section])
}
// AssignMetadataToInstructions assigns per-instruction metadata to the
// instructions in insns.
//
// funcInfos, lineInfos and reloInfos are consumed front to back as the
// instruction iterator advances, relying on them being sorted by offset.
func AssignMetadataToInstructions(
	insns asm.Instructions,
	funcInfos FuncOffsets,
	lineInfos LineOffsets,
	reloInfos CORERelocationInfos,
) {
	iter := insns.Iterate()
	for iter.Next() {
		// Attach at most one record of each kind per instruction offset.
		if len(funcInfos) > 0 && funcInfos[0].Offset == iter.Offset {
			*iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos[0].Func)
			funcInfos = funcInfos[1:]
		}
		if len(lineInfos) > 0 && lineInfos[0].Offset == iter.Offset {
			*iter.Ins = iter.Ins.WithSource(lineInfos[0].Line)
			lineInfos = lineInfos[1:]
		}
		if len(reloInfos.infos) > 0 && reloInfos.infos[0].offset == iter.Offset {
			iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos.infos[0].relo)
			reloInfos.infos = reloInfos.infos[1:]
		}
	}
}
// MarshalExtInfos encodes function and line info embedded in insns into kernel
// wire format.
//
// If an instruction has an [asm.Comment], it will be synthesized into a mostly
// empty line info.
func MarshalExtInfos(insns asm.Instructions, b *Builder) (funcInfos, lineInfos []byte, _ error) {
	// Scan for the first annotated instruction; bail out early if there is
	// none. The iterator deliberately remains positioned on that
	// instruction so the marshal loop below starts from it.
	iter := insns.Iterate()
	for iter.Next() {
		if iter.Ins.Source() != nil || FuncMetadata(iter.Ins) != nil {
			goto marshal
		}
	}
	return nil, nil, nil
marshal:
	var fiBuf, liBuf bytes.Buffer
	for {
		if fn := FuncMetadata(iter.Ins); fn != nil {
			fi := &FuncOffset{
				Func:   fn,
				Offset: iter.Offset,
			}
			if err := fi.marshal(&fiBuf, b); err != nil {
				return nil, nil, fmt.Errorf("write func info: %w", err)
			}
		}
		if source := iter.Ins.Source(); source != nil {
			// Synthesize a Line when the source is a plain comment.
			var line *Line
			if l, ok := source.(*Line); ok {
				line = l
			} else {
				line = &Line{
					line: source.String(),
				}
			}
			li := &LineOffset{
				Offset: iter.Offset,
				Line:   line,
			}
			if err := li.marshal(&liBuf, b); err != nil {
				return nil, nil, fmt.Errorf("write line info: %w", err)
			}
		}
		if !iter.Next() {
			break
		}
	}
	return fiBuf.Bytes(), liBuf.Bytes(), nil
}
// btfExtHeader is found at the start of the .BTF.ext section.
type btfExtHeader struct {
	Magic   uint16
	Version uint8
	Flags   uint8

	// HdrLen is larger than the size of struct btfExtHeader when it is
	// immediately followed by a btfExtCOREHeader.
	HdrLen uint32

	FuncInfoOff uint32
	FuncInfoLen uint32
	LineInfoOff uint32
	LineInfoLen uint32
}
// parseBTFExtHeader parses the header of the .BTF.ext section.
func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) {
	var header btfExtHeader
	if err := binary.Read(r, bo, &header); err != nil {
		return nil, fmt.Errorf("can't read header: %v", err)
	}

	switch {
	case header.Magic != btfMagic:
		return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
	case header.Version != 1:
		return nil, fmt.Errorf("unexpected version %v", header.Version)
	case header.Flags != 0:
		return nil, fmt.Errorf("unsupported flags %v", header.Flags)
	case int64(header.HdrLen) < int64(binary.Size(&header)):
		// HdrLen may exceed the struct size (CO-RE extension header),
		// but never undercut it.
		return nil, fmt.Errorf("header length shorter than btfExtHeader size")
	}

	return &header, nil
}
// funcInfoStart returns the offset from the beginning of the .BTF.ext section
// to the start of its func_info entries.
func (h *btfExtHeader) funcInfoStart() int64 {
	return int64(h.HdrLen + h.FuncInfoOff)
}

// lineInfoStart returns the offset from the beginning of the .BTF.ext section
// to the start of its line_info entries.
func (h *btfExtHeader) lineInfoStart() int64 {
	return int64(h.HdrLen + h.LineInfoOff)
}

// coreReloStart returns the offset from the beginning of the .BTF.ext section
// to the start of its CO-RE relocation entries.
func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 {
	return int64(h.HdrLen + ch.COREReloOff)
}
// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen
// field is larger than its size.
type btfExtCOREHeader struct {
	COREReloOff uint32
	COREReloLen uint32
}

// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional
// header bytes are present, extHeader.HdrLen will be larger than the struct,
// indicating the presence of a CO-RE extension header.
func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) {
	// extHeader is already a pointer: taking its address again would hand
	// binary.Size a **btfExtHeader, for which it returns -1, making
	// remainder below non-zero even when no CO-RE header is present.
	extHdrSize := int64(binary.Size(extHeader))
	remainder := int64(extHeader.HdrLen) - extHdrSize
	if remainder == 0 {
		// The header is exactly btfExtHeader-sized: no CO-RE extension.
		return nil, nil
	}

	var coreHeader btfExtCOREHeader
	if err := binary.Read(r, bo, &coreHeader); err != nil {
		return nil, fmt.Errorf("can't read header: %v", err)
	}

	return &coreHeader, nil
}
// btfExtInfoSec is the per-section group header within the func_info and
// line_info sub-sections.
type btfExtInfoSec struct {
	// SecNameOff is an offset into the BTF string table.
	SecNameOff uint32
	// NumInfo is the count of records that follow this header.
	NumInfo uint32
}

// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext,
// appearing within func_info and line_info sub-sections.
// These headers appear once for each program section in the ELF and are
// followed by one or more func/line_info records for the section.
func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) {
	var infoHeader btfExtInfoSec
	if err := binary.Read(r, bo, &infoHeader); err != nil {
		return "", nil, fmt.Errorf("read ext info header: %w", err)
	}
	secName, err := strings.Lookup(infoHeader.SecNameOff)
	if err != nil {
		return "", nil, fmt.Errorf("get section name: %w", err)
	}
	if secName == "" {
		return "", nil, fmt.Errorf("extinfo header refers to empty section name")
	}
	if infoHeader.NumInfo == 0 {
		return "", nil, fmt.Errorf("section %s has zero records", secName)
	}
	return secName, &infoHeader, nil
}
// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos
// or line_infos segment that describes the length of all extInfoRecords in
// that segment.
func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) {
const maxRecordSize = 256
var recordSize uint32
if err := binary.Read(r, bo, &recordSize); err != nil {
return 0, fmt.Errorf("can't read record size: %v", err)
}
if recordSize < 4 {
// Need at least InsnOff worth of bytes per record.
return 0, errors.New("record size too short")
}
if recordSize > maxRecordSize {
return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
}
return recordSize, nil
}
// FuncOffsets is a sorted slice of FuncOffset.
type FuncOffsets []FuncOffset

// FuncInfoSize is the size of a FuncInfo in BTF wire format.
var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{}))

// FuncOffset represents a [btf.Func] and its raw instruction offset within a
// BPF program.
type FuncOffset struct {
	Offset asm.RawInstructionOffset
	Func   *Func
}

// bpfFuncInfo is the on-disk representation of a single func info record.
type bpfFuncInfo struct {
	// Instruction offset of the function within an ELF section.
	InsnOff uint32
	TypeID  TypeID
}
// newFuncOffset resolves a raw func info record against spec, validating
// that the referenced type is a named Func.
func newFuncOffset(fi bpfFuncInfo, spec *Spec) (*FuncOffset, error) {
	typ, err := spec.TypeByID(fi.TypeID)
	if err != nil {
		return nil, err
	}

	fn, ok := typ.(*Func)
	if !ok {
		return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ)
	}

	// C doesn't have anonymous functions, but check just in case.
	if fn.Name == "" {
		return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID)
	}

	return &FuncOffset{
		Offset: asm.RawInstructionOffset(fi.InsnOff),
		Func:   fn,
	}, nil
}
// newFuncOffsets resolves a slice of raw func info records against spec
// and returns them sorted by instruction offset.
func newFuncOffsets(bfis []bpfFuncInfo, spec *Spec) (FuncOffsets, error) {
	fos := make(FuncOffsets, 0, len(bfis))

	for _, bfi := range bfis {
		fi, err := newFuncOffset(bfi, spec)
		if err != nil {
			return FuncOffsets{}, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
		}
		fos = append(fos, *fi)
	}
	// sort.Slice expects a strict less-than relation; `<=` would violate
	// its contract for equal offsets.
	sort.Slice(fos, func(i, j int) bool {
		return fos[i].Offset < fos[j].Offset
	})

	return fos, nil
}
// LoadFuncInfos parses BTF func info from kernel wire format into a
// [FuncOffsets], a sorted slice of [btf.Func]s of (sub)programs within a BPF
// program with their corresponding raw instruction offsets.
func LoadFuncInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (FuncOffsets, error) {
	// offsetInBytes is false: kernel wire format counts instructions.
	fis, err := parseFuncInfoRecords(
		reader,
		bo,
		FuncInfoSize,
		recordNum,
		false,
	)
	if err != nil {
		return FuncOffsets{}, fmt.Errorf("parsing BTF func info: %w", err)
	}
	return newFuncOffsets(fis, spec)
}

// marshal writes the func info into the BTF wire format, registering the
// Func with b to obtain its type ID.
func (fi *FuncOffset) marshal(w *bytes.Buffer, b *Builder) error {
	id, err := b.Add(fi.Func)
	if err != nil {
		return err
	}
	bfi := bpfFuncInfo{
		InsnOff: uint32(fi.Offset),
		TypeID:  id,
	}
	buf := make([]byte, FuncInfoSize)
	internal.NativeEndian.PutUint32(buf, bfi.InsnOff)
	internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID))
	_, err = w.Write(buf)
	return err
}

// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
// func infos indexed by section name.
func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
	recordSize, err := parseExtInfoRecordSize(r, bo)
	if err != nil {
		return nil, err
	}
	result := make(map[string][]bpfFuncInfo)
	for {
		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
		if errors.Is(err, io.EOF) {
			// Exhausting the reader is the expected termination.
			return result, nil
		}
		if err != nil {
			return nil, err
		}
		records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true)
		if err != nil {
			return nil, fmt.Errorf("section %v: %w", secName, err)
		}
		result[secName] = records
	}
}

// parseFuncInfoRecords parses a stream of func_infos into a funcInfos.
// These records appear after a btf_ext_info_sec header in the func_info
// sub-section of .BTF.ext.
//
// offsetInBytes indicates that InsnOff counts bytes (ELF convention)
// rather than instructions (kernel convention).
func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfFuncInfo, error) {
	var out []bpfFuncInfo
	var fi bpfFuncInfo
	if exp, got := FuncInfoSize, recordSize; exp != got {
		// BTF blob's record size is longer than we know how to parse.
		return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got)
	}
	for i := uint32(0); i < recordNum; i++ {
		if err := binary.Read(r, bo, &fi); err != nil {
			return nil, fmt.Errorf("can't read function info: %v", err)
		}
		if offsetInBytes {
			if fi.InsnOff%asm.InstructionSize != 0 {
				return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff)
			}
			// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
			// Convert as early as possible.
			fi.InsnOff /= asm.InstructionSize
		}
		out = append(out, fi)
	}
	return out, nil
}
// LineInfoSize is the size of a bpfLineInfo in BTF wire format.
var LineInfoSize = uint32(binary.Size(bpfLineInfo{}))

// Line represents the location and contents of a single line of source
// code a BPF ELF was compiled from.
type Line struct {
	fileName   string
	line       string
	lineNumber uint32
	lineColumn uint32
}

// FileName returns the name of the source file.
func (li *Line) FileName() string {
	return li.fileName
}

// Line returns the contents of the source line.
func (li *Line) Line() string {
	return li.line
}

// LineNumber returns the line number within the file.
func (li *Line) LineNumber() uint32 {
	return li.lineNumber
}

// LineColumn returns the column within the line.
func (li *Line) LineColumn() uint32 {
	return li.lineColumn
}

// String implements fmt.Stringer, returning the source line contents.
func (li *Line) String() string {
	return li.line
}

// LineOffsets contains a sorted list of line infos.
type LineOffsets []LineOffset

// LineOffset represents a line info and its raw instruction offset.
type LineOffset struct {
	Offset asm.RawInstructionOffset
	Line   *Line
}

// Constants for the format of bpfLineInfo.LineCol: the upper 22 bits hold
// the line number and the lower 10 bits the column.
const (
	bpfLineShift = 10
	bpfLineMax   = (1 << (32 - bpfLineShift)) - 1
	bpfColumnMax = (1 << bpfLineShift) - 1
)

// bpfLineInfo is the on-disk representation of a single line info record.
type bpfLineInfo struct {
	// Instruction offset of the line within the whole instruction stream, in instructions.
	InsnOff     uint32
	FileNameOff uint32
	LineOff     uint32
	LineCol     uint32
}
// LoadLineInfos parses BTF line info in kernel wire format.
func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (LineOffsets, error) {
	// offsetInBytes is false: kernel wire format counts instructions.
	lis, err := parseLineInfoRecords(
		reader,
		bo,
		LineInfoSize,
		recordNum,
		false,
	)
	if err != nil {
		return LineOffsets{}, fmt.Errorf("parsing BTF line info: %w", err)
	}
	return newLineInfos(lis, spec.strings)
}

// newLineInfo resolves the string offsets of a raw line info record and
// unpacks its combined line/column word.
func newLineInfo(li bpfLineInfo, strings *stringTable) (LineOffset, error) {
	line, err := strings.LookupCached(li.LineOff)
	if err != nil {
		return LineOffset{}, fmt.Errorf("lookup of line: %w", err)
	}
	fileName, err := strings.LookupCached(li.FileNameOff)
	if err != nil {
		return LineOffset{}, fmt.Errorf("lookup of filename: %w", err)
	}
	// The upper bits of LineCol carry the line number, the low 10 bits
	// the column.
	lineNumber := li.LineCol >> bpfLineShift
	lineColumn := li.LineCol & bpfColumnMax
	return LineOffset{
		asm.RawInstructionOffset(li.InsnOff),
		&Line{
			fileName,
			line,
			lineNumber,
			lineColumn,
		},
	}, nil
}
// newLineInfos resolves a slice of raw line info records and returns them
// sorted by instruction offset.
func newLineInfos(blis []bpfLineInfo, strings *stringTable) (LineOffsets, error) {
	lis := make([]LineOffset, 0, len(blis))

	for _, bli := range blis {
		li, err := newLineInfo(bli, strings)
		if err != nil {
			return LineOffsets{}, fmt.Errorf("offset %d: %w", bli.InsnOff, err)
		}
		lis = append(lis, li)
	}
	// sort.Slice expects a strict less-than relation; `<=` would violate
	// its contract for equal offsets.
	sort.Slice(lis, func(i, j int) bool {
		return lis[i].Offset < lis[j].Offset
	})

	return lis, nil
}
// marshal writes the binary representation of the LineInfo to w, adding
// the file name and line contents to b's string table.
func (li *LineOffset) marshal(w *bytes.Buffer, b *Builder) error {
	line := li.Line
	// Line number and column share a single uint32; reject values that
	// don't fit their bit fields.
	if line.lineNumber > bpfLineMax {
		return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
	}
	if line.lineColumn > bpfColumnMax {
		return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
	}
	fileNameOff, err := b.addString(line.fileName)
	if err != nil {
		return fmt.Errorf("file name %q: %w", line.fileName, err)
	}
	lineOff, err := b.addString(line.line)
	if err != nil {
		return fmt.Errorf("line %q: %w", line.line, err)
	}
	bli := bpfLineInfo{
		uint32(li.Offset),
		fileNameOff,
		lineOff,
		(line.lineNumber << bpfLineShift) | line.lineColumn,
	}
	buf := make([]byte, LineInfoSize)
	internal.NativeEndian.PutUint32(buf, bli.InsnOff)
	internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff)
	internal.NativeEndian.PutUint32(buf[8:], bli.LineOff)
	internal.NativeEndian.PutUint32(buf[12:], bli.LineCol)
	_, err = w.Write(buf)
	return err
}

// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
// line infos indexed by section name.
func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) {
	recordSize, err := parseExtInfoRecordSize(r, bo)
	if err != nil {
		return nil, err
	}
	result := make(map[string][]bpfLineInfo)
	for {
		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
		if errors.Is(err, io.EOF) {
			// Exhausting the reader is the expected termination.
			return result, nil
		}
		if err != nil {
			return nil, err
		}
		records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true)
		if err != nil {
			return nil, fmt.Errorf("section %v: %w", secName, err)
		}
		result[secName] = records
	}
}
// parseLineInfoRecords parses a stream of line_infos into a lineInfos.
// These records appear after a btf_ext_info_sec header in the line_info
// sub-section of .BTF.ext.
func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfLineInfo, error) {
if exp, got := uint32(binary.Size(bpfLineInfo{})), recordSize; exp != got {
// BTF blob's record size is longer than we know how to parse.
return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got)
}
out := make([]bpfLineInfo, recordNum)
if err := binary.Read(r, bo, out); err != nil {
return nil, fmt.Errorf("can't read line info: %v", err)
}
if offsetInBytes {
for i := range out {
li := &out[i]
if li.InsnOff%asm.InstructionSize != 0 {
return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff)
}
// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
// Convert as early as possible.
li.InsnOff /= asm.InstructionSize
}
}
return out, nil
}
// bpfCORERelo matches the kernel's struct bpf_core_relo.
type bpfCORERelo struct {
	InsnOff      uint32
	TypeID       TypeID
	AccessStrOff uint32
	Kind         coreKind
}

// CORERelocation describes a single CO-RE relocation recorded in .BTF.ext.
type CORERelocation struct {
	// The local type of the relocation, stripped of typedefs and qualifiers.
	typ      Type
	accessor coreAccessor
	kind     coreKind
	// The ID of the local type in the source BTF.
	id TypeID
}

// String implements fmt.Stringer.
func (cr *CORERelocation) String() string {
	return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id)
}

// CORERelocationMetadata returns the CO-RE relocation attached to an
// instruction's metadata, or nil if there is none.
func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
	relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation)
	return relo
}

// CORERelocationInfos contains a sorted list of co:re relocation infos.
type CORERelocationInfos struct {
	infos []coreRelocationInfo
}

// coreRelocationInfo pairs a relocation with its raw instruction offset.
type coreRelocationInfo struct {
	relo   *CORERelocation
	offset asm.RawInstructionOffset
}
// newRelocationInfo resolves a raw CO-RE relocation record: the local type is
// looked up in spec, and the accessor string is resolved and parsed.
func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) {
	localType, err := spec.TypeByID(relo.TypeID)
	if err != nil {
		return nil, err
	}
	accessorRaw, err := strings.Lookup(relo.AccessStrOff)
	if err != nil {
		return nil, err
	}
	parsed, err := parseCOREAccessor(accessorRaw)
	if err != nil {
		return nil, fmt.Errorf("accessor %q: %s", accessorRaw, err)
	}
	info := coreRelocationInfo{
		relo: &CORERelocation{
			typ:      localType,
			accessor: parsed,
			kind:     relo.Kind,
			id:       relo.TypeID,
		},
		offset: asm.RawInstructionOffset(relo.InsnOff),
	}
	return &info, nil
}
// newRelocationInfos resolves a slice of raw CO-RE relocation records and
// returns them sorted by instruction offset.
func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) (CORERelocationInfos, error) {
	rs := CORERelocationInfos{
		infos: make([]coreRelocationInfo, 0, len(brs)),
	}
	for _, br := range brs {
		relo, err := newRelocationInfo(br, spec, strings)
		if err != nil {
			return CORERelocationInfos{}, fmt.Errorf("offset %d: %w", br.InsnOff, err)
		}
		rs.infos = append(rs.infos, *relo)
	}
	sort.Slice(rs.infos, func(i, j int) bool {
		return rs.infos[i].offset < rs.infos[j].offset
	})
	return rs, nil
}

// extInfoReloSize is the wire size of a single bpf_core_relo record.
var extInfoReloSize = binary.Size(bpfCORERelo{})

// parseCORERelos parses a core_relos sub-section within .BTF.ext into a map of
// CO-RE relocations indexed by section name.
func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) {
	recordSize, err := parseExtInfoRecordSize(r, bo)
	if err != nil {
		return nil, err
	}
	if recordSize != uint32(extInfoReloSize) {
		return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
	}
	result := make(map[string][]bpfCORERelo)
	for {
		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
		if errors.Is(err, io.EOF) {
			// EOF marks the end of the sub-section, not a failure.
			return result, nil
		}
		if err != nil {
			return nil, err
		}
		records, err := parseCOREReloRecords(r, bo, infoHeader.NumInfo)
		if err != nil {
			return nil, fmt.Errorf("section %v: %w", secName, err)
		}
		result[secName] = records
	}
}

// parseCOREReloRecords parses a stream of CO-RE relocation entries into a
// coreRelos. These records appear after a btf_ext_info_sec header in the
// core_relos sub-section of .BTF.ext.
func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordNum uint32) ([]bpfCORERelo, error) {
	var out []bpfCORERelo
	var relo bpfCORERelo
	for i := uint32(0); i < recordNum; i++ {
		if err := binary.Read(r, bo, &relo); err != nil {
			return nil, fmt.Errorf("can't read CO-RE relocation: %v", err)
		}
		if relo.InsnOff%asm.InstructionSize != 0 {
			return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff)
		}
		// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
		// Convert as early as possible.
		relo.InsnOff /= asm.InstructionSize
		out = append(out, relo)
	}
	return out, nil
}

158
go/vendor/github.com/cilium/ebpf/btf/feature.go generated vendored Normal file
View File

@@ -0,0 +1,158 @@
package btf
import (
"errors"
"math"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
// haveBTF attempts to load a BTF blob containing an Int. It should pass on any
// kernel that supports BPF_BTF_LOAD.
var haveBTF = internal.NewFeatureTest("BTF", func() error {
	// 0-length anonymous integer
	err := probeBTF(&Int{})
	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
		return internal.ErrNotSupported
	}
	return err
}, "4.18")

// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is
// used as a proxy for .bss, .data and .rodata map support, which generally
// come with a Var and Datasec. These were introduced in Linux 5.2.
var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", func() error {
	if err := haveBTF(); err != nil {
		return err
	}
	v := &Var{
		Name: "a",
		Type: &Pointer{(*Void)(nil)},
	}
	err := probeBTF(v)
	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
		// Treat both EINVAL and EPERM as not supported: creating the map may still
		// succeed without Btf* attrs.
		return internal.ErrNotSupported
	}
	return err
}, "5.2")

// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It
// is used as a proxy for ext_info (func_info) support, which depends on
// Func(Proto) by definition.
var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", func() error {
	if err := haveBTF(); err != nil {
		return err
	}
	fn := &Func{
		Name: "a",
		Type: &FuncProto{Return: (*Void)(nil)},
	}
	err := probeBTF(fn)
	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
		return internal.ErrNotSupported
	}
	return err
}, "5.0")

// haveFuncLinkage probes support for non-static function linkage in BTF,
// introduced in Linux 5.6.
var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", func() error {
	if err := haveProgBTF(); err != nil {
		return err
	}
	fn := &Func{
		Name:    "a",
		Type:    &FuncProto{Return: (*Void)(nil)},
		Linkage: GlobalFunc,
	}
	err := probeBTF(fn)
	if errors.Is(err, unix.EINVAL) {
		return internal.ErrNotSupported
	}
	return err
}, "5.6")

// haveDeclTags probes support for BTF declaration tags, introduced in
// Linux 5.16.
var haveDeclTags = internal.NewFeatureTest("BTF decl tags", func() error {
	if err := haveBTF(); err != nil {
		return err
	}
	t := &Typedef{
		Name: "a",
		Type: &Int{},
		Tags: []string{"a"},
	}
	err := probeBTF(t)
	if errors.Is(err, unix.EINVAL) {
		return internal.ErrNotSupported
	}
	return err
}, "5.16")

// haveTypeTags probes support for BTF type tags, introduced in Linux 5.17.
var haveTypeTags = internal.NewFeatureTest("BTF type tags", func() error {
	if err := haveBTF(); err != nil {
		return err
	}
	t := &TypeTag{
		Type:  &Int{},
		Value: "a",
	}
	err := probeBTF(t)
	if errors.Is(err, unix.EINVAL) {
		return internal.ErrNotSupported
	}
	return err
}, "5.17")

// haveEnum64 probes support for 64-bit enums, introduced in Linux 6.0, by
// loading an enum with a value that doesn't fit into 32 bits.
var haveEnum64 = internal.NewFeatureTest("ENUM64", func() error {
	if err := haveBTF(); err != nil {
		return err
	}
	enum := &Enum{
		Size: 8,
		Values: []EnumValue{
			{"TEST", math.MaxUint32 + 1},
		},
	}
	err := probeBTF(enum)
	if errors.Is(err, unix.EINVAL) {
		return internal.ErrNotSupported
	}
	return err
}, "6.0")

// probeBTF marshals a single type into a BTF blob and attempts to load it
// into the kernel, returning the raw error from BPF_BTF_LOAD.
func probeBTF(typ Type) error {
	b, err := NewBuilder([]Type{typ})
	if err != nil {
		return err
	}
	buf, err := b.Marshal(nil, nil)
	if err != nil {
		return err
	}
	fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
		Btf:     sys.SlicePointer(buf),
		BtfSize: uint32(len(buf)),
	})
	if err == nil {
		fd.Close()
	}
	return err
}

353
go/vendor/github.com/cilium/ebpf/btf/format.go generated vendored Normal file
View File

@@ -0,0 +1,353 @@
package btf
import (
"errors"
"fmt"
"strings"
)
// errNestedTooDeep is returned when type resolution exceeds maxResolveDepth.
var errNestedTooDeep = errors.New("nested too deep")

// GoFormatter converts a Type to Go syntax.
//
// A zero GoFormatter is valid to use.
type GoFormatter struct {
	w strings.Builder

	// Types present in this map are referred to using the given name if they
	// are encountered when outputting another type.
	Names map[Type]string

	// Identifier is called for each field of struct-like types. By default the
	// field name is used as is.
	Identifier func(string) string

	// EnumIdentifier is called for each element of an enum. By default the
	// name of the enum type is concatenated with Identifier(element).
	EnumIdentifier func(name, element string) string
}

// TypeDeclaration generates a Go type declaration for a BTF type.
func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) {
	gf.w.Reset()
	if err := gf.writeTypeDecl(name, typ); err != nil {
		return "", err
	}
	return gf.w.String(), nil
}

// identifier applies the user-supplied Identifier hook, defaulting to the
// name unchanged.
func (gf *GoFormatter) identifier(s string) string {
	if gf.Identifier != nil {
		return gf.Identifier(s)
	}
	return s
}

// enumIdentifier applies the user-supplied EnumIdentifier hook, defaulting to
// the enum name concatenated with identifier(element).
func (gf *GoFormatter) enumIdentifier(name, element string) string {
	if gf.EnumIdentifier != nil {
		return gf.EnumIdentifier(name, element)
	}
	return name + gf.identifier(element)
}
// writeTypeDecl outputs a declaration of the given type.
//
// It encodes https://golang.org/ref/spec#Type_declarations:
//
//	type foo struct { _ structs.HostLayout; bar uint32; }
//	type bar int32
//
// For enums with values it additionally emits a const block with one constant
// per enum element.
func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error {
	if name == "" {
		return fmt.Errorf("need a name for type %s", typ)
	}
	typ = skipQualifiers(typ)
	fmt.Fprintf(&gf.w, "type %s ", name)
	if err := gf.writeTypeLit(typ, 0); err != nil {
		return err
	}
	e, ok := typ.(*Enum)
	if !ok || len(e.Values) == 0 {
		return nil
	}
	gf.w.WriteString("; const ( ")
	for _, ev := range e.Values {
		id := gf.enumIdentifier(name, ev.Name)
		var value any
		if e.Signed {
			// Reinterpret the stored uint64 as a signed value for output.
			value = int64(ev.Value)
		} else {
			value = ev.Value
		}
		fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, value)
	}
	gf.w.WriteString(")")
	return nil
}
// writeType outputs the name of a named type or a literal describing the type.
//
// It encodes https://golang.org/ref/spec#Types.
//
//	foo (if foo is a named type)
//	uint32
func (gf *GoFormatter) writeType(typ Type, depth int) error {
	typ = skipQualifiers(typ)
	name := gf.Names[typ]
	if name != "" {
		gf.w.WriteString(name)
		return nil
	}
	return gf.writeTypeLit(typ, depth)
}

// writeTypeLit outputs a literal describing the type.
//
// The function ignores named types.
//
// It encodes https://golang.org/ref/spec#TypeLit.
//
//	struct { _ structs.HostLayout; bar uint32; }
//	uint32
func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
	depth++
	if depth > maxResolveDepth {
		return errNestedTooDeep
	}
	var err error
	switch v := skipQualifiers(typ).(type) {
	case *Int:
		err = gf.writeIntLit(v)
	case *Enum:
		if !v.Signed {
			gf.w.WriteRune('u')
		}
		switch v.Size {
		case 1:
			gf.w.WriteString("int8")
		case 2:
			gf.w.WriteString("int16")
		case 4:
			gf.w.WriteString("int32")
		case 8:
			gf.w.WriteString("int64")
		default:
			err = fmt.Errorf("invalid enum size %d", v.Size)
		}
	case *Typedef:
		err = gf.writeType(v.Type, depth)
	case *Array:
		fmt.Fprintf(&gf.w, "[%d]", v.Nelems)
		err = gf.writeType(v.Type, depth)
	case *Struct:
		err = gf.writeStructLit(v.Size, v.Members, depth)
	case *Union:
		// Always choose the first member to represent the union in Go.
		err = gf.writeStructLit(v.Size, v.Members[:1], depth)
	case *Datasec:
		err = gf.writeDatasecLit(v, depth)
	case *Var:
		err = gf.writeTypeLit(v.Type, depth)
	default:
		return fmt.Errorf("type %T: %w", v, ErrNotSupported)
	}
	if err != nil {
		return fmt.Errorf("%s: %w", typ, err)
	}
	return nil
}
// writeIntLit outputs the Go equivalent of a BTF integer: bool, a sized
// (u)intN, or a byte array for integers wider than 8 bytes.
func (gf *GoFormatter) writeIntLit(i *Int) error {
	bits := i.Size * 8
	switch i.Encoding {
	case Bool:
		if i.Size != 1 {
			return fmt.Errorf("bool with size %d", i.Size)
		}
		gf.w.WriteString("bool")
	case Char:
		if i.Size != 1 {
			return fmt.Errorf("char with size %d", i.Size)
		}
		// BTF doesn't have a way to specify the signedness of a char. Assume
		// we are dealing with unsigned, since this works nicely with []byte
		// in Go code.
		fallthrough
	case Unsigned, Signed:
		stem := "uint"
		if i.Encoding == Signed {
			stem = "int"
		}
		if i.Size > 8 {
			fmt.Fprintf(&gf.w, "[%d]byte /* %s%d */", i.Size, stem, i.Size*8)
		} else {
			fmt.Fprintf(&gf.w, "%s%d", stem, bits)
		}
	default:
		return fmt.Errorf("can't encode %s", i.Encoding)
	}
	return nil
}
// writeStructLit outputs a Go struct literal for a struct-like BTF type,
// inserting explicit padding so the Go layout matches the BTF layout.
// Bitfield members are skipped and replaced with opaque padding.
func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error {
	gf.w.WriteString("struct { _ structs.HostLayout; ")
	prevOffset := uint32(0)
	skippedBitfield := false
	for i, m := range members {
		if m.BitfieldSize > 0 {
			skippedBitfield = true
			continue
		}
		offset := m.Offset.Bytes()
		if n := offset - prevOffset; skippedBitfield && n > 0 {
			// The gap is (at least partly) made of skipped bitfields; mark it.
			fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n)
		} else {
			gf.writePadding(n)
		}
		fieldSize, err := Sizeof(m.Type)
		if err != nil {
			return fmt.Errorf("field %d: %w", i, err)
		}
		prevOffset = offset + uint32(fieldSize)
		if prevOffset > size {
			return fmt.Errorf("field %d of size %d exceeds type size %d", i, fieldSize, size)
		}
		if err := gf.writeStructField(m, depth); err != nil {
			return fmt.Errorf("field %d: %w", i, err)
		}
	}
	// Trailing padding up to the declared struct size.
	gf.writePadding(size - prevOffset)
	gf.w.WriteString("}")
	return nil
}

// writeStructField outputs a single struct field ("name type; "). Anonymous
// unions are collapsed into their first member plus padding.
func (gf *GoFormatter) writeStructField(m Member, depth int) error {
	if m.BitfieldSize > 0 {
		return fmt.Errorf("bitfields are not supported")
	}
	if m.Offset%8 != 0 {
		return fmt.Errorf("unsupported offset %d", m.Offset)
	}
	if m.Name == "" {
		// Special case a nested anonymous union like
		//     struct foo { union { int bar; int baz }; }
		// by replacing the whole union with its first member.
		union, ok := m.Type.(*Union)
		if !ok {
			return fmt.Errorf("anonymous fields are not supported")
		}
		if len(union.Members) == 0 {
			return errors.New("empty anonymous union")
		}
		depth++
		if depth > maxResolveDepth {
			return errNestedTooDeep
		}
		m := union.Members[0]
		size, err := Sizeof(m.Type)
		if err != nil {
			return err
		}
		if err := gf.writeStructField(m, depth); err != nil {
			return err
		}
		// Pad the difference between the union and its first member.
		gf.writePadding(union.Size - uint32(size))
		return nil
	}
	fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name))
	if err := gf.writeType(m.Type, depth); err != nil {
		return err
	}
	gf.w.WriteString("; ")
	return nil
}
// writeDatasecLit outputs a Go struct literal mirroring the layout of a BTF
// data section. Only global variables are emitted; other linkages are skipped
// (their space is covered by padding).
func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error {
	gf.w.WriteString("struct { _ structs.HostLayout; ")
	prevOffset := uint32(0)
	for i, vsi := range ds.Vars {
		v, ok := vsi.Type.(*Var)
		if !ok {
			return fmt.Errorf("can't format %s as part of data section", vsi.Type)
		}
		if v.Linkage != GlobalVar {
			// Ignore static, extern, etc. for now.
			continue
		}
		if v.Name == "" {
			return fmt.Errorf("variable %d: empty name", i)
		}
		gf.writePadding(vsi.Offset - prevOffset)
		prevOffset = vsi.Offset + vsi.Size
		fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name))
		if err := gf.writeType(v.Type, depth); err != nil {
			return fmt.Errorf("variable %d: %w", i, err)
		}
		gf.w.WriteString("; ")
	}
	// Trailing padding up to the declared section size.
	gf.writePadding(ds.Size - prevOffset)
	gf.w.WriteString("}")
	return nil
}

// writePadding emits an anonymous byte-array field of the given size, or
// nothing if bytes is zero.
func (gf *GoFormatter) writePadding(bytes uint32) {
	if bytes > 0 {
		fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes)
	}
}
// skipQualifiers unwraps qualifiers (const, volatile, etc.) until a
// non-qualifier type is reached. A cycle marker is returned if more than
// maxResolveDepth qualifiers are chained.
func skipQualifiers(typ Type) Type {
	cur := typ
	for depth := 0; depth <= maxResolveDepth; depth++ {
		q, ok := cur.(qualifier)
		if !ok {
			return cur
		}
		cur = q.qualify()
	}
	return &cycle{typ}
}

333
go/vendor/github.com/cilium/ebpf/btf/handle.go generated vendored Normal file
View File

@@ -0,0 +1,333 @@
package btf
import (
"bytes"
"errors"
"fmt"
"math"
"os"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/platform"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
	fd *sys.FD

	// Size of the raw BTF in bytes.
	size uint32

	// needsKernelBase is true if this BTF requires vmlinux types as a base
	// (i.e. it belongs to a kernel module).
	needsKernelBase bool
}

// NewHandle loads the contents of a [Builder] into the kernel.
//
// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
func NewHandle(b *Builder) (*Handle, error) {
	// Reuse a pooled scratch buffer for marshaling.
	small := getByteSlice()
	defer putByteSlice(small)

	buf, err := b.Marshal(*small, KernelMarshalOptions())
	if err != nil {
		return nil, fmt.Errorf("marshal BTF: %w", err)
	}
	return NewHandleFromRawBTF(buf)
}
// NewHandleFromRawBTF loads raw BTF into the kernel.
//
// On failure the load is retried with a progressively larger verifier log
// buffer so that the kernel's diagnostics can be included in the error.
//
// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
func NewHandleFromRawBTF(btf []byte) (*Handle, error) {
	const minLogSize = 64 * 1024

	if platform.IsWindows {
		return nil, fmt.Errorf("btf: handle: %w", internal.ErrNotSupportedOnOS)
	}

	if uint64(len(btf)) > math.MaxUint32 {
		return nil, errors.New("BTF exceeds the maximum size")
	}

	attr := &sys.BtfLoadAttr{
		Btf:     sys.SlicePointer(btf),
		BtfSize: uint32(len(btf)),
	}

	var (
		logBuf []byte
		err    error
	)
	for {
		var fd *sys.FD
		fd, err = sys.BtfLoad(attr)
		if err == nil {
			return &Handle{fd, attr.BtfSize, false}, nil
		}

		if attr.BtfLogTrueSize != 0 && attr.BtfLogSize >= attr.BtfLogTrueSize {
			// The log buffer already has the correct size.
			break
		}

		if attr.BtfLogSize != 0 && !errors.Is(err, unix.ENOSPC) {
			// Up until at least kernel 6.0, the BTF verifier does not return ENOSPC
			// if there are other verification errors. ENOSPC is only returned when
			// the BTF blob is correct, a log was requested, and the provided buffer
			// is too small. We're therefore not sure whether we got the full
			// log or not.
			break
		}

		// Make an educated guess how large the buffer should be. Start
		// at a reasonable minimum and then double the size.
		logSize := uint32(max(len(logBuf)*2, minLogSize))
		if int(logSize) < len(logBuf) {
			return nil, errors.New("overflow while probing log buffer size")
		}

		if attr.BtfLogTrueSize != 0 {
			// The kernel has given us a hint how large the log buffer has to be.
			logSize = attr.BtfLogTrueSize
		}

		logBuf = make([]byte, logSize)
		attr.BtfLogSize = logSize
		attr.BtfLogBuf = sys.SlicePointer(logBuf)
		attr.BtfLogLevel = 1
	}

	// Check whether the failure is due to missing BTF support before
	// surfacing the raw verifier error.
	if err := haveBTF(); err != nil {
		return nil, err
	}

	return nil, internal.ErrorWithLog("load btf", err, logBuf)
}
// NewHandleFromID returns the BTF handle for a given id.
//
// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
//
// Returns ErrNotExist, if there is no BTF with the given id.
//
// Requires CAP_SYS_ADMIN.
func NewHandleFromID(id ID) (*Handle, error) {
	if platform.IsWindows {
		return nil, fmt.Errorf("btf: handle: %w", internal.ErrNotSupportedOnOS)
	}

	fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
		Id: uint32(id),
	})
	if err != nil {
		return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
	}

	info, err := newHandleInfoFromFD(fd)
	if err != nil {
		_ = fd.Close()
		return nil, err
	}

	// Module BTF needs vmlinux as its base when parsed later.
	return &Handle{fd, info.size, info.IsModule()}, nil
}

// Spec parses the kernel BTF into Go types.
//
// base must contain type information for vmlinux if the handle is for
// a kernel module. It may be nil otherwise.
func (h *Handle) Spec(base *Spec) (*Spec, error) {
	var btfInfo sys.BtfInfo
	btfBuffer := make([]byte, h.size)
	btfInfo.Btf = sys.SlicePointer(btfBuffer)
	btfInfo.BtfSize = uint32(len(btfBuffer))

	if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
		return nil, err
	}

	if h.needsKernelBase && base == nil {
		return nil, fmt.Errorf("missing base types")
	}

	return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base)
}

// Close destroys the handle.
//
// Subsequent calls to FD will return an invalid value.
func (h *Handle) Close() error {
	// nil receiver is allowed so callers can unconditionally defer Close.
	if h == nil {
		return nil
	}

	return h.fd.Close()
}

// FD returns the file descriptor for the handle.
func (h *Handle) FD() int {
	return h.fd.Int()
}

// Info returns metadata about the handle.
func (h *Handle) Info() (*HandleInfo, error) {
	return newHandleInfoFromFD(h.fd)
}
// HandleInfo describes a Handle.
type HandleInfo struct {
	// ID of this handle in the kernel. The ID is only valid as long as the
	// associated handle is kept alive.
	ID ID

	// Name is an identifying name for the BTF, currently only used by the
	// kernel.
	Name string

	// IsKernel is true if the BTF originated with the kernel and not
	// userspace.
	IsKernel bool

	// Size of the raw BTF in bytes.
	size uint32
}

// newHandleInfoFromFD queries the kernel for metadata about a BTF fd.
func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
	// We invoke the syscall once with a empty BTF and name buffers to get size
	// information to allocate buffers. Then we invoke it a second time with
	// buffers to receive the data.
	var btfInfo sys.BtfInfo
	if err := sys.ObjInfo(fd, &btfInfo); err != nil {
		return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err)
	}

	if btfInfo.NameLen > 0 {
		// NameLen doesn't account for the terminating NUL.
		btfInfo.NameLen++
	}

	// Don't pull raw BTF by default, since it may be quite large.
	btfSize := btfInfo.BtfSize
	btfInfo.BtfSize = 0

	nameBuffer := make([]byte, btfInfo.NameLen)
	btfInfo.Name = sys.SlicePointer(nameBuffer)
	btfInfo.NameLen = uint32(len(nameBuffer))
	if err := sys.ObjInfo(fd, &btfInfo); err != nil {
		return nil, err
	}

	return &HandleInfo{
		ID:       ID(btfInfo.Id),
		Name:     unix.ByteSliceToString(nameBuffer),
		IsKernel: btfInfo.KernelBtf != 0,
		size:     btfSize,
	}, nil
}

// IsVmlinux returns true if the BTF is for the kernel itself.
func (i *HandleInfo) IsVmlinux() bool {
	return i.IsKernel && i.Name == "vmlinux"
}

// IsModule returns true if the BTF is for a kernel module.
func (i *HandleInfo) IsModule() bool {
	return i.IsKernel && i.Name != "vmlinux"
}
// HandleIterator allows enumerating BTF blobs loaded into the kernel.
type HandleIterator struct {
	// The ID of the current handle. Only valid after a call to Next.
	ID ID

	// The current Handle. Only valid until a call to Next.
	// See Take if you want to retain the handle.
	Handle *Handle

	err error
}

// Next retrieves a handle for the next BTF object.
//
// Returns true if another BTF object was found. Call [HandleIterator.Err] after
// the function returns false.
func (it *HandleIterator) Next() bool {
	if platform.IsWindows {
		it.err = fmt.Errorf("btf: %w", internal.ErrNotSupportedOnOS)
		return false
	}

	id := it.ID
	for {
		attr := &sys.BtfGetNextIdAttr{Id: id}
		err := sys.BtfGetNextId(attr)
		if errors.Is(err, os.ErrNotExist) {
			// There are no more BTF objects.
			break
		} else if err != nil {
			it.err = fmt.Errorf("get next BTF ID: %w", err)
			break
		}

		id = attr.NextId
		handle, err := NewHandleFromID(id)
		if errors.Is(err, os.ErrNotExist) {
			// Try again with the next ID. The object may have been removed
			// between the two syscalls.
			continue
		} else if err != nil {
			it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err)
			break
		}

		// Release the previous handle before replacing it.
		it.Handle.Close()
		it.ID, it.Handle = id, handle
		return true
	}

	// No more handles or we encountered an error.
	it.Handle.Close()
	it.Handle = nil
	return false
}

// Take the ownership of the current handle.
//
// It's the callers responsibility to close the handle.
func (it *HandleIterator) Take() *Handle {
	handle := it.Handle
	it.Handle = nil
	return handle
}

// Err returns an error if iteration failed for some reason.
func (it *HandleIterator) Err() error {
	return it.err
}
// FindHandle returns the first handle for which predicate returns true.
//
// Requires CAP_SYS_ADMIN.
//
// Returns an error wrapping ErrNotFound if predicate never returns true or if
// there is no BTF loaded into the kernel.
func FindHandle(predicate func(info *HandleInfo) bool) (*Handle, error) {
	it := new(HandleIterator)
	// Handle is nil-safe to close; this releases the last handle if the
	// loop exits without taking it.
	defer it.Handle.Close()

	for it.Next() {
		info, err := it.Handle.Info()
		if err != nil {
			return nil, fmt.Errorf("info for ID %d: %w", it.ID, err)
		}

		if predicate(info) {
			return it.Take(), nil
		}
	}
	if err := it.Err(); err != nil {
		return nil, fmt.Errorf("iterate handles: %w", err)
	}

	return nil, fmt.Errorf("find handle: %w", ErrNotFound)
}

298
go/vendor/github.com/cilium/ebpf/btf/kernel.go generated vendored Normal file
View File

@@ -0,0 +1,298 @@
package btf
import (
"errors"
"fmt"
"os"
"path/filepath"
"slices"
"sort"
"sync"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/linux"
"github.com/cilium/ebpf/internal/platform"
)
// globalCache amortises decoding BTF across all users of the library.
var globalCache = struct {
	sync.RWMutex
	kernel  *Spec
	modules map[string]*Spec
}{
	modules: make(map[string]*Spec),
}

// FlushKernelSpec removes any cached kernel type information.
func FlushKernelSpec() {
	globalCache.Lock()
	defer globalCache.Unlock()

	globalCache.kernel = nil
	globalCache.modules = make(map[string]*Spec)
}

// LoadKernelSpec returns the current kernel's BTF information.
//
// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
//
// Consider using [Cache] instead.
func LoadKernelSpec() (*Spec, error) {
	spec, err := loadCachedKernelSpec()
	// Copy so callers can't mutate the cached Spec.
	return spec.Copy(), err
}

// load (and cache) the kernel spec.
//
// Does not copy Spec.
func loadCachedKernelSpec() (*Spec, error) {
	// Fast path: read lock only.
	globalCache.RLock()
	spec := globalCache.kernel
	globalCache.RUnlock()

	if spec != nil {
		return spec, nil
	}

	globalCache.Lock()
	defer globalCache.Unlock()

	spec, err := loadKernelSpec()
	if err != nil {
		return nil, err
	}

	globalCache.kernel = spec
	return spec, nil
}
// LoadKernelModuleSpec returns the BTF information for the named kernel module.
//
// Using [Cache.Module] is faster when loading BTF for more than one module.
//
// Defaults to /sys/kernel/btf/<module>.
// Returns an error wrapping ErrNotSupported if BTF is not enabled.
// Returns an error wrapping fs.ErrNotExist if BTF for the specific module doesn't exist.
func LoadKernelModuleSpec(module string) (*Spec, error) {
	spec, err := loadCachedKernelModuleSpec(module)
	// Copy so callers can't mutate the cached Spec.
	return spec.Copy(), err
}

// load (and cache) a module spec.
//
// Does not copy Spec.
func loadCachedKernelModuleSpec(module string) (*Spec, error) {
	// Fast path: read lock only.
	globalCache.RLock()
	spec := globalCache.modules[module]
	globalCache.RUnlock()

	if spec != nil {
		return spec, nil
	}

	// Module BTF is split BTF: it requires vmlinux types as its base.
	base, err := loadCachedKernelSpec()
	if err != nil {
		return nil, err
	}

	// NB: This only allows a single module to be parsed at a time. Not sure
	// it makes a difference.
	globalCache.Lock()
	defer globalCache.Unlock()

	spec, err = loadKernelModuleSpec(module, base)
	if err != nil {
		return nil, err
	}

	globalCache.modules[module] = spec
	return spec, nil
}
// loadKernelSpec reads vmlinux BTF from sysfs, falling back to scanning
// well-known locations for a vmlinux ELF.
func loadKernelSpec() (_ *Spec, _ error) {
	if platform.IsWindows {
		return nil, internal.ErrNotSupportedOnOS
	}

	fh, err := os.Open("/sys/kernel/btf/vmlinux")
	if err == nil {
		defer fh.Close()

		spec, err := loadRawSpec(fh, internal.NativeEndian, nil)
		return spec, err
	}

	file, err := findVMLinux()
	if err != nil {
		return nil, err
	}
	defer file.Close()

	spec, err := LoadSpecFromReader(file)
	return spec, err
}

// loadKernelModuleSpec reads split BTF for the given module from sysfs,
// resolved against base (vmlinux) types.
func loadKernelModuleSpec(module string, base *Spec) (*Spec, error) {
	if platform.IsWindows {
		return nil, internal.ErrNotSupportedOnOS
	}

	// Reject anything that isn't a bare module name, to avoid path traversal
	// into arbitrary files.
	dir, file := filepath.Split(module)
	if dir != "" || filepath.Ext(file) != "" {
		return nil, fmt.Errorf("invalid module name %q", module)
	}

	fh, err := os.Open(filepath.Join("/sys/kernel/btf", module))
	if err != nil {
		return nil, err
	}
	defer fh.Close()

	return loadRawSpec(fh, internal.NativeEndian, base)
}

// findVMLinux scans multiple well-known paths for vmlinux kernel images.
func findVMLinux() (*os.File, error) {
	if platform.IsWindows {
		return nil, fmt.Errorf("find vmlinux: %w", internal.ErrNotSupportedOnOS)
	}

	release, err := linux.KernelRelease()
	if err != nil {
		return nil, err
	}

	// use same list of locations as libbpf
	// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
	locations := []string{
		"/boot/vmlinux-%s",
		"/lib/modules/%s/vmlinux-%[1]s",
		"/lib/modules/%s/build/vmlinux",
		"/usr/lib/modules/%s/kernel/vmlinux",
		"/usr/lib/debug/boot/vmlinux-%s",
		"/usr/lib/debug/boot/vmlinux-%s.debug",
		"/usr/lib/debug/lib/modules/%s/vmlinux",
	}

	for _, loc := range locations {
		file, err := os.Open(fmt.Sprintf(loc, release))
		if errors.Is(err, os.ErrNotExist) {
			continue
		}
		return file, err
	}

	return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported)
}
// Cache allows to amortise the cost of decoding BTF across multiple call-sites.
//
// It is not safe for concurrent use.
type Cache struct {
	KernelTypes   *Spec
	ModuleTypes   map[string]*Spec
	LoadedModules []string
}

// NewCache creates a new Cache.
//
// Opportunistically reuses a global cache if possible.
func NewCache() *Cache {
	globalCache.RLock()
	defer globalCache.RUnlock()

	// This copy is either a no-op or very cheap, since the spec won't contain
	// any inflated types.
	kernel := globalCache.kernel.Copy()
	if kernel == nil {
		// Nothing cached globally yet; start empty.
		return &Cache{}
	}

	modules := make(map[string]*Spec, len(globalCache.modules))
	for name, spec := range globalCache.modules {
		// Rebase module (split) BTF onto the copied kernel decoder.
		decoder, _ := rebaseDecoder(spec.decoder, kernel.decoder)
		// NB: Kernel module BTF can't contain ELF fixups because it is always
		// read from sysfs.
		modules[name] = &Spec{decoder: decoder}
	}
	if len(modules) == 0 {
		return &Cache{kernel, nil, nil}
	}

	return &Cache{kernel, modules, nil}
}

// Kernel is equivalent to [LoadKernelSpec], except that repeated calls do
// not copy the Spec.
func (c *Cache) Kernel() (*Spec, error) {
	if c.KernelTypes != nil {
		return c.KernelTypes, nil
	}

	var err error
	c.KernelTypes, err = LoadKernelSpec()
	return c.KernelTypes, err
}

// Module is equivalent to [LoadKernelModuleSpec], except that repeated calls do
// not copy the spec.
//
// All modules also share the return value of [Kernel] as their base.
func (c *Cache) Module(name string) (*Spec, error) {
	if spec := c.ModuleTypes[name]; spec != nil {
		return spec, nil
	}

	if c.ModuleTypes == nil {
		c.ModuleTypes = make(map[string]*Spec)
	}

	base, err := c.Kernel()
	if err != nil {
		return nil, err
	}

	spec, err := loadCachedKernelModuleSpec(name)
	if err != nil {
		return nil, err
	}

	// Important: base is shared between modules. This allows inflating common
	// types only once.
	decoder, err := rebaseDecoder(spec.decoder, base.decoder)
	if err != nil {
		return nil, err
	}

	spec = &Spec{decoder: decoder}
	c.ModuleTypes[name] = spec
	return spec, err
}

// Modules returns a sorted list of all loaded modules.
func (c *Cache) Modules() ([]string, error) {
	if c.LoadedModules != nil {
		return c.LoadedModules, nil
	}

	btfDir, err := os.Open("/sys/kernel/btf")
	if err != nil {
		return nil, err
	}
	defer btfDir.Close()

	entries, err := btfDir.Readdirnames(-1)
	if err != nil {
		return nil, err
	}

	// vmlinux is the kernel itself, not a module.
	entries = slices.DeleteFunc(entries, func(s string) bool {
		return s == "vmlinux"
	})

	sort.Strings(entries)
	c.LoadedModules = entries
	return entries, nil
}

696
go/vendor/github.com/cilium/ebpf/btf/marshal.go generated vendored Normal file
View File

@@ -0,0 +1,696 @@
package btf
import (
"encoding/binary"
"errors"
"fmt"
"maps"
"math"
"slices"
"sync"
"unsafe"
"github.com/cilium/ebpf/internal"
)
// MarshalOptions control the output of [Builder.Marshal].
type MarshalOptions struct {
	// Target byte order. Defaults to the system's native endianness.
	Order binary.ByteOrder
	// Remove function linkage information for compatibility with <5.6 kernels.
	StripFuncLinkage bool
	// Replace decl tags with a placeholder for compatibility with <5.16 kernels.
	ReplaceDeclTags bool
	// Replace TypeTags with a placeholder for compatibility with <5.17 kernels.
	ReplaceTypeTags bool
	// Replace Enum64 with a placeholder for compatibility with <6.0 kernels.
	ReplaceEnum64 bool
	// Prevent the "No type found" error when loading BTF without any types.
	PreventNoTypeFound bool
}
// KernelMarshalOptions will generate BTF suitable for the current kernel.
//
// Each haveX probe presumably returns a non-nil error when the running kernel
// lacks the feature, which enables the corresponding compatibility rewrite.
func KernelMarshalOptions() *MarshalOptions {
	return &MarshalOptions{
		Order:              internal.NativeEndian,
		StripFuncLinkage:   haveFuncLinkage() != nil,
		ReplaceDeclTags:    haveDeclTags() != nil,
		ReplaceTypeTags:    haveTypeTags() != nil,
		ReplaceEnum64:      haveEnum64() != nil,
		PreventNoTypeFound: true, // All current kernels require this.
	}
}
// encoder turns Types into raw BTF.
type encoder struct {
	MarshalOptions

	// Types queued for deflation.
	pending internal.Deque[Type]
	// String table being built alongside the type section.
	strings *stringTableBuilder
	// IDs allocated so far, seeded with the Builder's stable IDs.
	ids map[Type]TypeID
	// Types already seen by allocateIDs' traversal.
	visited map[Type]struct{}
	// Highest ID handed out so far.
	lastID TypeID
}
// bufferPool recycles marshaling scratch buffers, pre-sized to hold the BTF
// header plus a little type data.
var bufferPool = sync.Pool{
	New: func() any {
		buf := make([]byte, btfHeaderLen+128)
		return &buf
	},
}
// getByteSlice retrieves a scratch buffer from bufferPool.
func getByteSlice() *[]byte {
	return bufferPool.Get().(*[]byte)
}
// putByteSlice returns a buffer to bufferPool, truncated to zero length so
// the next user starts empty while keeping the capacity.
func putByteSlice(buf *[]byte) {
	*buf = (*buf)[:0]
	bufferPool.Put(buf)
}
// Builder turns Types into raw BTF.
//
// The default value may be used and represents an empty BTF blob. Void is
// added implicitly if necessary.
type Builder struct {
	// Explicitly added types.
	types []Type
	// IDs for all added types which the user knows about. Marshal must emit
	// types in an order that preserves these IDs.
	stableIDs map[Type]TypeID
	// Explicitly added strings. Allocated lazily by addString.
	strings *stringTableBuilder
}
// NewBuilder creates a Builder from a list of types.
//
// It is more efficient than calling [Add] individually.
//
// Returns an error if adding any of the types fails.
func NewBuilder(types []Type) (*Builder, error) {
	b := &Builder{
		types:     make([]Type, 0, len(types)),
		stableIDs: make(map[Type]TypeID, len(types)),
	}

	for _, typ := range types {
		if _, err := b.Add(typ); err != nil {
			return nil, fmt.Errorf("add %s: %w", typ, err)
		}
	}

	return b, nil
}
// Empty returns true if neither types nor strings have been added.
func (b *Builder) Empty() bool {
	noStrings := b.strings == nil || b.strings.Length() == 0
	return len(b.types) == 0 && noStrings
}
// Add a Type and allocate a stable ID for it.
//
// Adding the identical Type multiple times is valid and will return the same ID.
//
// See [Type] for details on identity.
func (b *Builder) Add(typ Type) (TypeID, error) {
	if b.stableIDs == nil {
		b.stableIDs = make(map[Type]TypeID)
	}

	if _, ok := typ.(*Void); ok {
		// Equality is weird for void, since it is a zero sized type.
		return 0, nil
	}

	if ds, ok := typ.(*Datasec); ok {
		if err := datasecResolveWorkaround(b, ds); err != nil {
			return 0, err
		}
	}

	id, ok := b.stableIDs[typ]
	if ok {
		return id, nil
	}

	b.types = append(b.types, typ)

	// IDs start at 1 since ID 0 is the implicit Void.
	id = TypeID(len(b.types))
	if int(id) != len(b.types) {
		// len(b.types) overflowed TypeID.
		return 0, fmt.Errorf("no more type IDs")
	}

	b.stableIDs[typ] = id
	return id, nil
}
// Marshal encodes all types in the Marshaler into BTF wire format.
//
// opts may be nil.
func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) {
	stb := b.strings
	if stb == nil {
		// Assume that most types are named. This makes encoding large BTF like
		// vmlinux a lot cheaper.
		stb = newStringTableBuilder(len(b.types))
	} else {
		// Avoid modifying the Builder's string table.
		stb = b.strings.Copy()
	}

	if opts == nil {
		opts = &MarshalOptions{Order: internal.NativeEndian}
	}

	// Reserve space for the BTF header.
	buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen]

	e := encoder{
		MarshalOptions: *opts,
		strings:        stb,
		lastID:         TypeID(len(b.types)),
		visited:        make(map[Type]struct{}, len(b.types)),
		ids:            maps.Clone(b.stableIDs),
	}

	if e.ids == nil {
		e.ids = make(map[Type]TypeID)
	}

	types := b.types
	if len(types) == 0 && stb.Length() > 0 && opts.PreventNoTypeFound {
		// We have strings that need to be written out,
		// but no types (besides the implicit Void).
		// Kernels as recent as v6.7 refuse to load such BTF
		// with a "No type found" error in the log.
		// Fix this by adding a dummy type.
		types = []Type{&Int{Size: 0}}
	}

	// Ensure that types are marshaled in the exact order they were Add()ed.
	// Otherwise the ID returned from Add() won't match.
	e.pending.Grow(len(types))
	for _, typ := range types {
		e.pending.Push(typ)
	}

	buf, err := e.deflatePending(buf)
	if err != nil {
		return nil, err
	}

	length := len(buf)
	typeLen := uint32(length - btfHeaderLen)

	stringLen := e.strings.Length()
	buf = e.strings.AppendEncoded(buf)

	// Fill out the header, and write it out. The string section immediately
	// follows the type section, hence StringOff == TypeLen.
	header := &btfHeader{
		Magic:     btfMagic,
		Version:   1,
		Flags:     0,
		HdrLen:    uint32(btfHeaderLen),
		TypeOff:   0,
		TypeLen:   typeLen,
		StringOff: typeLen,
		StringLen: uint32(stringLen),
	}

	_, err = binary.Encode(buf[:btfHeaderLen], e.Order, header)
	if err != nil {
		return nil, fmt.Errorf("write header: %v", err)
	}

	return buf, nil
}
// addString adds a string to the resulting BTF.
//
// Adding the same string multiple times will return the same result.
//
// Returns an identifier into the string table or an error if the string
// contains invalid characters.
func (b *Builder) addString(str string) (uint32, error) {
	// Allocate the table lazily so the zero Builder stays cheap.
	if b.strings == nil {
		b.strings = newStringTableBuilder(0)
	}

	return b.strings.Add(str)
}
// allocateIDs assigns IDs to all not-yet-seen types reachable from root and
// queues them for deflation.
func (e *encoder) allocateIDs(root Type) error {
	for typ := range postorder(root, e.visited) {
		if _, ok := typ.(*Void); ok {
			// Void is always the implicit ID 0 and is never emitted.
			continue
		}

		if _, ok := e.ids[typ]; ok {
			continue
		}

		id := e.lastID + 1
		if id < e.lastID {
			// TypeID wrapped around.
			return errors.New("type ID overflow")
		}

		e.pending.Push(typ)
		e.ids[typ] = id
		e.lastID = id
	}
	return nil
}
// id returns the ID for the given type or panics with an error.
//
// The panic is recovered by deflateType and converted into an error return.
func (e *encoder) id(typ Type) TypeID {
	if _, ok := typ.(*Void); ok {
		return 0
	}

	id, ok := e.ids[typ]
	if !ok {
		panic(fmt.Errorf("no ID for type %v", typ))
	}

	return id
}
// deflatePending encodes every queued type into buf, allocating IDs for any
// newly discovered dependencies along the way.
func (e *encoder) deflatePending(buf []byte) ([]byte, error) {
	// Declare root outside of the loop to avoid repeated heap allocations.
	var root Type
	for !e.pending.Empty() {
		root = e.pending.Shift()

		// Allocate IDs for all children of typ, including transitive dependencies.
		err := e.allocateIDs(root)
		if err != nil {
			return nil, err
		}

		buf, err = e.deflateType(buf, root)
		if err != nil {
			id := e.ids[root]
			return nil, fmt.Errorf("deflate %v with ID %d: %w", root, id, err)
		}
	}

	return buf, nil
}
// deflateType encodes a single type: the common btfType header followed by
// any kind-specific payload.
func (e *encoder) deflateType(buf []byte, typ Type) (_ []byte, err error) {
	// e.id panics on types without an allocated ID; turn that into an error.
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			err, ok = r.(error)
			if !ok {
				panic(r)
			}
		}
	}()

	var raw btfType
	raw.NameOff, err = e.strings.Add(typ.TypeName())
	if err != nil {
		return nil, err
	}

	// Reserve space for the btfType header.
	start := len(buf)
	buf = append(buf, make([]byte, unsafe.Sizeof(raw))...)

	switch v := typ.(type) {
	case *Void:
		return nil, errors.New("Void is implicit in BTF wire format")

	case *Int:
		buf, err = e.deflateInt(buf, &raw, v)

	case *Pointer:
		raw.SetKind(kindPointer)
		raw.SetType(e.id(v.Target))

	case *Array:
		raw.SetKind(kindArray)
		buf, err = binary.Append(buf, e.Order, &btfArray{
			e.id(v.Type),
			e.id(v.Index),
			v.Nelems,
		})

	case *Struct:
		raw.SetKind(kindStruct)
		raw.SetSize(v.Size)
		buf, err = e.deflateMembers(buf, &raw, v.Members)

	case *Union:
		buf, err = e.deflateUnion(buf, &raw, v)

	case *Enum:
		// 8-byte enums need the separate ENUM64 encoding.
		if v.Size == 8 {
			buf, err = e.deflateEnum64(buf, &raw, v)
		} else {
			buf, err = e.deflateEnum(buf, &raw, v)
		}

	case *Fwd:
		raw.SetKind(kindForward)
		raw.SetFwdKind(v.Kind)

	case *Typedef:
		raw.SetKind(kindTypedef)
		raw.SetType(e.id(v.Type))

	case *Volatile:
		raw.SetKind(kindVolatile)
		raw.SetType(e.id(v.Type))

	case *Const:
		e.deflateConst(&raw, v)

	case *Restrict:
		raw.SetKind(kindRestrict)
		raw.SetType(e.id(v.Type))

	case *Func:
		raw.SetKind(kindFunc)
		raw.SetType(e.id(v.Type))
		if !e.StripFuncLinkage {
			raw.SetLinkage(v.Linkage)
		}

	case *FuncProto:
		raw.SetKind(kindFuncProto)
		raw.SetType(e.id(v.Return))
		raw.SetVlen(len(v.Params))
		buf, err = e.deflateFuncParams(buf, v.Params)

	case *Var:
		raw.SetKind(kindVar)
		raw.SetType(e.id(v.Type))
		buf, err = binary.Append(buf, e.Order, btfVariable{uint32(v.Linkage)})

	case *Datasec:
		raw.SetKind(kindDatasec)
		raw.SetSize(v.Size)
		raw.SetVlen(len(v.Vars))
		buf, err = e.deflateVarSecinfos(buf, v.Vars)

	case *Float:
		raw.SetKind(kindFloat)
		raw.SetSize(v.Size)

	case *declTag:
		buf, err = e.deflateDeclTag(buf, &raw, v)

	case *TypeTag:
		err = e.deflateTypeTag(&raw, v)

	default:
		return nil, fmt.Errorf("don't know how to deflate %T", v)
	}

	if err != nil {
		return nil, err
	}

	// Write the header into the space reserved above, now that the
	// kind-specific fields of raw are known.
	header := buf[start : start+int(unsafe.Sizeof(raw))]
	if _, err = raw.Encode(header, e.Order); err != nil {
		return nil, err
	}

	return buf, nil
}
// deflateInt encodes an Int's kind, size and the trailing btfInt word.
func (e *encoder) deflateInt(buf []byte, raw *btfType, i *Int) ([]byte, error) {
	raw.SetKind(kindInt)
	raw.SetSize(i.Size)

	var bi btfInt
	bi.SetEncoding(i.Encoding)
	// We need to set bits in addition to size, since btf_type_int_is_regular
	// otherwise flags this as a bitfield.
	bi.SetBits(byte(i.Size) * 8)
	return binary.Append(buf, e.Order, bi)
}
// deflateDeclTag encodes a decl tag, optionally downgrading it to a
// placeholder Int on kernels that don't know the kind.
func (e *encoder) deflateDeclTag(buf []byte, raw *btfType, tag *declTag) ([]byte, error) {
	// Replace a decl tag with an integer for compatibility with <5.16 kernels,
	// following libbpf behaviour.
	if e.ReplaceDeclTags {
		typ := &Int{"decl_tag_placeholder", 1, Unsigned}
		buf, err := e.deflateInt(buf, raw, typ)
		if err != nil {
			return nil, err
		}

		// Add the placeholder type name to the string table. The encoder added the
		// original type name before this call.
		raw.NameOff, err = e.strings.Add(typ.TypeName())
		return buf, err
	}

	var err error
	raw.SetKind(kindDeclTag)
	raw.SetType(e.id(tag.Type))
	raw.NameOff, err = e.strings.Add(tag.Value)
	if err != nil {
		return nil, err
	}
	return binary.Append(buf, e.Order, btfDeclTag{uint32(tag.Index)})
}
// deflateConst encodes a Const qualifier; it has no payload beyond the header.
func (e *encoder) deflateConst(raw *btfType, c *Const) {
	raw.SetKind(kindConst)
	raw.SetType(e.id(c.Type))
}
// deflateTypeTag encodes a type tag, optionally downgrading it to a const
// qualifier on kernels that don't know the kind.
func (e *encoder) deflateTypeTag(raw *btfType, tag *TypeTag) (err error) {
	// Replace a type tag with a const qualifier for compatibility with <5.17
	// kernels, following libbpf behaviour.
	if e.ReplaceTypeTags {
		e.deflateConst(raw, &Const{tag.Type})
		return nil
	}

	raw.SetKind(kindTypeTag)
	raw.SetType(e.id(tag.Type))
	raw.NameOff, err = e.strings.Add(tag.Value)
	return
}
// deflateUnion encodes a Union header followed by its members.
func (e *encoder) deflateUnion(buf []byte, raw *btfType, union *Union) ([]byte, error) {
	raw.SetKind(kindUnion)
	raw.SetSize(union.Size)
	return e.deflateMembers(buf, raw, union.Members)
}
// deflateMembers appends btfMember records for a struct or union and updates
// the header's vlen and bitfield flag.
func (e *encoder) deflateMembers(buf []byte, header *btfType, members []Member) ([]byte, error) {
	var bm btfMember

	isBitfield := false
	buf = slices.Grow(buf, len(members)*int(unsafe.Sizeof(bm)))
	for _, member := range members {
		// isBitfield is sticky: once any member has a bitfield size, all
		// subsequent members use the packed size<<24|offset encoding.
		isBitfield = isBitfield || member.BitfieldSize > 0

		offset := member.Offset
		if isBitfield {
			offset = member.BitfieldSize<<24 | (member.Offset & 0xffffff)
		}

		nameOff, err := e.strings.Add(member.Name)
		if err != nil {
			return nil, err
		}

		bm = btfMember{
			nameOff,
			e.id(member.Type),
			uint32(offset),
		}
		buf, err = binary.Append(buf, e.Order, &bm)
		if err != nil {
			return nil, err
		}
	}

	header.SetVlen(len(members))
	header.SetBitfield(isBitfield)
	return buf, nil
}
// deflateEnum encodes a (32-bit or smaller) enum header and its values.
func (e *encoder) deflateEnum(buf []byte, raw *btfType, enum *Enum) ([]byte, error) {
	raw.SetKind(kindEnum)
	raw.SetSize(enum.Size)
	raw.SetVlen(len(enum.Values))
	// Signedness appeared together with ENUM64 support.
	raw.SetSigned(enum.Signed && !e.ReplaceEnum64)
	return e.deflateEnumValues(buf, enum)
}
// deflateEnumValues appends btfEnum records, rejecting values that don't fit
// into the 32-bit wire representation.
func (e *encoder) deflateEnumValues(buf []byte, enum *Enum) ([]byte, error) {
	var be btfEnum
	buf = slices.Grow(buf, len(enum.Values)*int(unsafe.Sizeof(be)))
	for _, value := range enum.Values {
		nameOff, err := e.strings.Add(value.Name)
		if err != nil {
			return nil, err
		}

		// Range-check against int32 or uint32 depending on signedness,
		// since only 32 bits of the value survive the encoding below.
		if enum.Signed {
			if signedValue := int64(value.Value); signedValue < math.MinInt32 || signedValue > math.MaxInt32 {
				return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", signedValue, value.Name)
			}
		} else {
			if value.Value > math.MaxUint32 {
				return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", value.Value, value.Name)
			}
		}

		be = btfEnum{
			nameOff,
			uint32(value.Value),
		}
		buf, err = binary.Append(buf, e.Order, &be)
		if err != nil {
			return nil, err
		}
	}

	return buf, nil
}
// deflateEnum64 encodes an 8-byte enum, optionally downgrading it to a union
// of same-sized integer placeholders on kernels without ENUM64 support.
func (e *encoder) deflateEnum64(buf []byte, raw *btfType, enum *Enum) ([]byte, error) {
	if e.ReplaceEnum64 {
		// Replace the ENUM64 with a union of fields with the correct size.
		// This matches libbpf behaviour on purpose.
		placeholder := &Int{
			"enum64_placeholder",
			enum.Size,
			Unsigned,
		}
		if enum.Signed {
			placeholder.Encoding = Signed
		}

		// The placeholder is a new type and needs its own ID.
		if err := e.allocateIDs(placeholder); err != nil {
			return nil, fmt.Errorf("add enum64 placeholder: %w", err)
		}

		members := make([]Member, 0, len(enum.Values))
		for _, v := range enum.Values {
			members = append(members, Member{
				Name: v.Name,
				Type: placeholder,
			})
		}

		return e.deflateUnion(buf, raw, &Union{enum.Name, enum.Size, members, nil})
	}

	raw.SetKind(kindEnum64)
	raw.SetSize(enum.Size)
	raw.SetVlen(len(enum.Values))
	raw.SetSigned(enum.Signed)
	return e.deflateEnum64Values(buf, enum.Values)
}
// deflateEnum64Values appends btfEnum64 records, splitting each 64-bit value
// into low and high 32-bit halves.
func (e *encoder) deflateEnum64Values(buf []byte, values []EnumValue) ([]byte, error) {
	var be btfEnum64
	buf = slices.Grow(buf, len(values)*int(unsafe.Sizeof(be)))
	for _, value := range values {
		nameOff, err := e.strings.Add(value.Name)
		if err != nil {
			return nil, err
		}

		be = btfEnum64{
			nameOff,
			uint32(value.Value),
			uint32(value.Value >> 32),
		}
		buf, err = binary.Append(buf, e.Order, &be)
		if err != nil {
			return nil, err
		}
	}

	return buf, nil
}
// deflateFuncParams appends one btfParam record per function parameter.
func (e *encoder) deflateFuncParams(buf []byte, params []FuncParam) ([]byte, error) {
	var bp btfParam
	buf = slices.Grow(buf, len(params)*int(unsafe.Sizeof(bp)))
	for _, param := range params {
		nameOff, err := e.strings.Add(param.Name)
		if err != nil {
			return nil, err
		}

		bp = btfParam{
			nameOff,
			e.id(param.Type),
		}
		buf, err = binary.Append(buf, e.Order, &bp)
		if err != nil {
			return nil, err
		}
	}
	return buf, nil
}
// deflateVarSecinfos appends one btfVarSecinfo record per Datasec variable.
func (e *encoder) deflateVarSecinfos(buf []byte, vars []VarSecinfo) ([]byte, error) {
	var vsi btfVarSecinfo
	var err error
	buf = slices.Grow(buf, len(vars)*int(unsafe.Sizeof(vsi)))
	for _, v := range vars {
		vsi = btfVarSecinfo{
			e.id(v.Type),
			v.Offset,
			v.Size,
		}
		buf, err = binary.Append(buf, e.Order, vsi)
		if err != nil {
			return nil, err
		}
	}
	return buf, nil
}
// MarshalMapKV creates a BTF object containing a map key and value.
//
// The function is intended for the use of the ebpf package and may be removed
// at any point in time.
func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, err error) {
	var b Builder

	if key != nil {
		keyID, err = b.Add(key)
		if err != nil {
			return nil, 0, 0, fmt.Errorf("add key type: %w", err)
		}
	}

	if value != nil {
		valueID, err = b.Add(value)
		if err != nil {
			return nil, 0, 0, fmt.Errorf("add value type: %w", err)
		}
	}

	handle, err := NewHandle(&b)
	if err != nil {
		// Check for 'full' map BTF support, since kernels between 4.18 and 5.2
		// already support BTF blobs for maps without Var or Datasec just fine.
		if err := haveMapBTF(); err != nil {
			return nil, 0, 0, err
		}
	}
	// NB: on NewHandle failure with haveMapBTF() == nil, the original error
	// is returned together with a nil handle.
	return handle, keyID, valueID, err
}

206
go/vendor/github.com/cilium/ebpf/btf/strings.go generated vendored Normal file
View File

@@ -0,0 +1,206 @@
package btf
import (
"bytes"
"errors"
"fmt"
"io"
"maps"
"strings"
"sync"
)
// stringTable contains a sequence of null-terminated strings.
//
// It is safe for concurrent use.
type stringTable struct {
	// Base table for split BTF; offsets below len(base.bytes) resolve there.
	base  *stringTable
	bytes []byte

	// mu protects cache, which is populated lazily by LookupCached.
	mu    sync.Mutex
	cache map[uint32]string
}
// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc.
type sizedReader interface {
	io.Reader
	Size() int64
}
// readStringTable reads and validates a BTF string section from r.
//
// base may be nil; when present the new table is split BTF layered on top of
// base's offsets.
func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) {
	// When parsing split BTF's string table, the first entry offset is derived
	// from the last entry offset of the base BTF.
	firstStringOffset := uint32(0)
	if base != nil {
		firstStringOffset = uint32(len(base.bytes))
	}

	bytes := make([]byte, r.Size())
	if _, err := io.ReadFull(r, bytes); err != nil {
		return nil, err
	}

	if len(bytes) == 0 {
		return nil, errors.New("string table is empty")
	}

	if bytes[len(bytes)-1] != 0 {
		return nil, errors.New("string table isn't null terminated")
	}

	// Only a non-split table must start with the empty string.
	if firstStringOffset == 0 && bytes[0] != 0 {
		return nil, errors.New("first item in string table is non-empty")
	}

	return &stringTable{base: base, bytes: bytes}, nil
}
// Lookup returns the string at the given offset.
func (st *stringTable) Lookup(offset uint32) (string, error) {
	// Fast path: zero offset is the empty string, looked up frequently.
	if offset == 0 {
		return "", nil
	}

	b, err := st.lookupSlow(offset)
	return string(b), err
}
// LookupBytes returns the string at the given offset as a byte slice aliasing
// the table's backing storage.
func (st *stringTable) LookupBytes(offset uint32) ([]byte, error) {
	// Fast path: zero offset is the empty string, looked up frequently.
	if offset == 0 {
		return nil, nil
	}

	return st.lookupSlow(offset)
}
// lookupSlow resolves offset against the base table (for split BTF) or this
// table, validating that offset points at the start of a string.
func (st *stringTable) lookupSlow(offset uint32) ([]byte, error) {
	if st.base != nil {
		// Offsets below the base table's size belong to the base table.
		n := uint32(len(st.base.bytes))
		if offset < n {
			return st.base.lookupSlow(offset)
		}

		offset -= n
	}

	// NOTE(review): offset == len(st.bytes) passes this check; the table's
	// trailing NUL then makes IndexByte below return -1 — looks like that
	// could panic on a crafted offset. Verify against upstream.
	if offset > uint32(len(st.bytes)) {
		return nil, fmt.Errorf("offset %d is out of bounds of string table", offset)
	}

	// Every string is preceded by a NUL terminator of the previous one.
	if offset > 0 && st.bytes[offset-1] != 0 {
		return nil, fmt.Errorf("offset %d is not the beginning of a string", offset)
	}

	i := bytes.IndexByte(st.bytes[offset:], 0)
	return st.bytes[offset : offset+uint32(i)], nil
}
// LookupCached returns the string at the given offset, caching the result
// for future lookups.
func (st *stringTable) LookupCached(offset uint32) (string, error) {
	// Fast path: zero offset is the empty string, looked up frequently.
	if offset == 0 {
		return "", nil
	}

	st.mu.Lock()
	defer st.mu.Unlock()

	if str, ok := st.cache[offset]; ok {
		return str, nil
	}

	str, err := st.Lookup(offset)
	if err != nil {
		return "", err
	}

	if st.cache == nil {
		// Allocate lazily so tables that never use the cache pay nothing.
		st.cache = make(map[uint32]string)
	}

	st.cache[offset] = str
	return str, nil
}
// stringTableBuilder builds BTF string tables.
type stringTableBuilder struct {
	// Total encoded length in bytes, including NUL terminators.
	length uint32
	// Offset of each added string within the encoded table.
	strings map[string]uint32
}
// newStringTableBuilder creates a builder with the given capacity.
//
// capacity may be zero.
func newStringTableBuilder(capacity int) *stringTableBuilder {
	var stb stringTableBuilder

	if capacity == 0 {
		// Use the runtime's small default size.
		stb.strings = make(map[string]uint32)
	} else {
		stb.strings = make(map[string]uint32, capacity)
	}

	// Ensure that the empty string is at index 0.
	stb.append("")
	return &stb
}
// Add a string to the table.
//
// Adding the same string multiple times will only store it once.
func (stb *stringTableBuilder) Add(str string) (uint32, error) {
	// NUL terminates strings in the encoded table, so it may not appear
	// inside one.
	if strings.Contains(str, "\x00") {
		return 0, fmt.Errorf("string contains null: %q", str)
	}

	if offset, ok := stb.strings[str]; ok {
		return offset, nil
	}

	return stb.append(str), nil
}
// append unconditionally adds str, returning its offset and accounting for
// the trailing NUL terminator.
func (stb *stringTableBuilder) append(str string) uint32 {
	offset := stb.length
	stb.length += uint32(len(str)) + 1
	stb.strings[str] = offset
	return offset
}
// Lookup finds the offset of a string in the table.
//
// Returns an error if str hasn't been added yet.
func (stb *stringTableBuilder) Lookup(str string) (uint32, error) {
	offset, ok := stb.strings[str]
	if !ok {
		return 0, fmt.Errorf("string %q is not in table", str)
	}

	return offset, nil
}
// Length returns the length in bytes of the encoded table.
func (stb *stringTableBuilder) Length() int {
	return int(stb.length)
}
// AppendEncoded appends the string table to the end of the provided buffer.
func (stb *stringTableBuilder) AppendEncoded(buf []byte) []byte {
	n := len(buf)
	// Grow by the full table size; the zeroed gaps double as NUL terminators.
	buf = append(buf, make([]byte, stb.Length())...)
	strings := buf[n:]
	for str, offset := range stb.strings {
		copy(strings[offset:], str)
	}
	return buf
}
// Copy the string table builder.
func (stb *stringTableBuilder) Copy() *stringTableBuilder {
	return &stringTableBuilder{
		length:  stb.length,
		strings: maps.Clone(stb.strings),
	}
}

159
go/vendor/github.com/cilium/ebpf/btf/traversal.go generated vendored Normal file
View File

@@ -0,0 +1,159 @@
package btf
import (
"fmt"
"iter"
)
// Functions to traverse a cyclic graph of types. The below was very useful:
// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order
// postorder yields all types reachable from root in post order.
//
// visited may be nil; already-visited types are skipped entirely.
func postorder(root Type, visited map[Type]struct{}) iter.Seq[Type] {
	return func(yield func(Type) bool) {
		visitInPostorder(root, visited, yield)
	}
}
// visitInPostorder is a separate function to avoid arguments escaping
// to the heap. Don't change the setup without re-running the benchmarks.
func visitInPostorder(root Type, visited map[Type]struct{}, yield func(typ Type) bool) bool {
	// Reading from a nil map is fine, so the nil check can come second.
	if _, ok := visited[root]; ok {
		return true
	}
	if visited == nil {
		visited = make(map[Type]struct{})
	}
	visited[root] = struct{}{}

	// Children first, then the root itself: post order.
	for child := range children(root) {
		if !visitInPostorder(*child, visited, yield) {
			return false
		}
	}

	return yield(root)
}
// children yields pointers to all direct descendants of typ, allowing the
// caller to mutate the edges in place.
func children(typ Type) iter.Seq[*Type] {
	return func(yield func(*Type) bool) {
		// Explicitly type switch on the most common types to allow the inliner to
		// do its work. This avoids allocating intermediate slices from walk() on
		// the heap.
		//
		// Type-level tags are collected here and yielded as synthetic declTag
		// children after the switch.
		var tags []string
		switch v := typ.(type) {
		case *Void, *Int, *Enum, *Fwd, *Float, *declTag:
			// No children to traverse.
			// declTags is declared as a leaf type since it's parsed into .Tags fields of other types
			// during unmarshaling.
		case *Pointer:
			if !yield(&v.Target) {
				return
			}
		case *Array:
			if !yield(&v.Index) {
				return
			}
			if !yield(&v.Type) {
				return
			}
		case *Struct:
			for i := range v.Members {
				if !yield(&v.Members[i].Type) {
					return
				}
				for _, t := range v.Members[i].Tags {
					var tag Type = &declTag{v, t, i}
					if !yield(&tag) {
						return
					}
				}
			}
			tags = v.Tags
		case *Union:
			for i := range v.Members {
				if !yield(&v.Members[i].Type) {
					return
				}
				for _, t := range v.Members[i].Tags {
					var tag Type = &declTag{v, t, i}
					if !yield(&tag) {
						return
					}
				}
			}
			tags = v.Tags
		case *Typedef:
			if !yield(&v.Type) {
				return
			}
			tags = v.Tags
		case *Volatile:
			if !yield(&v.Type) {
				return
			}
		case *Const:
			if !yield(&v.Type) {
				return
			}
		case *Restrict:
			if !yield(&v.Type) {
				return
			}
		case *Func:
			if !yield(&v.Type) {
				return
			}
			// Parameter tags hang off the Func, not the FuncProto.
			if fp, ok := v.Type.(*FuncProto); ok {
				for i := range fp.Params {
					if len(v.ParamTags) <= i {
						continue
					}
					for _, t := range v.ParamTags[i] {
						var tag Type = &declTag{v, t, i}
						if !yield(&tag) {
							return
						}
					}
				}
			}
			tags = v.Tags
		case *FuncProto:
			if !yield(&v.Return) {
				return
			}
			for i := range v.Params {
				if !yield(&v.Params[i].Type) {
					return
				}
			}
		case *Var:
			if !yield(&v.Type) {
				return
			}
			tags = v.Tags
		case *Datasec:
			for i := range v.Vars {
				if !yield(&v.Vars[i].Type) {
					return
				}
			}
		case *TypeTag:
			if !yield(&v.Type) {
				return
			}
		case *cycle:
			// cycle has children, but we ignore them deliberately.
		default:
			panic(fmt.Sprintf("don't know how to walk Type %T", v))
		}

		// Yield synthetic declTags for type-level tags (index -1).
		for _, t := range tags {
			var tag Type = &declTag{typ, t, -1}
			if !yield(&tag) {
				return
			}
		}
	}
}

910
go/vendor/github.com/cilium/ebpf/btf/types.go generated vendored Normal file
View File

@@ -0,0 +1,910 @@
package btf
import (
"errors"
"fmt"
"io"
"math"
"strings"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
)
// Mirrors MAX_RESOLVE_DEPTH in libbpf.
// https://github.com/libbpf/libbpf/blob/e26b84dc330c9644c07428c271ab491b0f01f4e1/src/btf.c#L761
const maxResolveDepth = 32
// TypeID identifies a type in a BTF section.
type TypeID = sys.TypeID
// Type represents a type described by BTF.
//
// Identity of Type follows the [Go specification]: two Types are considered
// equal if they have the same concrete type and the same dynamic value, aka
// they point at the same location in memory. This means that the following
// Types are considered distinct even though they have the same "shape".
//
// a := &Int{Size: 1}
// b := &Int{Size: 1}
// a != b
//
// [Go specification]: https://go.dev/ref/spec#Comparison_operators
type Type interface {
// Type can be formatted using the %s and %v verbs. %s outputs only the
// identity of the type, without any detail. %v outputs additional detail.
//
// Use the '+' flag to include the address of the type.
//
// Use the width to specify how many levels of detail to output, for example
// %1v will output detail for the root type and a short description of its
// children. %2v would output details of the root type and its children
// as well as a short description of the grandchildren.
fmt.Formatter
// Name of the type, empty for anonymous types and types that cannot
// carry a name, like Void and Pointer.
TypeName() string
// Make a copy of the type, without copying Type members.
copy() Type
// New implementations must update walkType.
}
var (
_ Type = (*Int)(nil)
_ Type = (*Struct)(nil)
_ Type = (*Union)(nil)
_ Type = (*Enum)(nil)
_ Type = (*Fwd)(nil)
_ Type = (*Func)(nil)
_ Type = (*Typedef)(nil)
_ Type = (*Var)(nil)
_ Type = (*Datasec)(nil)
_ Type = (*Float)(nil)
_ Type = (*declTag)(nil)
_ Type = (*TypeTag)(nil)
_ Type = (*cycle)(nil)
)
// Void is the unit type of BTF.
type Void struct{}

func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) }
func (v *Void) TypeName() string               { return "" }
func (v *Void) size() uint32                   { return 0 }

// copy returns a typed nil: Void carries no state, so no allocation is needed.
func (v *Void) copy() Type { return (*Void)(nil) }
type IntEncoding byte

// Valid IntEncodings.
//
// These may look like they are flags, but they aren't.
const (
	Unsigned IntEncoding = 0
	Signed   IntEncoding = 1
	Char     IntEncoding = 2
	Bool     IntEncoding = 4
)

// String returns a human readable name for the encoding.
func (ie IntEncoding) String() string {
	switch ie {
	case Unsigned:
		return "unsigned"
	case Signed:
		return "signed"
	case Char:
		// NB: There is no way to determine signedness for char.
		return "char"
	case Bool:
		return "bool"
	}
	return fmt.Sprintf("IntEncoding(%d)", byte(ie))
}
// Int is an integer of a given length.
//
// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
type Int struct {
	Name string

	// The size of the integer in bytes.
	Size     uint32
	Encoding IntEncoding
}

func (i *Int) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, i, i.Encoding, "size=", i.Size)
}

func (i *Int) TypeName() string { return i.Name }
func (i *Int) size() uint32     { return i.Size }

// copy returns a shallow copy of the type.
func (i *Int) copy() Type {
	cpy := *i
	return &cpy
}
// Pointer is a pointer to another type.
type Pointer struct {
	Target Type
}

func (p *Pointer) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, p, "target=", p.Target)
}

func (p *Pointer) TypeName() string { return "" }

// size is a fixed 8 bytes regardless of Target.
func (p *Pointer) size() uint32 { return 8 }

// copy returns a shallow copy; Target is shared.
func (p *Pointer) copy() Type {
	cpy := *p
	return &cpy
}
// Array is an array with a fixed number of elements.
type Array struct {
	// Type of the index values, typically an unsigned int.
	Index  Type
	Type   Type
	Nelems uint32
}

func (arr *Array) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems)
}

func (arr *Array) TypeName() string { return "" }

// copy returns a shallow copy; Index and Type are shared.
func (arr *Array) copy() Type {
	cpy := *arr
	return &cpy
}
// Struct is a compound type of consecutive members.
type Struct struct {
	Name string
	// The size of the struct including padding, in bytes
	Size    uint32
	Members []Member
	Tags    []string
}

func (s *Struct) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, s, "fields=", len(s.Members))
}

func (s *Struct) TypeName() string { return s.Name }

func (s *Struct) size() uint32 { return s.Size }

// copy deep-copies Members and Tags; member Types are shared.
func (s *Struct) copy() Type {
	cpy := *s
	cpy.Members = copyMembers(s.Members)
	cpy.Tags = copyTags(cpy.Tags)
	return &cpy
}

// members implements the composite interface.
func (s *Struct) members() []Member {
	return s.Members
}
// Union is a compound type where members occupy the same memory.
type Union struct {
	Name string
	// The size of the union including padding, in bytes.
	Size    uint32
	Members []Member
	Tags    []string
}

func (u *Union) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, u, "fields=", len(u.Members))
}

func (u *Union) TypeName() string { return u.Name }

func (u *Union) size() uint32 { return u.Size }

// copy deep-copies Members and Tags; member Types are shared.
func (u *Union) copy() Type {
	cpy := *u
	cpy.Members = copyMembers(u.Members)
	cpy.Tags = copyTags(cpy.Tags)
	return &cpy
}

// members implements the composite interface.
func (u *Union) members() []Member {
	return u.Members
}
// copyMembers returns a copy of orig with each member's Tags slice also
// duplicated. Member Types are shared with the original.
func copyMembers(orig []Member) []Member {
	cpy := make([]Member, len(orig))
	copy(cpy, orig)
	for i := range cpy {
		cpy[i].Tags = copyTags(cpy[i].Tags)
	}
	return cpy
}
// copyTags duplicates a tag slice, preserving the nil vs zero-length
// distinction of the input.
func copyTags(orig []string) []string {
	if orig == nil {
		return nil
	}
	return append(make([]string, 0, len(orig)), orig...)
}
type composite interface {
Type
members() []Member
}
var (
_ composite = (*Struct)(nil)
_ composite = (*Union)(nil)
)
// A value in bits.
type Bits uint32

// Bytes converts a bit value into bytes.
func (b Bits) Bytes() uint32 {
	const bitsPerByte = 8
	return uint32(b) / bitsPerByte
}
// Member is part of a Struct or Union.
//
// It is not a valid Type.
type Member struct {
	Name string
	Type Type
	// Offset of the member from the start of the compound type, in bits.
	Offset Bits
	// Non-zero if the member is a bitfield.
	BitfieldSize Bits
	Tags         []string
}
// Enum lists possible values.
type Enum struct {
	Name string
	// Size of the enum value in bytes.
	Size uint32
	// True if the values should be interpreted as signed integers.
	Signed bool
	Values []EnumValue
}

func (e *Enum) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values))
}

func (e *Enum) TypeName() string { return e.Name }

// EnumValue is part of an Enum
//
// It is not a valid Type
type EnumValue struct {
	Name  string
	Value uint64
}

func (e *Enum) size() uint32 { return e.Size }

// copy deep-copies Values.
func (e *Enum) copy() Type {
	cpy := *e
	cpy.Values = make([]EnumValue, len(e.Values))
	copy(cpy.Values, e.Values)
	return &cpy
}
// FwdKind is the type of forward declaration.
type FwdKind int

// Valid types of forward declaration.
const (
	FwdStruct FwdKind = iota
	FwdUnion
)

// String returns "struct" or "union" for the known kinds.
func (fk FwdKind) String() string {
	if fk == FwdStruct {
		return "struct"
	}
	if fk == FwdUnion {
		return "union"
	}
	return fmt.Sprintf("%T(%d)", fk, int(fk))
}
// Fwd is a forward declaration of a Type.
type Fwd struct {
	Name string
	Kind FwdKind
}

func (f *Fwd) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, f, f.Kind)
}

func (f *Fwd) TypeName() string { return f.Name }

// copy returns a shallow copy of the type.
func (f *Fwd) copy() Type {
	cpy := *f
	return &cpy
}

// matches reports whether typ is a struct or union of the kind this forward
// declaration refers to.
func (f *Fwd) matches(typ Type) bool {
	if _, ok := As[*Struct](typ); ok && f.Kind == FwdStruct {
		return true
	}

	if _, ok := As[*Union](typ); ok && f.Kind == FwdUnion {
		return true
	}

	return false
}
// Typedef is an alias of a Type.
type Typedef struct {
	Name string
	Type Type
	Tags []string
}

func (td *Typedef) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, td, td.Type)
}

func (td *Typedef) TypeName() string { return td.Name }

// copy deep-copies Tags; Type is shared.
func (td *Typedef) copy() Type {
	cpy := *td
	cpy.Tags = copyTags(td.Tags)
	return &cpy
}
// Volatile is a qualifier.
type Volatile struct {
	Type Type
}

func (v *Volatile) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, v, v.Type)
}

func (v *Volatile) TypeName() string { return "" }

// qualify returns the qualified type.
func (v *Volatile) qualify() Type { return v.Type }

// copy returns a shallow copy; Type is shared.
func (v *Volatile) copy() Type {
	cpy := *v
	return &cpy
}
// Const is a qualifier.
type Const struct {
	Type Type
}

func (c *Const) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, c, c.Type)
}

func (c *Const) TypeName() string { return "" }

// qualify returns the qualified type.
func (c *Const) qualify() Type { return c.Type }

// copy returns a shallow copy; Type is shared.
func (c *Const) copy() Type {
	cpy := *c
	return &cpy
}
// Restrict is a qualifier.
type Restrict struct {
	Type Type
}

func (r *Restrict) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, r, r.Type)
}

func (r *Restrict) TypeName() string { return "" }

// qualify returns the qualified type.
func (r *Restrict) qualify() Type { return r.Type }

// copy returns a shallow copy; Type is shared.
func (r *Restrict) copy() Type {
	cpy := *r
	return &cpy
}
// Func is a function definition.
type Func struct {
	Name string
	// Type is the function's FuncProto.
	Type    Type
	Linkage FuncLinkage
	Tags    []string
	// ParamTags holds a list of tags for each parameter of the FuncProto to which `Type` points.
	// If no tags are present for any param, the outer slice will be nil/len(ParamTags)==0.
	// If at least 1 param has a tag, the outer slice will have the same length as the number of params.
	// The inner slice contains the tags and may be nil/len(ParamTags[i])==0 if no tags are present for that param.
	ParamTags [][]string
}

// FuncMetadata returns the Func stored in the instruction's metadata, or nil.
func FuncMetadata(ins *asm.Instruction) *Func {
	fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func)
	return fn
}

// WithFuncMetadata adds a btf.Func to the Metadata of asm.Instruction.
func WithFuncMetadata(ins asm.Instruction, fn *Func) asm.Instruction {
	ins.Metadata.Set(funcInfoMeta{}, fn)
	return ins
}

func (f *Func) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, f, f.Linkage, "proto=", f.Type)
}

func (f *Func) TypeName() string { return f.Name }

// copy deep-copies Tags and ParamTags; Type is shared.
func (f *Func) copy() Type {
	cpy := *f
	cpy.Tags = copyTags(f.Tags)
	if f.ParamTags != nil { // preserve nil vs zero-len slice distinction
		ptCopy := make([][]string, len(f.ParamTags))
		for i, tags := range f.ParamTags {
			ptCopy[i] = copyTags(tags)
		}
		cpy.ParamTags = ptCopy
	}
	return &cpy
}
// FuncProto is a function declaration.
type FuncProto struct {
	Return Type
	Params []FuncParam
}

// Format implements fmt.Formatter.
func (fp *FuncProto) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return)
}

// TypeName implements Type; prototypes are anonymous.
func (fp *FuncProto) TypeName() string { return "" }

// copy returns a shallow copy with an independent Params slice.
func (fp *FuncProto) copy() Type {
	dup := *fp
	dup.Params = append([]FuncParam(nil), fp.Params...)
	return &dup
}

// FuncParam is a single parameter of a FuncProto.
type FuncParam struct {
	Name string
	Type Type
}
// Var is a global variable.
type Var struct {
	Name    string
	Type    Type
	Linkage VarLinkage
	Tags    []string
}

// Format implements fmt.Formatter.
func (v *Var) Format(fs fmt.State, verb rune) { formatType(fs, verb, v, v.Linkage) }

// TypeName returns the name of the variable.
func (v *Var) TypeName() string { return v.Name }

// copy returns a shallow copy with an independent Tags slice.
func (v *Var) copy() Type {
	dup := *v
	dup.Tags = copyTags(v.Tags)
	return &dup
}
// Datasec is a global program section containing data.
type Datasec struct {
	Name string
	Size uint32
	Vars []VarSecinfo
}

// Format implements fmt.Formatter.
func (ds *Datasec) Format(fs fmt.State, verb rune) { formatType(fs, verb, ds) }

// TypeName returns the name of the section.
func (ds *Datasec) TypeName() string { return ds.Name }

// size returns the section size in bytes.
func (ds *Datasec) size() uint32 { return ds.Size }

// copy returns a shallow copy with an independent Vars slice.
func (ds *Datasec) copy() Type {
	dup := *ds
	dup.Vars = append([]VarSecinfo(nil), ds.Vars...)
	return &dup
}

// VarSecinfo describes a variable in a Datasec.
//
// It is not a valid Type.
type VarSecinfo struct {
	// Var or Func.
	Type   Type
	Offset uint32
	Size   uint32
}
// Float is a float of a given length.
type Float struct {
	Name string
	// The size of the float in bytes.
	Size uint32
}

// Format implements fmt.Formatter. The size is reported in bits.
func (f *Float) Format(fs fmt.State, verb rune) { formatType(fs, verb, f, "size=", f.Size*8) }

// TypeName returns the name of the float type.
func (f *Float) TypeName() string { return f.Name }

// size returns the size in bytes.
func (f *Float) size() uint32 { return f.Size }

// copy returns a shallow copy.
func (f *Float) copy() Type {
	dup := *f
	return &dup
}
// declTag associates metadata with a declaration.
type declTag struct {
	Type  Type
	Value string
	// The index this tag refers to in the target type. For composite types,
	// a value of -1 indicates that the tag refers to the whole type. Otherwise
	// it indicates which member or argument the tag applies to.
	Index int
}

// Format implements fmt.Formatter.
func (dt *declTag) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, dt, "type=", dt.Type, "value=", dt.Value, "index=", dt.Index)
}

// TypeName implements Type; decl tags are anonymous.
func (dt *declTag) TypeName() string { return "" }

// copy returns a shallow copy.
func (dt *declTag) copy() Type {
	dup := *dt
	return &dup
}
// TypeTag associates metadata with a pointer type. Tag types act as a custom
// modifier(const, restrict, volatile) for the target type. Unlike declTags,
// TypeTags are ordered so the order in which they are added matters.
//
// One of their uses is to mark pointers as `__kptr` meaning a pointer points
// to kernel memory. Adding a `__kptr` to pointers in map values allows you
// to store pointers to kernel memory in maps.
type TypeTag struct {
	Type  Type
	Value string
}

// Format implements fmt.Formatter.
func (tt *TypeTag) Format(fs fmt.State, verb rune) {
	formatType(fs, verb, tt, "type=", tt.Type, "value=", tt.Value)
}

// TypeName implements Type; type tags are anonymous.
func (tt *TypeTag) TypeName() string { return "" }

// qualify returns the tagged type.
func (tt *TypeTag) qualify() Type { return tt.Type }

// copy returns a shallow copy.
func (tt *TypeTag) copy() Type {
	dup := *tt
	return &dup
}
// cycle is a type which had to be elided since it exceeded maxTypeDepth.
type cycle struct {
	root Type
}

// ID returns a sentinel ID which is never a valid BTF type ID.
func (c *cycle) ID() TypeID { return math.MaxUint32 }

// Format implements fmt.Formatter.
func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) }

// TypeName implements Type; cycles are anonymous.
func (c *cycle) TypeName() string { return "" }

// copy returns a shallow copy.
func (c *cycle) copy() Type {
	dup := *c
	return &dup
}
// sizer is implemented by types which have a fixed size in bytes.
type sizer interface {
	size() uint32
}

// Compile-time interface satisfaction checks for sizer.
var (
	_ sizer = (*Int)(nil)
	_ sizer = (*Pointer)(nil)
	_ sizer = (*Struct)(nil)
	_ sizer = (*Union)(nil)
	_ sizer = (*Enum)(nil)
	_ sizer = (*Datasec)(nil)
)

// qualifier is implemented by types which wrap another type without changing
// its size, e.g. const/volatile/restrict modifiers and type tags.
type qualifier interface {
	qualify() Type
}

// Compile-time interface satisfaction checks for qualifier.
var (
	_ qualifier = (*Const)(nil)
	_ qualifier = (*Restrict)(nil)
	_ qualifier = (*Volatile)(nil)
	_ qualifier = (*TypeTag)(nil)
)

// errUnsizedType is returned by Sizeof for types which do not implement sizer
// after resolving typedefs and qualifiers.
var errUnsizedType = errors.New("type is unsized")
// Sizeof returns the size of a type in bytes.
//
// Returns an error if the size can't be computed.
func Sizeof(typ Type) (int, error) {
	var (
		// n accumulates the product of array element counts encountered while
		// walking towards the element type.
		n    = int64(1)
		elem int64
	)
	// Bound the walk so that cyclic type graphs terminate.
	for i := 0; i < maxResolveDepth; i++ {
		switch v := typ.(type) {
		case *Array:
			// Guard the multiplication below against int64 overflow.
			if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
				return 0, fmt.Errorf("type %s: overflow", typ)
			}
			// Arrays may be of zero length, which allows
			// n to be zero as well.
			n *= int64(v.Nelems)
			typ = v.Type
			continue
		case sizer:
			elem = int64(v.size())
		case *Typedef:
			typ = v.Type
			continue
		case qualifier:
			typ = v.qualify()
			continue
		default:
			return 0, fmt.Errorf("type %T: %w", typ, errUnsizedType)
		}
		// Guard n*elem against int64 overflow.
		if n > 0 && elem > math.MaxInt64/n {
			return 0, fmt.Errorf("type %s: overflow", typ)
		}
		size := n * elem
		// Reject sizes which do not fit into the platform's int.
		if int64(int(size)) != size {
			return 0, fmt.Errorf("type %s: overflow", typ)
		}
		return int(size), nil
	}
	return 0, fmt.Errorf("type %s: exceeded type depth", typ)
}
// alignof returns the alignment of a type.
//
// Returns an error if the Type can't be aligned, like an integer with an uneven
// size. Currently only supports the subset of types necessary for bitfield
// relocations.
func alignof(typ Type) (int, error) {
	var n int
	switch t := UnderlyingType(typ).(type) {
	case *Enum:
		n = int(t.size())
	case *Int:
		n = int(t.Size)
	case *Array:
		// An array is aligned like its element type.
		return alignof(t.Type)
	default:
		return 0, fmt.Errorf("can't calculate alignment of %T", t)
	}
	// Valid alignments are powers of two; anything else indicates a malformed
	// type (e.g. a 3-byte integer).
	if !internal.IsPow(n) {
		return 0, fmt.Errorf("alignment value %d is not a power of two", n)
	}
	return n, nil
}
// Copy a Type recursively.
//
// typ may form a cycle.
func Copy(typ Type) Type {
	return copyType(typ, nil, make(map[Type]Type), nil)
}

// copyType copies typ and all types reachable from it. copies memoizes
// already-copied types so that cycles terminate and shared subtrees stay
// shared in the copy. If ids maps a type to its TypeID, the copy's ID is
// recorded in copiedIDs.
func copyType(typ Type, ids map[Type]TypeID, copies map[Type]Type, copiedIDs map[Type]TypeID) Type {
	if typ == nil {
		return nil
	}
	cpy, ok := copies[typ]
	if ok {
		// This has been copied previously, no need to continue.
		return cpy
	}
	// Register the shallow copy before recursing so that cyclic references
	// resolve to it instead of recursing forever.
	cpy = typ.copy()
	copies[typ] = cpy
	if id, ok := ids[typ]; ok {
		copiedIDs[cpy] = id
	}
	for child := range children(cpy) {
		*child = copyType(*child, ids, copies, copiedIDs)
	}
	return cpy
}
// typeDeque is a double-ended queue of pointers to Type.
type typeDeque = internal.Deque[*Type]

// essentialName represents the name of a BTF type stripped of any flavor
// suffixes after a ___ delimiter.
type essentialName string

// newEssentialName returns name without a ___ suffix.
//
// CO-RE has the concept of 'struct flavors', which are used to deal with
// changes in kernel data structures. Anything after three underscores
// in a type name is ignored for the purpose of finding a candidate type
// in the kernel's BTF.
func newEssentialName(name string) essentialName {
	if name == "" {
		return ""
	}
	// Only strip the suffix when the delimiter is not at the start of the
	// name, so a name like "___x" is returned unchanged.
	lastIdx := strings.LastIndex(name, "___")
	if lastIdx > 0 {
		return essentialName(name[:lastIdx])
	}
	return essentialName(name)
}
// UnderlyingType skips qualifiers and Typedefs.
//
// Returns a cycle sentinel if the chain of qualifiers and typedefs is deeper
// than maxResolveDepth.
func UnderlyingType(typ Type) Type {
	result := typ
	for depth := 0; depth <= maxResolveDepth; depth++ {
		switch v := (result).(type) {
		case qualifier:
			result = v.qualify()
		case *Typedef:
			result = v.Type
		default:
			return result
		}
	}
	return &cycle{typ}
}
// QualifiedType returns the type with all qualifiers removed.
//
// Unlike UnderlyingType it keeps Typedefs intact. Returns a cycle sentinel
// if more than maxResolveDepth qualifiers are nested.
func QualifiedType(typ Type) Type {
	result := typ
	for depth := 0; depth <= maxResolveDepth; depth++ {
		q, ok := result.(qualifier)
		if !ok {
			return result
		}
		result = q.qualify()
	}
	return &cycle{typ}
}
// As returns typ if is of type T. Otherwise it peels qualifiers and Typedefs
// until it finds a T.
//
// Returns the zero value and false if there is no T or if the type is nested
// too deeply.
func As[T Type](typ Type) (T, bool) {
	// NB: We can't make this function return (*T) since then
	// we can't assert that a type matches an interface which
	// embeds Type: as[composite](T).
	for depth := 0; depth <= maxResolveDepth; depth++ {
		switch v := (typ).(type) {
		case T:
			return v, true
		case qualifier:
			typ = v.qualify()
		case *Typedef:
			typ = v.Type
		default:
			// Neither a T nor something we can peel further.
			goto notFound
		}
	}
notFound:
	var zero T
	return zero, false
}
// formatState wraps fmt.State with the current recursion depth, so that
// formatType can limit how deep it prints cyclical types.
type formatState struct {
	fmt.State
	depth int
}

// formattableType is a subset of Type, to ease unit testing of formatType.
type formattableType interface {
	fmt.Formatter
	TypeName() string
}
// formatType formats a type in a canonical form.
//
// Handles cyclical types by only printing cycles up to a certain depth. Elements
// in extra are separated by spaces unless the preceding element is a string
// ending in '='.
func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) {
	if verb != 'v' && verb != 's' {
		fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb)
		return
	}
	_, _ = io.WriteString(f, internal.GoTypeName(t))
	if name := t.TypeName(); name != "" {
		// Output BTF type name if present.
		fmt.Fprintf(f, ":%q", name)
	}
	if f.Flag('+') {
		// Output address if requested.
		fmt.Fprintf(f, ":%#p", t)
	}
	if verb == 's' {
		// %s omits details.
		return
	}
	// Recover the recursion depth if we were invoked from formatType itself.
	var depth int
	if ps, ok := f.(*formatState); ok {
		depth = ps.depth
		f = ps.State
	}
	// The format width, if given, controls how deep to recurse.
	maxDepth, ok := f.Width()
	if !ok {
		maxDepth = 0
	}
	if depth > maxDepth {
		// We've reached the maximum depth. This avoids infinite recursion even
		// for cyclical types.
		return
	}
	if len(extra) == 0 {
		return
	}
	wantSpace := false
	_, _ = io.WriteString(f, "[")
	for _, arg := range extra {
		if wantSpace {
			_, _ = io.WriteString(f, " ")
		}
		switch v := arg.(type) {
		case string:
			_, _ = io.WriteString(f, v)
			// Strings ending in '=' glue to the next element without a space.
			wantSpace = len(v) > 0 && v[len(v)-1] != '='
			continue
		case formattableType:
			// Recurse with an incremented depth so cycles terminate.
			v.Format(&formatState{f, depth + 1}, verb)
		default:
			fmt.Fprint(f, arg)
		}
		wantSpace = true
	}
	_, _ = io.WriteString(f, "]")
}

780
go/vendor/github.com/cilium/ebpf/btf/unmarshal.go generated vendored Normal file
View File

@@ -0,0 +1,780 @@
package btf
import (
"bytes"
"encoding/binary"
"fmt"
"hash/maphash"
"io"
"iter"
"maps"
"math"
"slices"
"sync"
)
// decoder lazily inflates Types from raw BTF wire-format data.
type decoder struct {
	// Immutable fields, may be shared.
	base      *decoder
	byteOrder binary.ByteOrder
	raw       []byte
	strings   *stringTable
	// The ID for offsets[0].
	firstTypeID TypeID
	// Map from TypeID to offset of the marshaled data in raw. Contains an entry
	// for each TypeID, including 0 aka Void. The offset for Void is invalid.
	offsets []int
	// Map from a type's ID to the IDs of the declTags targeting it.
	declTags map[TypeID][]TypeID
	// An index from essentialName to TypeID.
	namedTypes *fuzzyStringIndex
	// Protection for mutable fields below.
	mu sync.Mutex
	// Cache of already-inflated types and its reverse index.
	types           map[TypeID]Type
	typeIDs         map[Type]TypeID
	legacyBitfields map[TypeID][2]Bits // offset, size
}
// newDecoder prepares raw BTF for lazy decoding. It does two scans over raw:
// the first to size the internal tables, the second to record per-type
// offsets, decl tags and the name index. base, if non-nil, supplies the types
// of a base spec for split BTF.
func newDecoder(raw []byte, bo binary.ByteOrder, strings *stringTable, base *decoder) (*decoder, error) {
	firstTypeID := TypeID(0)
	if base != nil {
		if base.byteOrder != bo {
			return nil, fmt.Errorf("can't use %v base with %v split BTF", base.byteOrder, bo)
		}
		if base.firstTypeID != 0 {
			return nil, fmt.Errorf("can't use split BTF as base")
		}
		// Split BTF IDs continue where the base leaves off.
		firstTypeID = TypeID(len(base.offsets))
	}
	// First pass: count types, decl tags and named types so the tables below
	// can be allocated exactly once.
	var header btfType
	var numTypes, numDeclTags, numNamedTypes int
	for _, err := range allBtfTypeOffsets(raw, bo, &header) {
		if err != nil {
			return nil, err
		}
		numTypes++
		if header.Kind() == kindDeclTag {
			numDeclTags++
		}
		if header.NameOff != 0 {
			numNamedTypes++
		}
	}
	if firstTypeID == 0 {
		// Allocate an extra slot for Void so we don't have to deal with
		// constant off by one issues.
		numTypes++
	}
	offsets := make([]int, 0, numTypes)
	declTags := make(map[TypeID][]TypeID, numDeclTags)
	namedTypes := newFuzzyStringIndex(numNamedTypes)
	if firstTypeID == 0 {
		// Add a sentinel for Void.
		offsets = append(offsets, math.MaxInt)
	}
	// Second pass: record offsets and populate the indices.
	id := firstTypeID + TypeID(len(offsets))
	for offset := range allBtfTypeOffsets(raw, bo, &header) {
		if id < firstTypeID {
			// id wrapped around, the spec holds more types than TypeID can
			// address.
			return nil, fmt.Errorf("no more type IDs")
		}
		offsets = append(offsets, offset)
		if header.Kind() == kindDeclTag {
			declTags[header.Type()] = append(declTags[header.Type()], id)
		}
		// Build named type index.
		name, err := strings.LookupBytes(header.NameOff)
		if err != nil {
			return nil, fmt.Errorf("lookup type name for id %v: %w", id, err)
		}
		if len(name) > 0 {
			// Index by essential name, i.e. with any ___flavour suffix removed.
			if i := bytes.Index(name, []byte("___")); i != -1 {
				// Flavours are rare. It's cheaper to find the first index for some
				// reason.
				i = bytes.LastIndex(name, []byte("___"))
				name = name[:i]
			}
			namedTypes.Add(name, id)
		}
		id++
	}
	namedTypes.Build()
	return &decoder{
		base,
		bo,
		raw,
		strings,
		firstTypeID,
		offsets,
		declTags,
		namedTypes,
		sync.Mutex{},
		make(map[TypeID]Type),
		make(map[Type]TypeID),
		make(map[TypeID][2]Bits),
	}, nil
}
// allBtfTypeOffsets iterates over the start offsets of all types in buf.
// header is overwritten with the last decoded type header, so the caller can
// inspect it inside the loop. Yields (-1, err) and stops on malformed data.
func allBtfTypeOffsets(buf []byte, bo binary.ByteOrder, header *btfType) iter.Seq2[int, error] {
	return func(yield func(int, error) bool) {
		for offset := 0; offset < len(buf); {
			start := offset
			n, err := unmarshalBtfType(header, buf[offset:], bo)
			if err != nil {
				yield(-1, fmt.Errorf("unmarshal type header: %w", err))
				return
			}
			offset += n
			// Skip the kind-specific auxiliary data following the header.
			n, err = header.DataLen()
			if err != nil {
				yield(-1, err)
				return
			}
			offset += n
			if offset > len(buf) {
				yield(-1, fmt.Errorf("auxiliary type data: %w", io.ErrUnexpectedEOF))
				return
			}
			if !yield(start, nil) {
				return
			}
		}
	}
}
// rebaseDecoder returns a copy of split decoder d which resolves base types
// via base instead of d.base. base must wrap the same raw BTF bytes as
// d.base; only pointer identity and length are checked.
func rebaseDecoder(d *decoder, base *decoder) (*decoder, error) {
	if d.base == nil {
		return nil, fmt.Errorf("rebase split spec: not a split spec")
	}
	// Compare length and backing array identity instead of the contents.
	if len(d.base.raw) != len(base.raw) || (len(d.base.raw) > 0 && &d.base.raw[0] != &base.raw[0]) {
		return nil, fmt.Errorf("rebase split spec: raw BTF differs")
	}
	// Immutable state is shared, mutable caches start out empty.
	return &decoder{
		base,
		d.byteOrder,
		d.raw,
		d.strings,
		d.firstTypeID,
		d.offsets,
		d.declTags,
		d.namedTypes,
		sync.Mutex{},
		make(map[TypeID]Type),
		make(map[Type]TypeID),
		make(map[TypeID][2]Bits),
	}, nil
}
// Copy performs a deep copy of a decoder and its base.
func (d *decoder) Copy() *decoder {
	if d == nil {
		return nil
	}
	return d.copy(nil)
}

// copy deep-copies d. copiedTypes memoizes type copies and is shared with the
// recursive copy of d.base, so types referencing base types keep pointing at
// the base's copies.
func (d *decoder) copy(copiedTypes map[Type]Type) *decoder {
	if d == nil {
		return nil
	}
	d.mu.Lock()
	defer d.mu.Unlock()
	if copiedTypes == nil {
		copiedTypes = make(map[Type]Type, len(d.types))
	}
	types := make(map[TypeID]Type, len(d.types))
	typeIDs := make(map[Type]TypeID, len(d.typeIDs))
	for id, typ := range d.types {
		types[id] = copyType(typ, d.typeIDs, copiedTypes, typeIDs)
	}
	// Immutable fields (raw, offsets, indices) are shared with the original.
	return &decoder{
		d.base.copy(copiedTypes),
		d.byteOrder,
		d.raw,
		d.strings,
		d.firstTypeID,
		d.offsets,
		d.declTags,
		d.namedTypes,
		sync.Mutex{},
		types,
		typeIDs,
		maps.Clone(d.legacyBitfields),
	}
}
// TypeID returns the ID for a Type previously obtained via [TypeByID].
func (d *decoder) TypeID(typ Type) (TypeID, error) {
	if _, isVoid := typ.(*Void); isVoid {
		// Equality is weird for void, since it is a zero sized type.
		return 0, nil
	}
	d.mu.Lock()
	defer d.mu.Unlock()
	if id, ok := d.typeIDs[typ]; ok {
		return id, nil
	}
	return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
}
// TypesByName returns all types which have the given essential name.
//
// Returns ErrNotFound if no matching Type exists.
func (d *decoder) TypesByName(name essentialName) ([]Type, error) {
	var types []Type
	// The index is fuzzy (hash based), so candidates must be re-checked
	// against the actual name below.
	for id := range d.namedTypes.Find(string(name)) {
		typ, err := d.TypeByID(id)
		if err != nil {
			return nil, err
		}
		if newEssentialName(typ.TypeName()) == name {
			// Deal with hash collisions by checking against the name.
			types = append(types, typ)
		}
	}
	if len(types) == 0 {
		// Return an unwrapped error because this is on the hot path
		// for CO-RE.
		return nil, ErrNotFound
	}
	return types, nil
}
// TypeByID decodes a type and any of its descendants.
//
// Safe for concurrent use; inflated types are cached.
func (d *decoder) TypeByID(id TypeID) (Type, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.inflateType(id)
}
// inflateType decodes the type with the given ID and, recursively, all types
// it references. The caller must hold d.mu. Successfully inflated types are
// cached in d.types; partially inflated ones are removed on error.
func (d *decoder) inflateType(id TypeID) (typ Type, err error) {
	defer func() {
		// fixup (below) panics on failure instead of threading errors through
		// the recursion; convert that back into an error here.
		if r := recover(); r != nil {
			err = r.(error)
		}
		// err is the return value of the enclosing function, even if an explicit
		// return is used.
		// See https://go.dev/ref/spec#Defer_statements
		if err != nil {
			// Remove partially inflated type so that d.types only contains
			// fully inflated ones.
			delete(d.types, id)
		} else {
			// Populate reverse index.
			d.typeIDs[typ] = id
		}
	}()
	if id < d.firstTypeID {
		// The ID belongs to the base of this split BTF.
		return d.base.inflateType(id)
	}
	if id == 0 {
		// Void is defined to always be type ID 0, and is thus omitted from BTF.
		// Fast-path because it is looked up frequently.
		return (*Void)(nil), nil
	}
	if typ, ok := d.types[id]; ok {
		return typ, nil
	}
	// fixup inflates id and stores the result via typ. Note that types
	// register themselves in d.types *before* calling fixup, so cyclic
	// references resolve to the partially constructed value.
	fixup := func(id TypeID, typ *Type) {
		fixup, err := d.inflateType(id)
		if err != nil {
			panic(err)
		}
		*typ = fixup
	}
	// convertMembers decodes the members of a struct or union from buf.
	convertMembers := func(header *btfType, buf []byte) ([]Member, error) {
		var bm btfMember
		members := make([]Member, 0, header.Vlen())
		for i := range header.Vlen() {
			n, err := unmarshalBtfMember(&bm, buf, d.byteOrder)
			if err != nil {
				return nil, fmt.Errorf("unmarshal member: %w", err)
			}
			buf = buf[n:]
			name, err := d.strings.Lookup(bm.NameOff)
			if err != nil {
				return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
			}
			members = append(members, Member{
				Name:   name,
				Offset: Bits(bm.Offset),
			})
			m := &members[i]
			fixup(bm.Type, &m.Type)
			if header.Bitfield() {
				// New-style bitfield: size is packed into the top byte of
				// the offset.
				m.BitfieldSize = Bits(bm.Offset >> 24)
				m.Offset &= 0xffffff
				// We ignore legacy bitfield definitions if the current composite
				// is a new-style bitfield. This is kind of safe since offset and
				// size on the type of the member must be zero if kindFlat is set
				// according to spec.
				continue
			}
			// This may be a legacy bitfield, try to fix it up.
			data, ok := d.legacyBitfields[bm.Type]
			if ok {
				// Bingo!
				m.Offset += data[0]
				m.BitfieldSize = data[1]
				continue
			}
		}
		return members, nil
	}
	idx := int(id - d.firstTypeID)
	if idx >= len(d.offsets) {
		return nil, fmt.Errorf("type id %v: %w", id, ErrNotFound)
	}
	offset := d.offsets[idx]
	if offset >= len(d.raw) {
		return nil, fmt.Errorf("offset out of bounds")
	}
	var (
		header    btfType
		bInt      btfInt
		bArr      btfArray
		bVariable btfVariable
		bDeclTag  btfDeclTag
		pos       = d.raw[offset:]
	)
	{
		if n, err := unmarshalBtfType(&header, pos, d.byteOrder); err != nil {
			return nil, fmt.Errorf("can't unmarshal type info for id %v: %v", id, err)
		} else {
			pos = pos[n:]
		}
		name, err := d.strings.Lookup(header.NameOff)
		if err != nil {
			return nil, fmt.Errorf("get name for type id %d: %w", id, err)
		}
		// Each case registers the (possibly incomplete) type in d.types before
		// calling fixup, so that self-referential types terminate.
		switch header.Kind() {
		case kindInt:
			size := header.Size()
			if _, err := unmarshalBtfInt(&bInt, pos, d.byteOrder); err != nil {
				return nil, fmt.Errorf("can't unmarshal btfInt, id: %d: %w", id, err)
			}
			if bInt.Offset() > 0 || bInt.Bits().Bytes() != size {
				// Remember legacy bitfield data so composites referencing this
				// Int can be fixed up in convertMembers.
				d.legacyBitfields[id] = [2]Bits{bInt.Offset(), bInt.Bits()}
			}
			typ = &Int{name, header.Size(), bInt.Encoding()}
			d.types[id] = typ
		case kindPointer:
			ptr := &Pointer{nil}
			d.types[id] = ptr
			fixup(header.Type(), &ptr.Target)
			typ = ptr
		case kindArray:
			if _, err := unmarshalBtfArray(&bArr, pos, d.byteOrder); err != nil {
				return nil, fmt.Errorf("can't unmarshal btfArray, id: %d: %w", id, err)
			}
			arr := &Array{nil, nil, bArr.Nelems}
			d.types[id] = arr
			fixup(bArr.IndexType, &arr.Index)
			fixup(bArr.Type, &arr.Type)
			typ = arr
		case kindStruct:
			str := &Struct{name, header.Size(), nil, nil}
			d.types[id] = str
			typ = str
			str.Members, err = convertMembers(&header, pos)
			if err != nil {
				return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
			}
		case kindUnion:
			uni := &Union{name, header.Size(), nil, nil}
			d.types[id] = uni
			typ = uni
			uni.Members, err = convertMembers(&header, pos)
			if err != nil {
				return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
			}
		case kindEnum:
			enum := &Enum{name, header.Size(), header.Signed(), nil}
			d.types[id] = enum
			typ = enum
			var be btfEnum
			enum.Values = make([]EnumValue, 0, header.Vlen())
			for i := range header.Vlen() {
				n, err := unmarshalBtfEnum(&be, pos, d.byteOrder)
				if err != nil {
					return nil, fmt.Errorf("unmarshal btfEnum %d, id: %d: %w", i, id, err)
				}
				pos = pos[n:]
				name, err := d.strings.Lookup(be.NameOff)
				if err != nil {
					return nil, fmt.Errorf("get name for enum value %d: %s", i, err)
				}
				value := uint64(be.Val)
				if enum.Signed {
					// Sign extend values to 64 bit.
					value = uint64(int32(be.Val))
				}
				enum.Values = append(enum.Values, EnumValue{name, value})
			}
		case kindForward:
			typ = &Fwd{name, header.FwdKind()}
			d.types[id] = typ
		case kindTypedef:
			typedef := &Typedef{name, nil, nil}
			d.types[id] = typedef
			fixup(header.Type(), &typedef.Type)
			typ = typedef
		case kindVolatile:
			volatile := &Volatile{nil}
			d.types[id] = volatile
			fixup(header.Type(), &volatile.Type)
			typ = volatile
		case kindConst:
			cnst := &Const{nil}
			d.types[id] = cnst
			fixup(header.Type(), &cnst.Type)
			typ = cnst
		case kindRestrict:
			restrict := &Restrict{nil}
			d.types[id] = restrict
			fixup(header.Type(), &restrict.Type)
			typ = restrict
		case kindFunc:
			fn := &Func{name, nil, header.Linkage(), nil, nil}
			d.types[id] = fn
			fixup(header.Type(), &fn.Type)
			typ = fn
		case kindFuncProto:
			fp := &FuncProto{}
			d.types[id] = fp
			params := make([]FuncParam, 0, header.Vlen())
			var bParam btfParam
			for i := range header.Vlen() {
				n, err := unmarshalBtfParam(&bParam, pos, d.byteOrder)
				if err != nil {
					return nil, fmt.Errorf("can't unmarshal btfParam %d, id: %d: %w", i, id, err)
				}
				pos = pos[n:]
				name, err := d.strings.Lookup(bParam.NameOff)
				if err != nil {
					return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
				}
				param := FuncParam{Name: name}
				fixup(bParam.Type, &param.Type)
				params = append(params, param)
			}
			fixup(header.Type(), &fp.Return)
			fp.Params = params
			typ = fp
		case kindVar:
			if _, err := unmarshalBtfVariable(&bVariable, pos, d.byteOrder); err != nil {
				return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err)
			}
			v := &Var{name, nil, VarLinkage(bVariable.Linkage), nil}
			d.types[id] = v
			fixup(header.Type(), &v.Type)
			typ = v
		case kindDatasec:
			ds := &Datasec{name, header.Size(), nil}
			d.types[id] = ds
			vlen := header.Vlen()
			vars := make([]VarSecinfo, 0, vlen)
			var bSecInfo btfVarSecinfo
			for i := 0; i < vlen; i++ {
				n, err := unmarshalBtfVarSecInfo(&bSecInfo, pos, d.byteOrder)
				if err != nil {
					return nil, fmt.Errorf("can't unmarshal btfVarSecinfo %d, id: %d: %w", i, id, err)
				}
				pos = pos[n:]
				vs := VarSecinfo{
					Offset: bSecInfo.Offset,
					Size:   bSecInfo.Size,
				}
				fixup(bSecInfo.Type, &vs.Type)
				vars = append(vars, vs)
			}
			ds.Vars = vars
			typ = ds
		case kindFloat:
			typ = &Float{name, header.Size()}
			d.types[id] = typ
		case kindDeclTag:
			if _, err := unmarshalBtfDeclTag(&bDeclTag, pos, d.byteOrder); err != nil {
				return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err)
			}
			btfIndex := bDeclTag.ComponentIdx
			if uint64(btfIndex) > math.MaxInt {
				return nil, fmt.Errorf("type id %d: index exceeds int", id)
			}
			dt := &declTag{nil, name, int(int32(btfIndex))}
			d.types[id] = dt
			fixup(header.Type(), &dt.Type)
			typ = dt
		case kindTypeTag:
			tt := &TypeTag{nil, name}
			d.types[id] = tt
			fixup(header.Type(), &tt.Type)
			typ = tt
		case kindEnum64:
			enum := &Enum{name, header.Size(), header.Signed(), nil}
			d.types[id] = enum
			typ = enum
			enum.Values = make([]EnumValue, 0, header.Vlen())
			var bEnum64 btfEnum64
			for i := range header.Vlen() {
				n, err := unmarshalBtfEnum64(&bEnum64, pos, d.byteOrder)
				if err != nil {
					return nil, fmt.Errorf("can't unmarshal btfEnum64 %d, id: %d: %w", i, id, err)
				}
				pos = pos[n:]
				name, err := d.strings.Lookup(bEnum64.NameOff)
				if err != nil {
					return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err)
				}
				value := (uint64(bEnum64.ValHi32) << 32) | uint64(bEnum64.ValLo32)
				enum.Values = append(enum.Values, EnumValue{name, value})
			}
		default:
			return nil, fmt.Errorf("type id %d: unknown kind: %v", id, header.Kind())
		}
	}
	// Apply any declTags targeting this type to the appropriate Tags fields.
	for _, tagID := range d.declTags[id] {
		dtType, err := d.inflateType(tagID)
		if err != nil {
			return nil, err
		}
		dt, ok := dtType.(*declTag)
		if !ok {
			return nil, fmt.Errorf("type id %v: not a declTag", tagID)
		}
		switch t := typ.(type) {
		case *Var:
			if dt.Index != -1 {
				return nil, fmt.Errorf("type %s: component idx %d is not -1", dt, dt.Index)
			}
			t.Tags = append(t.Tags, dt.Value)
		case *Typedef:
			if dt.Index != -1 {
				return nil, fmt.Errorf("type %s: component idx %d is not -1", dt, dt.Index)
			}
			t.Tags = append(t.Tags, dt.Value)
		case composite:
			if dt.Index >= 0 {
				// Tag applies to a single member.
				members := t.members()
				if dt.Index >= len(members) {
					return nil, fmt.Errorf("type %s: component idx %d exceeds members of %s", dt, dt.Index, t)
				}
				members[dt.Index].Tags = append(members[dt.Index].Tags, dt.Value)
			} else if dt.Index == -1 {
				// Tag applies to the whole struct or union.
				switch t2 := t.(type) {
				case *Struct:
					t2.Tags = append(t2.Tags, dt.Value)
				case *Union:
					t2.Tags = append(t2.Tags, dt.Value)
				}
			} else {
				return nil, fmt.Errorf("type %s: decl tag for type %s has invalid component idx", dt, t)
			}
		case *Func:
			fp, ok := t.Type.(*FuncProto)
			if !ok {
				return nil, fmt.Errorf("type %s: %s is not a FuncProto", dt, t.Type)
			}
			// Ensure the number of argument tag lists equals the number of arguments
			if len(t.ParamTags) == 0 {
				t.ParamTags = make([][]string, len(fp.Params))
			}
			if dt.Index >= 0 {
				if dt.Index >= len(fp.Params) {
					return nil, fmt.Errorf("type %s: component idx %d exceeds params of %s", dt, dt.Index, t)
				}
				t.ParamTags[dt.Index] = append(t.ParamTags[dt.Index], dt.Value)
			} else if dt.Index == -1 {
				t.Tags = append(t.Tags, dt.Value)
			} else {
				return nil, fmt.Errorf("type %s: decl tag for type %s has invalid component idx", dt, t)
			}
		default:
			return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t)
		}
	}
	return typ, nil
}
// An index from string to TypeID.
//
// Fuzzy because it may return false positive matches.
type fuzzyStringIndex struct {
	seed    maphash.Seed
	entries []fuzzyStringIndexEntry
}

// newFuzzyStringIndex returns an empty index with room for capacity entries.
func newFuzzyStringIndex(capacity int) *fuzzyStringIndex {
	return &fuzzyStringIndex{
		maphash.MakeSeed(),
		make([]fuzzyStringIndexEntry, 0, capacity),
	}
}

// Add a string to the index.
//
// Calling the method with identical arguments will create duplicate entries.
func (idx *fuzzyStringIndex) Add(name []byte, id TypeID) {
	// Only the low 32 bits of the hash are kept; collisions are resolved by
	// the caller of Find.
	hash := uint32(maphash.Bytes(idx.seed, name))
	idx.entries = append(idx.entries, newFuzzyStringIndexEntry(hash, id))
}

// Build the index.
//
// Must be called after [Add] and before [Match].
func (idx *fuzzyStringIndex) Build() {
	// Sorting groups entries with equal hashes so Find can binary search.
	slices.Sort(idx.entries)
}

// Find TypeIDs which may match the name.
//
// May return false positives, but is guaranteed to not have false negatives.
//
// You must call [Build] at least once before calling this method.
func (idx *fuzzyStringIndex) Find(name string) iter.Seq[TypeID] {
	return func(yield func(TypeID) bool) {
		hash := uint32(maphash.String(idx.seed, name))
		// We match only on the first 32 bits here, so ignore found.
		i, _ := slices.BinarySearch(idx.entries, fuzzyStringIndexEntry(hash)<<32)
		// Yield every consecutive entry sharing the hash.
		for i := i; i < len(idx.entries); i++ {
			if idx.entries[i].hash() != hash {
				break
			}
			if !yield(idx.entries[i].id()) {
				return
			}
		}
	}
}
// Tuple mapping the hash of an essential name to a type.
//
// Encoded in an uint64 so that it implements cmp.Ordered. The hash occupies
// the high 32 bits so that sorting groups equal hashes together.
type fuzzyStringIndexEntry uint64

// newFuzzyStringIndexEntry packs hash and id into a single entry.
func newFuzzyStringIndexEntry(hash uint32, id TypeID) fuzzyStringIndexEntry {
	return fuzzyStringIndexEntry(hash)<<32 | fuzzyStringIndexEntry(id)
}

// hash returns the name hash stored in the high 32 bits.
func (e fuzzyStringIndexEntry) hash() uint32 {
	return uint32(e >> 32)
}

// id returns the TypeID stored in the low 32 bits.
func (e fuzzyStringIndexEntry) id() TypeID {
	return TypeID(e)
}

26
go/vendor/github.com/cilium/ebpf/btf/workarounds.go generated vendored Normal file
View File

@@ -0,0 +1,26 @@
package btf
// datasecResolveWorkaround ensures that certain vars in a Datasec are added
// to a Spec before the Datasec. This avoids a bug in kernel BTF validation.
//
// See https://lore.kernel.org/bpf/20230302123440.1193507-1-lmb@isovalent.com/
func datasecResolveWorkaround(b *Builder, ds *Datasec) error {
	for _, vsi := range ds.Vars {
		v, ok := vsi.Type.(*Var)
		if !ok {
			continue
		}
		// Only indirect (typedef'd or qualified) var types trigger the kernel
		// bug; add those types ahead of the Datasec.
		switch v.Type.(type) {
		case *Typedef, *Volatile, *Const, *Restrict, *TypeTag:
			// NB: We must never call Add on a Datasec, otherwise we risk
			// infinite recursion.
			_, err := b.Add(v.Type)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

1067
go/vendor/github.com/cilium/ebpf/collection.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

9
go/vendor/github.com/cilium/ebpf/collection_other.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
//go:build !windows
package ebpf
import "github.com/cilium/ebpf/internal"
// loadCollectionFromNativeImage always fails: native images are a Windows
// (eBPF for Windows) concept, see collection_windows.go.
func loadCollectionFromNativeImage(_ string) (*Collection, error) {
	return nil, internal.ErrNotSupportedOnOS
}

136
go/vendor/github.com/cilium/ebpf/collection_windows.go generated vendored Normal file
View File

@@ -0,0 +1,136 @@
package ebpf
import (
"errors"
"fmt"
"unsafe"
"github.com/cilium/ebpf/internal/efw"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
// loadCollectionFromNativeImage loads maps and programs from a native image
// via the eBPF for Windows runtime and assembles them into a Collection.
// On error, all file descriptors and objects created so far are released by
// the deferred cleanup.
func loadCollectionFromNativeImage(file string) (_ *Collection, err error) {
	mapFds := make([]efw.FD, 16)
	programFds := make([]efw.FD, 16)
	var maps map[string]*Map
	var programs map[string]*Program
	defer func() {
		if err == nil {
			return
		}
		for _, fd := range append(mapFds, programFds...) {
			// efW never uses fd 0.
			if fd != 0 {
				_ = efw.EbpfCloseFd(int(fd))
			}
		}
		for _, m := range maps {
			_ = m.Close()
		}
		for _, p := range programs {
			_ = p.Close()
		}
	}()
	nMaps, nPrograms, err := efw.EbpfObjectLoadNativeFds(file, mapFds, programFds)
	if errors.Is(err, efw.EBPF_NO_MEMORY) && (nMaps > len(mapFds) || nPrograms > len(programFds)) {
		// The initial guess of 16 was too small; retry with exact sizes.
		mapFds = make([]efw.FD, nMaps)
		programFds = make([]efw.FD, nPrograms)
		nMaps, nPrograms, err = efw.EbpfObjectLoadNativeFds(file, mapFds, programFds)
	}
	if err != nil {
		return nil, err
	}
	mapFds = mapFds[:nMaps]
	programFds = programFds[:nPrograms]
	// The maximum length of a name is only 16 bytes on Linux, longer names
	// are truncated. This is not a problem when loading from an ELF, since
	// we get the full object name from the symbol table.
	// When loading a native image we do not have this luxury. Use an efW native
	// API to retrieve up to 64 bytes of the object name.
	maps = make(map[string]*Map, len(mapFds))
	for _, raw := range mapFds {
		fd, err := sys.NewFD(int(raw))
		if err != nil {
			return nil, err
		}
		m, mapErr := newMapFromFD(fd)
		if mapErr != nil {
			_ = fd.Close()
			return nil, mapErr
		}
		var efwMapInfo efw.BpfMapInfo
		size := uint32(unsafe.Sizeof(efwMapInfo))
		_, err = efw.EbpfObjectGetInfoByFd(m.FD(), unsafe.Pointer(&efwMapInfo), &size)
		if err != nil {
			_ = m.Close()
			return nil, err
		}
		// Only trust the name if the returned info is large enough to
		// contain the whole Name field.
		if size >= uint32(unsafe.Offsetof(efwMapInfo.Name)+unsafe.Sizeof(efwMapInfo.Name)) {
			m.name = unix.ByteSliceToString(efwMapInfo.Name[:])
		}
		if m.name == "" {
			_ = m.Close()
			return nil, fmt.Errorf("unnamed map")
		}
		if _, ok := maps[m.name]; ok {
			return nil, fmt.Errorf("duplicate map with the same name: %s", m.name)
		}
		maps[m.name] = m
	}
	programs = make(map[string]*Program, len(programFds))
	for _, raw := range programFds {
		fd, err := sys.NewFD(int(raw))
		if err != nil {
			return nil, err
		}
		program, err := newProgramFromFD(fd)
		if err != nil {
			_ = fd.Close()
			return nil, err
		}
		var efwProgInfo efw.BpfProgInfo
		size := uint32(unsafe.Sizeof(efwProgInfo))
		_, err = efw.EbpfObjectGetInfoByFd(program.FD(), unsafe.Pointer(&efwProgInfo), &size)
		if err != nil {
			_ = program.Close()
			return nil, err
		}
		if size >= uint32(unsafe.Offsetof(efwProgInfo.Name)+unsafe.Sizeof(efwProgInfo.Name)) {
			program.name = unix.ByteSliceToString(efwProgInfo.Name[:])
		}
		if program.name == "" {
			_ = program.Close()
			return nil, fmt.Errorf("unnamed program")
		}
		if _, ok := programs[program.name]; ok {
			_ = program.Close()
			return nil, fmt.Errorf("duplicate program with the same name: %s", program.name)
		}
		programs[program.name] = program
	}
	return &Collection{programs, maps, nil}, nil
}

17
go/vendor/github.com/cilium/ebpf/cpu.go generated vendored Normal file
View File

@@ -0,0 +1,17 @@
package ebpf
// PossibleCPU returns the max number of CPUs a system may possibly have
// Logical CPU numbers must be of the form 0-n
func PossibleCPU() (int, error) {
	return possibleCPU()
}

// MustPossibleCPU is a helper that wraps a call to PossibleCPU and panics if
// the error is non-nil.
func MustPossibleCPU() int {
	n, err := PossibleCPU()
	if err != nil {
		panic(err)
	}
	return n
}

13
go/vendor/github.com/cilium/ebpf/cpu_other.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
//go:build !windows
package ebpf
import (
"sync"
"github.com/cilium/ebpf/internal/linux"
)
// possibleCPU memoizes the possible-CPU count parsed from sysfs.
var possibleCPU = sync.OnceValues(func() (int, error) {
	return linux.ParseCPUsFromFile("/sys/devices/system/cpu/possible")
})

11
go/vendor/github.com/cilium/ebpf/cpu_windows.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
package ebpf
import (
"sync"
"golang.org/x/sys/windows"
)
// possibleCPU memoizes the maximum processor count reported by Windows.
var possibleCPU = sync.OnceValues(func() (int, error) {
	count := windows.GetMaximumProcessorCount(windows.ALL_PROCESSOR_GROUPS)
	return int(count), nil
})

25
go/vendor/github.com/cilium/ebpf/doc.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
// Package ebpf is a toolkit for working with eBPF programs.
//
// eBPF programs are small snippets of code which are executed directly
// in a VM in the Linux kernel, which makes them very fast and flexible.
// Many Linux subsystems now accept eBPF programs. This makes it possible
// to implement highly application specific logic inside the kernel,
// without having to modify the actual kernel itself.
//
// This package is designed for long-running processes which
// want to use eBPF to implement part of their application logic. It has no
// run-time dependencies outside of the library and the Linux kernel itself.
// eBPF code should be compiled ahead of time using clang, and shipped with
// your application as any other resource.
//
// Use the link subpackage to attach a loaded program to a hook in the kernel.
//
// Note that losing all references to Map and Program resources will cause
// their underlying file descriptors to be closed, potentially removing those
// objects from the kernel. Always retain a reference by e.g. deferring a
// Close() of a Collection or LoadAndAssign object until application exit.
//
// Special care needs to be taken when handling maps of type ProgramArray,
// as the kernel erases its contents when the last userspace or bpffs
// reference disappears, regardless of the map being in active use.
package ebpf

1492
go/vendor/github.com/cilium/ebpf/elf_reader.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

111
go/vendor/github.com/cilium/ebpf/elf_sections.go generated vendored Normal file
View File

@@ -0,0 +1,111 @@
// Code generated by internal/cmd/gensections.awk; DO NOT EDIT.
package ebpf
// Code in this file is derived from libbpf, available under BSD-2-Clause.
import "github.com/cilium/ebpf/internal/sys"
// elfSectionDefs maps ELF section name patterns to program type, attach type
// and section flags, mirroring libbpf's section definitions.
//
// NOTE(review): more specific patterns appear before their generic fallbacks
// (e.g. "tc/ingress" before "tc", "xdp.frags/devmap" before "xdp") —
// presumably matching is order-dependent; confirm against the matcher before
// reordering, and regenerate via internal/cmd/gensections.awk rather than
// editing by hand.
var elfSectionDefs = []libbpfElfSectionDef{
	{"socket", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE},
	{"sk_reuseport/migrate", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, _SEC_ATTACHABLE},
	{"sk_reuseport", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT, _SEC_ATTACHABLE},
	{"kprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
	{"uprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
	{"uprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE},
	{"kretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
	{"uretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
	{"uretprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE},
	{"kprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE},
	{"kretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE},
	{"kprobe.session+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_SESSION, _SEC_NONE},
	{"uprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE},
	{"uretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE},
	{"uprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE},
	{"uretprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE},
	{"ksyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
	{"kretsyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
	{"usdt+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT},
	{"usdt.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT | _SEC_SLEEPABLE},
	{"tc/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE},
	{"tc/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE},
	{"tcx/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE},
	{"tcx/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE},
	{"tc", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE},
	{"classifier", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE},
	{"action", sys.BPF_PROG_TYPE_SCHED_ACT, 0, _SEC_NONE},
	{"netkit/primary", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PRIMARY, _SEC_NONE},
	{"netkit/peer", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PEER, _SEC_NONE},
	{"tracepoint+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE},
	{"tp+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE},
	{"raw_tracepoint+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE},
	{"raw_tp+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE},
	{"raw_tracepoint.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE},
	{"raw_tp.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE},
	{"tp_btf+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_RAW_TP, _SEC_ATTACH_BTF},
	{"fentry+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF},
	{"fmod_ret+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF},
	{"fexit+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF},
	{"fentry.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
	{"fmod_ret.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
	{"fexit.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
	{"freplace+", sys.BPF_PROG_TYPE_EXT, 0, _SEC_ATTACH_BTF},
	{"lsm+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF},
	{"lsm.s+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
	{"lsm_cgroup+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_CGROUP, _SEC_ATTACH_BTF},
	{"iter+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF},
	{"iter.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
	{"syscall", sys.BPF_PROG_TYPE_SYSCALL, 0, _SEC_SLEEPABLE},
	{"xdp.frags/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS},
	{"xdp/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_ATTACHABLE},
	{"xdp.frags/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS},
	{"xdp/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_ATTACHABLE},
	{"xdp.frags", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS},
	{"xdp", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_ATTACHABLE_OPT},
	{"perf_event", sys.BPF_PROG_TYPE_PERF_EVENT, 0, _SEC_NONE},
	{"lwt_in", sys.BPF_PROG_TYPE_LWT_IN, 0, _SEC_NONE},
	{"lwt_out", sys.BPF_PROG_TYPE_LWT_OUT, 0, _SEC_NONE},
	{"lwt_xmit", sys.BPF_PROG_TYPE_LWT_XMIT, 0, _SEC_NONE},
	{"lwt_seg6local", sys.BPF_PROG_TYPE_LWT_SEG6LOCAL, 0, _SEC_NONE},
	{"sockops", sys.BPF_PROG_TYPE_SOCK_OPS, sys.BPF_CGROUP_SOCK_OPS, _SEC_ATTACHABLE_OPT},
	{"sk_skb/stream_parser", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_PARSER, _SEC_ATTACHABLE_OPT},
	{"sk_skb/stream_verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_VERDICT, _SEC_ATTACHABLE_OPT},
	{"sk_skb/verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_VERDICT, _SEC_ATTACHABLE_OPT},
	{"sk_skb", sys.BPF_PROG_TYPE_SK_SKB, 0, _SEC_NONE},
	{"sk_msg", sys.BPF_PROG_TYPE_SK_MSG, sys.BPF_SK_MSG_VERDICT, _SEC_ATTACHABLE_OPT},
	{"lirc_mode2", sys.BPF_PROG_TYPE_LIRC_MODE2, sys.BPF_LIRC_MODE2, _SEC_ATTACHABLE_OPT},
	{"flow_dissector", sys.BPF_PROG_TYPE_FLOW_DISSECTOR, sys.BPF_FLOW_DISSECTOR, _SEC_ATTACHABLE_OPT},
	{"cgroup_skb/ingress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_INGRESS, _SEC_ATTACHABLE_OPT},
	{"cgroup_skb/egress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_EGRESS, _SEC_ATTACHABLE_OPT},
	{"cgroup/skb", sys.BPF_PROG_TYPE_CGROUP_SKB, 0, _SEC_NONE},
	{"cgroup/sock_create", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE},
	{"cgroup/sock_release", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_RELEASE, _SEC_ATTACHABLE},
	{"cgroup/sock", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE_OPT},
	{"cgroup/post_bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET4_POST_BIND, _SEC_ATTACHABLE},
	{"cgroup/post_bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET6_POST_BIND, _SEC_ATTACHABLE},
	{"cgroup/bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_BIND, _SEC_ATTACHABLE},
	{"cgroup/bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_BIND, _SEC_ATTACHABLE},
	{"cgroup/connect4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_CONNECT, _SEC_ATTACHABLE},
	{"cgroup/connect6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_CONNECT, _SEC_ATTACHABLE},
	{"cgroup/connect_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_CONNECT, _SEC_ATTACHABLE},
	{"cgroup/sendmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_SENDMSG, _SEC_ATTACHABLE},
	{"cgroup/sendmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_SENDMSG, _SEC_ATTACHABLE},
	{"cgroup/sendmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_SENDMSG, _SEC_ATTACHABLE},
	{"cgroup/recvmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_RECVMSG, _SEC_ATTACHABLE},
	{"cgroup/recvmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_RECVMSG, _SEC_ATTACHABLE},
	{"cgroup/recvmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_RECVMSG, _SEC_ATTACHABLE},
	{"cgroup/getpeername4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETPEERNAME, _SEC_ATTACHABLE},
	{"cgroup/getpeername6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETPEERNAME, _SEC_ATTACHABLE},
	{"cgroup/getpeername_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETPEERNAME, _SEC_ATTACHABLE},
	{"cgroup/getsockname4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETSOCKNAME, _SEC_ATTACHABLE},
	{"cgroup/getsockname6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETSOCKNAME, _SEC_ATTACHABLE},
	{"cgroup/getsockname_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETSOCKNAME, _SEC_ATTACHABLE},
	{"cgroup/sysctl", sys.BPF_PROG_TYPE_CGROUP_SYSCTL, sys.BPF_CGROUP_SYSCTL, _SEC_ATTACHABLE},
	{"cgroup/getsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_GETSOCKOPT, _SEC_ATTACHABLE},
	{"cgroup/setsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_SETSOCKOPT, _SEC_ATTACHABLE},
	{"cgroup/dev", sys.BPF_PROG_TYPE_CGROUP_DEVICE, sys.BPF_CGROUP_DEVICE, _SEC_ATTACHABLE_OPT},
	{"struct_ops+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_NONE},
	{"struct_ops.s+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_SLEEPABLE},
	{"sk_lookup", sys.BPF_PROG_TYPE_SK_LOOKUP, sys.BPF_SK_LOOKUP, _SEC_ATTACHABLE},
	{"netfilter", sys.BPF_PROG_TYPE_NETFILTER, sys.BPF_NETFILTER, _SEC_NONE},
}

939
go/vendor/github.com/cilium/ebpf/info.go generated vendored Normal file
View File

@@ -0,0 +1,939 @@
package ebpf
import (
"bufio"
"bytes"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"reflect"
"time"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/platform"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
// The *Info structs expose metadata about a program or map. Most
// fields are exposed via a getter:
//
// func (*MapInfo) ID() (MapID, bool)
//
// This is because the metadata available changes based on kernel version.
// The second boolean return value indicates whether a particular field is
// available on the current kernel.
//
// Always add new metadata as such a getter, unless you can somehow get the
// value of the field on all supported kernels. Also document which version
// a particular field first appeared in.
//
// Some metadata is a buffer which needs additional parsing. In this case,
// store the undecoded data in the Info struct and provide a getter which
// decodes it when necessary. See ProgramInfo.Instructions for an example.
// MapInfo describes a map.
type MapInfo struct {
	// Type of the map.
	Type MapType
	// KeySize is the size of the map key in bytes.
	KeySize uint32
	// ValueSize is the size of the map value in bytes.
	ValueSize uint32
	// MaxEntries is the maximum number of entries the map can hold. Its meaning
	// is map-specific.
	MaxEntries uint32
	// Flags used during map creation.
	Flags uint32
	// Name as supplied by user space at load time. Available from 4.15.
	Name string

	// id is the kernel's map ID; zero when unavailable. Exposed via ID().
	id MapID
	// btf is the ID of the map's BTF object; zero when absent. Exposed via BTFID().
	btf btf.ID
	// mapExtra is an opaque, map-specific value. Exposed via MapExtra().
	mapExtra uint64
	// memlock is the approximate number of bytes charged to the map, populated
	// from /proc/self/fdinfo. Exposed via Memlock().
	memlock uint64
	// frozen reports whether the map rejects writes from user space, populated
	// from /proc/self/fdinfo. Exposed via Frozen().
	frozen bool
}
// minimalMapInfoFromFd queries the minimum information needed to create a Map
// based on a file descriptor: the map type, key/value sizes, max entries and
// flags.
//
// Does not fall back to fdinfo since the version gap between fdinfo (4.10) and
// [sys.ObjInfo] (4.13) is small and both kernels are EOL since at least Nov
// 2017.
//
// Requires at least Linux 4.13.
func minimalMapInfoFromFd(fd *sys.FD) (*MapInfo, error) {
	var raw sys.MapInfo
	if err := sys.ObjInfo(fd, &raw); err != nil {
		return nil, fmt.Errorf("getting object info: %w", err)
	}

	mapType, err := MapTypeForPlatform(platform.Native, raw.Type)
	if err != nil {
		return nil, fmt.Errorf("map type: %w", err)
	}

	mi := &MapInfo{
		Type:       mapType,
		KeySize:    raw.KeySize,
		ValueSize:  raw.ValueSize,
		MaxEntries: raw.MaxEntries,
		Flags:      uint32(raw.MapFlags),
		Name:       unix.ByteSliceToString(raw.Name[:]),
	}
	return mi, nil
}
// newMapInfoFromFd queries map information about the given fd. [sys.ObjInfo] is
// attempted first, supplementing any missing values with information from
// /proc/self/fdinfo. Ignores EINVAL from ObjInfo as well as ErrNotSupported
// from reading fdinfo (indicating the file exists, but no fields of interest
// were found). If both fail, an error is always returned.
func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) {
	var info sys.MapInfo
	err1 := sys.ObjInfo(fd, &info)
	// EINVAL means the kernel doesn't support BPF_OBJ_GET_INFO_BY_FD. Continue
	// with fdinfo if that's the case.
	if err1 != nil && !errors.Is(err1, unix.EINVAL) {
		return nil, fmt.Errorf("getting object info: %w", err1)
	}

	typ, err := MapTypeForPlatform(platform.Native, info.Type)
	if err != nil {
		return nil, fmt.Errorf("map type: %w", err)
	}

	// Use named fields instead of a positional literal so adding a field to
	// MapInfo cannot silently shift values into the wrong slot. memlock and
	// frozen keep their zero values here; they are only populated from fdinfo
	// below.
	mi := &MapInfo{
		Type:       typ,
		KeySize:    info.KeySize,
		ValueSize:  info.ValueSize,
		MaxEntries: info.MaxEntries,
		Flags:      uint32(info.MapFlags),
		Name:       unix.ByteSliceToString(info.Name[:]),
		id:         MapID(info.Id),
		btf:        btf.ID(info.BtfId),
		mapExtra:   info.MapExtra,
	}

	// Supplement OBJ_INFO with data from /proc/self/fdinfo. It contains fields
	// like memlock and frozen that are not present in OBJ_INFO.
	err2 := readMapInfoFromProc(fd, mi)
	if err2 != nil && !errors.Is(err2, ErrNotSupported) {
		return nil, fmt.Errorf("getting map info from fdinfo: %w", err2)
	}

	if err1 != nil && err2 != nil {
		return nil, fmt.Errorf("ObjInfo and fdinfo both failed: objinfo: %w, fdinfo: %w", err1, err2)
	}

	return mi, nil
}
// readMapInfoFromProc queries map information about the given fd from
// /proc/self/fdinfo. It only writes data into fields that have a zero value.
func readMapInfoFromProc(fd *sys.FD, mi *MapInfo) error {
	var rawType uint32
	fields := map[string]interface{}{
		"map_type":    &rawType,
		"map_id":      &mi.id,
		"key_size":    &mi.KeySize,
		"value_size":  &mi.ValueSize,
		"max_entries": &mi.MaxEntries,
		"map_flags":   &mi.Flags,
		"map_extra":   &mi.mapExtra,
		"memlock":     &mi.memlock,
		"frozen":      &mi.frozen,
	}
	if err := scanFdInfo(fd, fields); err != nil {
		return err
	}

	// Only translate the raw type when ObjInfo didn't already provide one.
	if mi.Type != 0 {
		return nil
	}
	typ, err := MapTypeForPlatform(platform.Linux, rawType)
	if err != nil {
		return fmt.Errorf("map type: %w", err)
	}
	mi.Type = typ
	return nil
}
// ID returns the map ID.
//
// Available from 4.13.
//
// The bool return value indicates whether this optional field is available.
func (mi *MapInfo) ID() (MapID, bool) {
	present := mi.id > 0
	return mi.id, present
}
// BTFID returns the BTF ID associated with the Map.
//
// The ID is only valid as long as the associated Map is kept alive.
// Available from 4.18.
//
// The bool return value indicates whether this optional field is available and
// populated. (The field may be available but not populated if the kernel
// supports the field but the Map was loaded without BTF information.)
func (mi *MapInfo) BTFID() (btf.ID, bool) {
	present := mi.btf > 0
	return mi.btf, present
}
// MapExtra returns an opaque field whose meaning is map-specific.
//
// Available from 5.16.
//
// The bool return value indicates whether this optional field is available and
// populated, if it was specified during Map creation.
func (mi *MapInfo) MapExtra() (uint64, bool) {
	present := mi.mapExtra > 0
	return mi.mapExtra, present
}
// Memlock returns an approximate number of bytes allocated to this map.
//
// Available from 4.10.
//
// The bool return value indicates whether this optional field is available.
func (mi *MapInfo) Memlock() (uint64, bool) {
	present := mi.memlock > 0
	return mi.memlock, present
}
// Frozen indicates whether [Map.Freeze] was called on this map. If true,
// modifications from user space are not allowed.
//
// Available from 5.2. Requires access to procfs.
//
// If the kernel doesn't support map freezing, this field will always be false.
func (mi *MapInfo) Frozen() bool {
	frozen := mi.frozen
	return frozen
}
// ProgramStats contains runtime statistics for a single [Program], returned by
// [Program.Stats].
//
// Will contain mostly zero values if the collection of statistics is not
// enabled, see [EnableStats].
type ProgramStats struct {
	// Runtime is the total accumulated runtime of the Program.
	//
	// Requires at least Linux 5.8.
	Runtime time.Duration
	// RunCount is the total number of times the Program has executed.
	//
	// Requires at least Linux 5.8.
	RunCount uint64
	// RecursionMisses is the total number of times the program was not executed
	// due to recursion. This can happen when another bpf program is already
	// running on the cpu, when bpf program execution is interrupted, for
	// example.
	//
	// Requires at least Linux 5.12.
	RecursionMisses uint64
}
// newProgramStatsFromFd queries runtime statistics for the program behind fd.
func newProgramStatsFromFd(fd *sys.FD) (*ProgramStats, error) {
	var info sys.ProgInfo
	err := sys.ObjInfo(fd, &info)
	if err != nil {
		return nil, fmt.Errorf("getting program info: %w", err)
	}

	stats := ProgramStats{
		Runtime:         time.Duration(info.RunTimeNs),
		RunCount:        info.RunCnt,
		RecursionMisses: info.RecursionMisses,
	}
	return &stats, nil
}
// programJitedInfo holds information about JITed info of a program.
//
// Each slice is paired with a num* count as reported by the kernel's object
// info call that sized the buffer.
type programJitedInfo struct {
	// ksyms holds the ksym addresses of the BPF program, including those of its
	// subprograms.
	//
	// Available from 4.18.
	ksyms []uint64
	// numKsyms is the count reported by the kernel (NrJitedKsyms).
	numKsyms uint32

	// insns holds the JITed machine native instructions of the program,
	// including those of its subprograms.
	//
	// Available from 4.13.
	insns []byte
	// numInsns is the byte length reported by the kernel (JitedProgLen).
	numInsns uint32

	// lineInfos holds the JITed line infos, which are kernel addresses.
	//
	// Available from 5.0.
	lineInfos []uint64
	// numLineInfos is the count reported by the kernel (NrJitedLineInfo).
	numLineInfos uint32

	// lineInfoRecSize is the size of a single line info record.
	//
	// Available from 5.0.
	lineInfoRecSize uint32

	// funcLens holds the insns length of each function.
	//
	// Available from 4.18.
	funcLens []uint32
	// numFuncLens is the count reported by the kernel (NrJitedFuncLens).
	numFuncLens uint32
}
// ProgramInfo describes a Program's immutable metadata. For runtime statistics,
// see [ProgramStats].
type ProgramInfo struct {
	Type ProgramType
	// id is the kernel's program ID; zero when unavailable. Exposed via ID().
	id ProgramID
	// Truncated hash of the BPF bytecode. Available from 4.13.
	Tag string
	// Name as supplied by user space at load time. Available from 4.15.
	Name string

	// createdByUID is the uid that loaded the program; haveCreatedByUID reports
	// whether the kernel provided it. Exposed via CreatedByUID().
	createdByUID     uint32
	haveCreatedByUID bool
	// btf is the ID of the program's BTF object; zero when absent. Exposed via BTFID().
	btf btf.ID
	// loadTime is when the program was loaded, relative to boot. Exposed via LoadTime().
	loadTime time.Duration

	// maps holds the IDs of maps used by the program; nil when the kernel
	// doesn't report them. Exposed via MapIDs().
	maps []MapID
	// insns is the raw 'xlated' instruction stream. Exposed via Instructions().
	insns []byte
	// jitedSize is the byte length of the JIT-compiled code. Exposed via JitedSize().
	jitedSize uint32
	// verifiedInstructions is the verifier's instruction count. Exposed via
	// VerifiedInstructions().
	verifiedInstructions uint32
	// jitedInfo collects the JIT-related buffers; see programJitedInfo.
	jitedInfo programJitedInfo

	// lineInfos and funcInfos hold raw BTF line/func info records together with
	// their record counts.
	lineInfos    []byte
	numLineInfos uint32
	funcInfos    []byte
	numFuncInfos uint32
	// memlock is populated from /proc/self/fdinfo.
	memlock uint64
}
// minimalProgramInfoFromFd queries the minimum information needed to create a
// Program based on a file descriptor, requiring at least the program type.
//
// Does not fall back to fdinfo since the version gap between fdinfo (4.10) and
// [sys.ObjInfo] (4.13) is small and both kernels are EOL since at least Nov
// 2017.
//
// Requires at least Linux 4.13.
func minimalProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) {
	var info sys.ProgInfo
	if err := sys.ObjInfo(fd, &info); err != nil {
		return nil, fmt.Errorf("getting object info: %w", err)
	}

	typ, err := ProgramTypeForPlatform(platform.Native, info.Type)
	if err != nil {
		return nil, fmt.Errorf("program type: %w", err)
	}

	return &ProgramInfo{
		Type: typ,
		Name: unix.ByteSliceToString(info.Name[:]),
	}, nil
}
// newProgramInfoFromFd queries program information about the given fd.
//
// [sys.ObjInfo] is attempted first, supplementing any missing values with
// information from /proc/self/fdinfo. Ignores EINVAL from ObjInfo as well as
// ErrNotSupported from reading fdinfo (indicating the file exists, but no
// fields of interest were found). If both fail, an error is always returned.
func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) {
	var info sys.ProgInfo
	err1 := sys.ObjInfo(fd, &info)
	// EINVAL means the kernel doesn't support BPF_OBJ_GET_INFO_BY_FD. Continue
	// with fdinfo if that's the case.
	if err1 != nil && !errors.Is(err1, unix.EINVAL) {
		return nil, fmt.Errorf("getting object info: %w", err1)
	}

	typ, err := ProgramTypeForPlatform(platform.Native, info.Type)
	if err != nil {
		return nil, fmt.Errorf("program type: %w", err)
	}

	pi := ProgramInfo{
		Type:                 typ,
		id:                   ProgramID(info.Id),
		Tag:                  hex.EncodeToString(info.Tag[:]),
		Name:                 unix.ByteSliceToString(info.Name[:]),
		btf:                  btf.ID(info.BtfId),
		jitedSize:            info.JitedProgLen,
		loadTime:             time.Duration(info.LoadTime),
		verifiedInstructions: info.VerifiedInsns,
	}

	// Supplement OBJ_INFO with data from /proc/self/fdinfo. It contains fields
	// like memlock that is not present in OBJ_INFO.
	err2 := readProgramInfoFromProc(fd, &pi)
	if err2 != nil && !errors.Is(err2, ErrNotSupported) {
		// Fixed copy-paste: this is the program info path, not the map one.
		return nil, fmt.Errorf("getting program info from fdinfo: %w", err2)
	}

	if err1 != nil && err2 != nil {
		return nil, fmt.Errorf("ObjInfo and fdinfo both failed: objinfo: %w, fdinfo: %w", err1, err2)
	}

	if platform.IsWindows && info.Tag == [8]uint8{} {
		// Windows doesn't support the tag field, clear it for now.
		pi.Tag = ""
	}

	// The first ObjInfo call only reported buffer sizes. Allocate buffers for
	// every populated field and issue a second call to fill them.
	// Start with a clean struct for the second call, otherwise we may get EFAULT.
	var info2 sys.ProgInfo
	makeSecondCall := false

	if info.NrMapIds > 0 {
		pi.maps = make([]MapID, info.NrMapIds)
		info2.NrMapIds = info.NrMapIds
		info2.MapIds = sys.SlicePointer(pi.maps)
		makeSecondCall = true
	} else if haveProgramInfoMapIDs() == nil {
		// This program really has no associated maps.
		pi.maps = make([]MapID, 0)
	} else {
		// The kernel doesn't report associated maps.
		pi.maps = nil
	}

	// createdByUID and NrMapIds were introduced in the same kernel version.
	if pi.maps != nil && platform.IsLinux {
		pi.createdByUID = info.CreatedByUid
		pi.haveCreatedByUID = true
	}

	if info.XlatedProgLen > 0 {
		pi.insns = make([]byte, info.XlatedProgLen)
		info2.XlatedProgLen = info.XlatedProgLen
		info2.XlatedProgInsns = sys.SlicePointer(pi.insns)
		makeSecondCall = true
	}

	if info.NrLineInfo > 0 {
		pi.lineInfos = make([]byte, btf.LineInfoSize*info.NrLineInfo)
		info2.LineInfo = sys.SlicePointer(pi.lineInfos)
		info2.LineInfoRecSize = btf.LineInfoSize
		info2.NrLineInfo = info.NrLineInfo
		pi.numLineInfos = info.NrLineInfo
		makeSecondCall = true
	}

	if info.NrFuncInfo > 0 {
		pi.funcInfos = make([]byte, btf.FuncInfoSize*info.NrFuncInfo)
		info2.FuncInfo = sys.SlicePointer(pi.funcInfos)
		info2.FuncInfoRecSize = btf.FuncInfoSize
		info2.NrFuncInfo = info.NrFuncInfo
		pi.numFuncInfos = info.NrFuncInfo
		makeSecondCall = true
	}

	pi.jitedInfo.lineInfoRecSize = info.JitedLineInfoRecSize
	if info.JitedProgLen > 0 {
		pi.jitedInfo.numInsns = info.JitedProgLen
		pi.jitedInfo.insns = make([]byte, info.JitedProgLen)
		info2.JitedProgLen = info.JitedProgLen
		info2.JitedProgInsns = sys.SlicePointer(pi.jitedInfo.insns)
		makeSecondCall = true
	}

	if info.NrJitedFuncLens > 0 {
		pi.jitedInfo.numFuncLens = info.NrJitedFuncLens
		pi.jitedInfo.funcLens = make([]uint32, info.NrJitedFuncLens)
		info2.NrJitedFuncLens = info.NrJitedFuncLens
		info2.JitedFuncLens = sys.SlicePointer(pi.jitedInfo.funcLens)
		makeSecondCall = true
	}

	if info.NrJitedLineInfo > 0 {
		pi.jitedInfo.numLineInfos = info.NrJitedLineInfo
		pi.jitedInfo.lineInfos = make([]uint64, info.NrJitedLineInfo)
		info2.NrJitedLineInfo = info.NrJitedLineInfo
		info2.JitedLineInfo = sys.SlicePointer(pi.jitedInfo.lineInfos)
		info2.JitedLineInfoRecSize = info.JitedLineInfoRecSize
		makeSecondCall = true
	}

	if info.NrJitedKsyms > 0 {
		pi.jitedInfo.numKsyms = info.NrJitedKsyms
		pi.jitedInfo.ksyms = make([]uint64, info.NrJitedKsyms)
		info2.JitedKsyms = sys.SlicePointer(pi.jitedInfo.ksyms)
		info2.NrJitedKsyms = info.NrJitedKsyms
		makeSecondCall = true
	}

	if makeSecondCall {
		if err := sys.ObjInfo(fd, &info2); err != nil {
			return nil, err
		}
	}

	return &pi, nil
}
// readProgramInfoFromProc fills pi with fields scanned from /proc/self/fdinfo.
func readProgramInfoFromProc(fd *sys.FD, pi *ProgramInfo) error {
	var rawType uint32
	err := scanFdInfo(fd, map[string]interface{}{
		"prog_type": &rawType,
		"prog_tag":  &pi.Tag,
		"memlock":   &pi.memlock,
	})
	if errors.Is(err, ErrNotSupported) && !errors.Is(err, internal.ErrNotSupportedOnOS) {
		return &internal.UnsupportedFeatureError{
			Name:           "reading program info from /proc/self/fdinfo",
			MinimumVersion: internal.Version{4, 10, 0},
		}
	}
	if err != nil {
		return err
	}

	typ, err := ProgramTypeForPlatform(platform.Linux, rawType)
	if err != nil {
		return fmt.Errorf("program type: %w", err)
	}
	pi.Type = typ
	return nil
}
// ID returns the program ID.
//
// Available from 4.13.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) ID() (ProgramID, bool) {
	present := pi.id > 0
	return pi.id, present
}
// CreatedByUID returns the Uid that created the program.
//
// Available from 4.15.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) CreatedByUID() (uint32, bool) {
	uid, ok := pi.createdByUID, pi.haveCreatedByUID
	return uid, ok
}
// BTFID returns the BTF ID associated with the program.
//
// The ID is only valid as long as the associated program is kept alive.
// Available from 5.0.
//
// The bool return value indicates whether this optional field is available and
// populated. (The field may be available but not populated if the kernel
// supports the field but the program was loaded without BTF information.)
func (pi *ProgramInfo) BTFID() (btf.ID, bool) {
	present := pi.btf > 0
	return pi.btf, present
}
// btfSpec returns the BTF spec associated with the program.
func (pi *ProgramInfo) btfSpec() (*btf.Spec, error) {
	id, ok := pi.BTFID()
	if !ok {
		return nil, fmt.Errorf("program created without BTF or unsupported kernel: %w", ErrNotSupported)
	}

	handle, err := btf.NewHandleFromID(id)
	if err != nil {
		return nil, fmt.Errorf("get BTF handle: %w", err)
	}
	defer handle.Close()

	spec, err := handle.Spec(nil)
	if err != nil {
		return nil, fmt.Errorf("get BTF spec: %w", err)
	}
	return spec, nil
}
// LineInfos returns the BTF line information of the program.
//
// Available from 5.0.
//
// Requires CAP_SYS_ADMIN or equivalent for reading BTF information. Returns
// ErrNotSupported if the program was created without BTF or if the kernel
// doesn't support the field.
func (pi *ProgramInfo) LineInfos() (btf.LineOffsets, error) {
	if len(pi.lineInfos) == 0 {
		return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported)
	}

	spec, err := pi.btfSpec()
	if err != nil {
		return nil, err
	}

	r := bytes.NewReader(pi.lineInfos)
	return btf.LoadLineInfos(r, internal.NativeEndian, pi.numLineInfos, spec)
}
// Instructions returns the 'xlated' instruction stream of the program
// after it has been verified and rewritten by the kernel. These instructions
// cannot be loaded back into the kernel as-is, this is mainly used for
// inspecting loaded programs for troubleshooting, dumping, etc.
//
// For example, map accesses are made to reference their kernel map IDs,
// not the FDs they had when the program was inserted. Note that before
// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated
// instructions were not sanitized, making the output even less reusable
// and less likely to round-trip or evaluate to the same program Tag.
//
// The first instruction is marked as a symbol using the Program's name.
//
// If available, the instructions will be annotated with metadata from the
// BTF. This includes line information and function information. Reading
// this metadata requires CAP_SYS_ADMIN or equivalent. If capability is
// unavailable, the instructions will be returned without metadata.
//
// Available from 4.13. Requires CAP_BPF or equivalent for plain instructions.
// Requires CAP_SYS_ADMIN for instructions with metadata.
func (pi *ProgramInfo) Instructions() (asm.Instructions, error) {
	if platform.IsWindows && len(pi.insns) == 0 {
		return nil, fmt.Errorf("read instructions: %w", internal.ErrNotSupportedOnOS)
	}

	// If the calling process is not BPF-capable or if the kernel doesn't
	// support getting xlated instructions, the field will be zero.
	if len(pi.insns) == 0 {
		return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported)
	}

	r := bytes.NewReader(pi.insns)
	insns, err := asm.AppendInstructions(nil, r, internal.NativeEndian, platform.Native)
	if err != nil {
		return nil, fmt.Errorf("unmarshaling instructions: %w", err)
	}

	if pi.btf != 0 {
		btfh, err := btf.NewHandleFromID(pi.btf)
		if err != nil {
			// Getting a BTF handle requires CAP_SYS_ADMIN, if not available we get an -EPERM.
			// Ignore it and fall back to instructions without metadata.
			if !errors.Is(err, unix.EPERM) {
				return nil, fmt.Errorf("unable to get BTF handle: %w", err)
			}
		}

		// If we have a BTF handle, we can use it to assign metadata to the instructions.
		// Note: on the tolerated EPERM above, btfh is nil, so the whole
		// metadata pass is skipped rather than dereferencing a nil handle.
		if btfh != nil {
			defer btfh.Close()

			spec, err := btfh.Spec(nil)
			if err != nil {
				return nil, fmt.Errorf("unable to get BTF spec: %w", err)
			}

			lineInfos, err := btf.LoadLineInfos(
				bytes.NewReader(pi.lineInfos),
				internal.NativeEndian,
				pi.numLineInfos,
				spec,
			)
			if err != nil {
				return nil, fmt.Errorf("parse line info: %w", err)
			}

			funcInfos, err := btf.LoadFuncInfos(
				bytes.NewReader(pi.funcInfos),
				internal.NativeEndian,
				pi.numFuncInfos,
				spec,
			)
			if err != nil {
				return nil, fmt.Errorf("parse func info: %w", err)
			}

			btf.AssignMetadataToInstructions(insns, funcInfos, lineInfos, btf.CORERelocationInfos{})
		}
	}

	// Prefer the BTF function name for the entry symbol, falling back to the
	// program name from OBJ_INFO.
	fn := btf.FuncMetadata(&insns[0])
	name := pi.Name
	if fn != nil {
		name = fn.Name
	}
	insns[0] = insns[0].WithSymbol(name)

	return insns, nil
}
// JitedSize returns the size of the program's JIT-compiled machine code in bytes, which is the
// actual code executed on the host's CPU. This field requires the BPF JIT compiler to be enabled.
//
// Available from 4.13. Reading this metadata requires CAP_BPF or equivalent.
func (pi *ProgramInfo) JitedSize() (uint32, error) {
	size := pi.jitedSize
	if size == 0 {
		return 0, fmt.Errorf("insufficient permissions, unsupported kernel, or JIT compiler disabled: %w", ErrNotSupported)
	}
	return size, nil
}
// TranslatedSize returns the size of the program's translated instructions in bytes, after it has
// been verified and rewritten by the kernel.
//
// Available from 4.13. Reading this metadata requires CAP_BPF or equivalent.
func (pi *ProgramInfo) TranslatedSize() (int, error) {
	if n := len(pi.insns); n > 0 {
		return n, nil
	}
	return 0, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported)
}
// MapIDs returns the maps related to the program.
//
// Available from 4.15.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) MapIDs() ([]MapID, bool) {
	available := pi.maps != nil
	return pi.maps, available
}
// LoadTime returns when the program was loaded since boot time.
//
// Available from 4.15.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) LoadTime() (time.Duration, bool) {
	// loadTime and NrMapIds were introduced in the same kernel version.
	t := pi.loadTime
	return t, t > 0
}
// VerifiedInstructions returns the number verified instructions in the program.
//
// Available from 5.16.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) VerifiedInstructions() (uint32, bool) {
	n := pi.verifiedInstructions
	return n, n > 0
}
// JitedKsymAddrs returns the ksym addresses of the BPF program, including its
// subprograms. The addresses correspond to their symbols in /proc/kallsyms.
//
// Available from 4.18. Note that before 5.x, this field can be empty for
// programs without subprograms (bpf2bpf calls).
//
// The bool return value indicates whether this optional field is available.
//
// When a kernel address can't fit into uintptr (which is usually the case when
// running 32 bit program on a 64 bit kernel), this returns an empty slice and
// a false.
func (pi *ProgramInfo) JitedKsymAddrs() ([]uintptr, bool) {
	addrs := pi.jitedInfo.ksyms
	if len(addrs) == 0 {
		return make([]uintptr, 0), false
	}

	// Check if a kernel address fits into uintptr (it might not when
	// using a 32 bit binary on a 64 bit kernel). This check should work
	// with any kernel address, since they have 1s at the highest bits.
	if first := addrs[0]; uint64(uintptr(first)) != first {
		return nil, false
	}

	out := make([]uintptr, 0, len(addrs))
	for _, a := range addrs {
		out = append(out, uintptr(a))
	}
	return out, true
}
// JitedInsns returns the JITed machine native instructions of the program.
//
// Available from 4.13.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) JitedInsns() ([]byte, bool) {
	// An empty buffer means the kernel did not report JITed instructions.
	return pi.jitedInfo.insns, len(pi.jitedInfo.insns) > 0
}
// JitedLineInfos returns the JITed line infos of the program.
//
// Available from 5.0.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) JitedLineInfos() ([]uint64, bool) {
	// An empty slice means the kernel did not report JITed line infos.
	return pi.jitedInfo.lineInfos, len(pi.jitedInfo.lineInfos) > 0
}
// JitedFuncLens returns the insns length of each function in the JITed program.
//
// Available from 4.18.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) JitedFuncLens() ([]uint32, bool) {
	// An empty slice means the kernel did not report per-function lengths.
	return pi.jitedInfo.funcLens, len(pi.jitedInfo.funcLens) > 0
}
// FuncInfos returns the offset and function information of all (sub)programs in
// a BPF program.
//
// Available from 5.0.
//
// Requires CAP_SYS_ADMIN or equivalent for reading BTF information. Returns
// ErrNotSupported if the program was created without BTF or if the kernel
// doesn't support the field.
func (pi *ProgramInfo) FuncInfos() (btf.FuncOffsets, error) {
	// An empty buffer means the kernel never filled the field in.
	if len(pi.funcInfos) == 0 {
		return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported)
	}

	// The raw func infos reference types by ID, so the program's BTF spec is
	// needed to resolve them.
	spec, err := pi.btfSpec()
	if err != nil {
		return nil, err
	}

	return btf.LoadFuncInfos(
		bytes.NewReader(pi.funcInfos),
		internal.NativeEndian,
		pi.numFuncInfos,
		spec,
	)
}
// Memlock returns an approximate number of bytes allocated to this program.
//
// Available from 4.10.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) Memlock() (uint64, bool) {
	// Zero is treated as "field not populated".
	return pi.memlock, pi.memlock > 0
}
// scanFdInfo reads /proc/self/fdinfo for fd and scans the requested entries
// into fields. See scanFdInfoReader for the matching and parsing semantics.
func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error {
	// fdinfo is a procfs concept; there is no equivalent on Windows.
	if platform.IsWindows {
		return fmt.Errorf("read fdinfo: %w", internal.ErrNotSupportedOnOS)
	}

	fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int()))
	if err != nil {
		return err
	}
	defer fh.Close()

	if err := scanFdInfoReader(fh, fields); err != nil {
		// Prefix the error with the fdinfo path for easier debugging.
		return fmt.Errorf("%s: %w", fh.Name(), err)
	}
	return nil
}
// scanFdInfoReader parses fdinfo-style "key:<tab>value" lines from r, scanning
// each value into the matching entry of fields via fmt.Fscan. Keys not present
// in fields are skipped; fields that already hold a non-zero value are left
// untouched. Returns ErrNotSupported if fields is non-empty but none of the
// requested keys appeared in the input.
func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
	var (
		scanner = bufio.NewScanner(r)
		scanned int
		// Reused across lines to avoid allocating a reader per field.
		reader bytes.Reader
	)

	for scanner.Scan() {
		key, rest, found := bytes.Cut(scanner.Bytes(), []byte(":"))
		if !found {
			// Line doesn't contain a colon, skip.
			continue
		}

		field, ok := fields[string(key)]
		if !ok {
			continue
		}

		// If field already contains a non-zero value, don't overwrite it with fdinfo.
		if !zero(field) {
			scanned++
			continue
		}

		// Cut the \t following the : as well as any potential trailing whitespace.
		rest = bytes.TrimSpace(rest)

		reader.Reset(rest)
		if n, err := fmt.Fscan(&reader, field); err != nil || n != 1 {
			return fmt.Errorf("can't parse field %s: %v", key, err)
		}

		scanned++
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("scanning fdinfo: %w", err)
	}

	if len(fields) > 0 && scanned == 0 {
		// None of the requested keys were present at all.
		return ErrNotSupported
	}

	return nil
}
// zero reports whether arg holds the zero value for its type, after
// unwrapping any levels of pointer and interface indirection.
func zero(arg any) bool {
	value := reflect.ValueOf(arg)
	for {
		kind := value.Kind()
		if kind != reflect.Pointer && kind != reflect.Interface {
			break
		}
		value = value.Elem()
	}
	return value.IsZero()
}
// EnableStats starts collecting runtime statistics of eBPF programs, like the
// amount of program executions and the cumulative runtime.
//
// Specify a BPF_STATS_* constant to select which statistics to collect, like
// [unix.BPF_STATS_RUN_TIME]. Closing the returned [io.Closer] will stop
// collecting statistics.
//
// Collecting statistics may have a performance impact.
//
// Requires at least Linux 5.8.
func EnableStats(which uint32) (io.Closer, error) {
	fd, err := sys.EnableStats(&sys.EnableStatsAttr{
		Type: which,
	})
	if err != nil {
		return nil, err
	}
	// The kernel keeps stats enabled for as long as this fd is open.
	return fd, nil
}
// haveProgramInfoMapIDs probes whether BPF_OBJ_GET_INFO_BY_FD reports the IDs
// of the maps used by a program (ProgInfo.NrMapIds / MapIds).
var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", func() error {
	if platform.IsWindows {
		// We only support efW versions which have this feature, no need to probe.
		return nil
	}

	// Load a minimal throwaway program so we have an fd to query info for.
	prog, err := progLoad(asm.Instructions{
		asm.LoadImm(asm.R0, 0, asm.DWord),
		asm.Return(),
	}, SocketFilter, "MIT")
	if err != nil {
		return err
	}
	defer prog.Close()

	err = sys.ObjInfo(prog, &sys.ProgInfo{
		// NB: Don't need to allocate MapIds since the program isn't using
		// any maps.
		NrMapIds: 1,
	})
	if errors.Is(err, unix.EINVAL) {
		// Most likely the syscall doesn't exist.
		return internal.ErrNotSupported
	}
	if errors.Is(err, unix.E2BIG) {
		// We've hit check_uarg_tail_zero on older kernels.
		return internal.ErrNotSupported
	}

	return err
}, "4.15", "windows:0.21.0")

88
go/vendor/github.com/cilium/ebpf/internal/deque.go generated vendored Normal file
View File

@@ -0,0 +1,88 @@
package internal
import "math/bits"
// Deque implements a double ended queue.
//
// Elements live in a power-of-two sized ring buffer; read and write are
// monotonically increasing cursors and mask folds them into valid indices.
type Deque[T any] struct {
	elems       []T
	read, write uint64
	mask        uint64
}

// Reset clears the contents of the deque while retaining the backing buffer.
func (dq *Deque[T]) Reset() {
	var blank T
	// Zero out live slots so referenced values can be garbage collected.
	for pos := dq.read; pos < dq.write; pos++ {
		dq.elems[pos&dq.mask] = blank
	}
	dq.read, dq.write = 0, 0
}

// Empty reports whether the deque holds no elements.
func (dq *Deque[T]) Empty() bool {
	return dq.read == dq.write
}

// Push adds an element to the end.
func (dq *Deque[T]) Push(e T) {
	dq.Grow(1)
	dq.elems[dq.write&dq.mask] = e
	dq.write++
}

// Shift returns the first element or the zero value.
func (dq *Deque[T]) Shift() T {
	var blank T
	if dq.Empty() {
		return blank
	}

	slot := dq.read & dq.mask
	out := dq.elems[slot]
	dq.elems[slot] = blank // release the reference held by the buffer
	dq.read++
	return out
}

// Pop returns the last element or the zero value.
func (dq *Deque[T]) Pop() T {
	var blank T
	if dq.Empty() {
		return blank
	}

	dq.write--
	slot := dq.write & dq.mask
	out := dq.elems[slot]
	dq.elems[slot] = blank // release the reference held by the buffer
	return out
}

// Grow the deque's capacity, if necessary, to guarantee space for another n
// elements.
func (dq *Deque[T]) Grow(n int) {
	have := dq.write - dq.read
	need := have + uint64(n)
	if need < have {
		panic("overflow")
	}
	if uint64(len(dq.elems)) >= need {
		return
	}

	// Round up to the next power of two, with a floor of 8.
	// See https://jameshfisher.com/2018/03/30/round-up-power-2/
	capacity := max(1<<(64-bits.LeadingZeros64(need-1)), 8)

	// Copy live elements to the front of the fresh buffer, unrolling any
	// wrap-around in the old ring.
	fresh := make([]T, have, capacity)
	pivot := dq.read & dq.mask
	copied := copy(fresh, dq.elems[pivot:])
	copy(fresh[copied:], dq.elems[:pivot])

	dq.elems = fresh[:capacity]
	dq.mask = uint64(capacity) - 1
	dq.read, dq.write = 0, have
}

65
go/vendor/github.com/cilium/ebpf/internal/efw/enums.go generated vendored Normal file
View File

@@ -0,0 +1,65 @@
//go:build windows
package efw
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
/*
Converts an attach type enum into a GUID.
ebpf_result_t ebpf_get_ebpf_attach_type(
bpf_attach_type_t bpf_attach_type,
_Out_ ebpf_attach_type_t* ebpf_attach_type_t *ebpf_attach_type)
*/
var ebpfGetEbpfAttachTypeProc = newProc("ebpf_get_ebpf_attach_type")
func EbpfGetEbpfAttachType(attachType uint32) (windows.GUID, error) {
addr, err := ebpfGetEbpfAttachTypeProc.Find()
if err != nil {
return windows.GUID{}, err
}
var attachTypeGUID windows.GUID
err = errorResult(syscall.SyscallN(addr,
uintptr(attachType),
uintptr(unsafe.Pointer(&attachTypeGUID)),
))
return attachTypeGUID, err
}
/*
Retrieve a program type given a GUID.
bpf_prog_type_t ebpf_get_bpf_program_type(_In_ const ebpf_program_type_t* program_type)
*/
var ebpfGetBpfProgramTypeProc = newProc("ebpf_get_bpf_program_type")
func EbpfGetBpfProgramType(programType windows.GUID) (uint32, error) {
addr, err := ebpfGetBpfProgramTypeProc.Find()
if err != nil {
return 0, err
}
return uint32Result(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&programType)))), nil
}
/*
Retrieve an attach type given a GUID.
bpf_attach_type_t ebpf_get_bpf_attach_type(_In_ const ebpf_attach_type_t* ebpf_attach_type)
*/
var ebpfGetBpfAttachTypeProc = newProc("ebpf_get_bpf_attach_type")
func EbpfGetBpfAttachType(attachType windows.GUID) (uint32, error) {
addr, err := ebpfGetBpfAttachTypeProc.Find()
if err != nil {
return 0, err
}
return uint32Result(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&attachType)))), nil
}

View File

@@ -0,0 +1,155 @@
//go:build windows
package efw
import (
"errors"
"fmt"
"os"
"syscall"
"testing"
"golang.org/x/sys/windows"
)
func init() {
if !testing.Testing() {
return
}
if isDebuggerPresent() {
return
}
if err := configureCRTErrorReporting(); err != nil {
fmt.Fprintln(os.Stderr, "WARNING: Could not configure CRT error reporting, tests may hang:", err)
}
}
var errErrorReportingAlreadyConfigured = errors.New("error reporting already configured")
// Configure built-in error reporting of the C runtime library.
//
// The C runtime emits assertion failures into a graphical message box by default.
// This causes a hang in CI environments. This function configures the CRT to
// log to stderr instead.
func configureCRTErrorReporting() error {
const ucrtDebug = "ucrtbased.dll"
// Constants from crtdbg.h
//
// See https://doxygen.reactos.org/da/d40/crt_2crtdbg_8h_source.html
const (
_CRT_ERROR = 1
_CRT_ASSERT = 2
_CRTDBG_MODE_FILE = 0x1
_CRTDBG_MODE_WNDW = 0x4
_CRTDBG_HFILE_ERROR = -2
_CRTDBG_FILE_STDERR = -4
)
// Load the efW API to trigger loading the CRT. This may fail, in which case
// we can't figure out which CRT is being used.
// In that case we rely on the error bubbling up via some other path.
_ = module.Load()
ucrtHandle, err := syscall.UTF16PtrFromString(ucrtDebug)
if err != nil {
return err
}
var handle windows.Handle
err = windows.GetModuleHandleEx(0, ucrtHandle, &handle)
if errors.Is(err, windows.ERROR_MOD_NOT_FOUND) {
// Loading the ebpf api did not pull in the debug UCRT, so there is
// nothing to configure.
return nil
} else if err != nil {
return err
}
defer windows.FreeLibrary(handle)
setReportModeAddr, err := windows.GetProcAddress(handle, "_CrtSetReportMode")
if err != nil {
return err
}
setReportMode := func(reportType int, reportMode int) (int, error) {
// See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportmode?view=msvc-170
r1, _, err := syscall.SyscallN(setReportModeAddr, uintptr(reportType), uintptr(reportMode))
if int(r1) == -1 {
return 0, fmt.Errorf("set report mode for type %d: %w", reportType, err)
}
return int(r1), nil
}
setReportFileAddr, err := windows.GetProcAddress(handle, "_CrtSetReportFile")
if err != nil {
return err
}
setReportFile := func(reportType int, reportFile int) (int, error) {
// See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportfile?view=msvc-170
r1, _, err := syscall.SyscallN(setReportFileAddr, uintptr(reportType), uintptr(reportFile))
if int(r1) == _CRTDBG_HFILE_ERROR {
return 0, fmt.Errorf("set report file for type %d: %w", reportType, err)
}
return int(r1), nil
}
reportToFile := func(reportType, defaultMode int) error {
oldMode, err := setReportMode(reportType, _CRTDBG_MODE_FILE)
if err != nil {
return err
}
if oldMode != defaultMode {
// Attempt to restore old mode if it was different from the expected default.
_, _ = setReportMode(reportType, oldMode)
return errErrorReportingAlreadyConfigured
}
oldFile, err := setReportFile(reportType, _CRTDBG_FILE_STDERR)
if err != nil {
return err
}
if oldFile != -1 {
// Attempt to restore old file if it was different from the expected default.
_, _ = setReportFile(reportType, oldFile)
return errErrorReportingAlreadyConfigured
}
return nil
}
// See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportmode?view=msvc-170#remarks
// for defaults.
if err := reportToFile(_CRT_ASSERT, _CRTDBG_MODE_WNDW); err != nil {
return err
}
if err := reportToFile(_CRT_ERROR, _CRTDBG_MODE_WNDW); err != nil {
return err
}
return nil
}
// isDebuggerPresent returns true if the current process is being debugged.
//
// See https://learn.microsoft.com/en-us/windows/win32/api/debugapi/nf-debugapi-isdebuggerpresent
func isDebuggerPresent() bool {
kernel32Handle, err := windows.LoadLibrary("kernel32.dll")
if err != nil {
return false
}
isDebuggerPresentAddr, err := windows.GetProcAddress(kernel32Handle, "IsDebuggerPresent")
if err != nil {
return false
}
r1, _, _ := syscall.SyscallN(isDebuggerPresentAddr)
return r1 != 0
}

34
go/vendor/github.com/cilium/ebpf/internal/efw/fd.go generated vendored Normal file
View File

@@ -0,0 +1,34 @@
//go:build windows
package efw
import (
"syscall"
"unsafe"
)
// ebpf_result_t ebpf_close_fd(fd_t fd)
var ebpfCloseFdProc = newProc("ebpf_close_fd")

// EbpfCloseFd closes a file descriptor owned by the eBPF for Windows runtime.
func EbpfCloseFd(fd int) error {
	addr, err := ebpfCloseFdProc.Find()
	if err != nil {
		return err
	}

	return errorResult(syscall.SyscallN(addr, uintptr(fd)))
}
// ebpf_result_t ebpf_duplicate_fd(fd_t fd, _Out_ fd_t* dup)
var ebpfDuplicateFdProc = newProc("ebpf_duplicate_fd")

// EbpfDuplicateFd duplicates fd, returning the new descriptor, or -1 and an
// error on failure.
func EbpfDuplicateFd(fd int) (int, error) {
	addr, err := ebpfDuplicateFdProc.Find()
	if err != nil {
		return -1, err
	}

	var dup FD
	err = errorResult(syscall.SyscallN(addr, uintptr(fd), uintptr(unsafe.Pointer(&dup))))
	return int(dup), err
}

View File

@@ -0,0 +1,36 @@
//go:build windows
// Package efw contains support code for eBPF for Windows.
package efw
import (
"golang.org/x/sys/windows"
)
// module is the global handle for the eBPF for Windows user-space API.
var module = windows.NewLazyDLL("ebpfapi.dll")
// FD is the equivalent of fd_t.
//
// See https://github.com/microsoft/ebpf-for-windows/blob/54632eb360c560ebef2f173be1a4a4625d540744/include/ebpf_api.h#L24
type FD int32
// Size is the equivalent of size_t.
//
// This is correct on amd64 and arm64 according to tests on godbolt.org.
type Size uint64
// Int is the equivalent of int on MSVC (am64, arm64) and MinGW (gcc, clang).
type Int int32
// ObjectType is the equivalent of ebpf_object_type_t.
//
// See https://github.com/microsoft/ebpf-for-windows/blob/44f5de09ec0f3f7ad176c00a290c1cb7106cdd5e/include/ebpf_core_structs.h#L41
type ObjectType uint32
const (
EBPF_OBJECT_UNKNOWN ObjectType = iota
EBPF_OBJECT_MAP
EBPF_OBJECT_LINK
EBPF_OBJECT_PROGRAM
)

View File

@@ -0,0 +1,44 @@
//go:build windows
package efw
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
/*
ebpf_result_t ebpf_object_load_native_by_fds(
_In_z_ const char* file_name,
_Inout_ size_t* count_of_maps,
_Out_writes_opt_(count_of_maps) fd_t* map_fds,
_Inout_ size_t* count_of_programs,
_Out_writes_opt_(count_of_programs) fd_t* program_fds)
*/
var ebpfObjectLoadNativeByFdsProc = newProc("ebpf_object_load_native_by_fds")
func EbpfObjectLoadNativeFds(fileName string, mapFds []FD, programFds []FD) (int, int, error) {
addr, err := ebpfObjectLoadNativeByFdsProc.Find()
if err != nil {
return 0, 0, err
}
fileBytes, err := windows.ByteSliceFromString(fileName)
if err != nil {
return 0, 0, err
}
countOfMaps := Size(len(mapFds))
countOfPrograms := Size(len(programFds))
err = errorResult(syscall.SyscallN(addr,
uintptr(unsafe.Pointer(&fileBytes[0])),
uintptr(unsafe.Pointer(&countOfMaps)),
uintptr(unsafe.Pointer(&mapFds[0])),
uintptr(unsafe.Pointer(&countOfPrograms)),
uintptr(unsafe.Pointer(&programFds[0])),
))
return int(countOfMaps), int(countOfPrograms), err
}

117
go/vendor/github.com/cilium/ebpf/internal/efw/object.go generated vendored Normal file
View File

@@ -0,0 +1,117 @@
//go:build windows
package efw
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
// https://github.com/microsoft/ebpf-for-windows/blob/9d9003c39c3fd75be5225ac0fce30077d6bf0604/include/ebpf_core_structs.h#L15
const _EBPF_MAX_PIN_PATH_LENGTH = 256
/*
Retrieve object info and type from a fd.
ebpf_result_t ebpf_object_get_info_by_fd(
fd_t bpf_fd,
_Inout_updates_bytes_to_opt_(*info_size, *info_size) void* info,
_Inout_opt_ uint32_t* info_size,
_Out_opt_ ebpf_object_type_t* type)
*/
var ebpfObjectGetInfoByFdProc = newProc("ebpf_object_get_info_by_fd")
func EbpfObjectGetInfoByFd(fd int, info unsafe.Pointer, info_size *uint32) (ObjectType, error) {
addr, err := ebpfObjectGetInfoByFdProc.Find()
if err != nil {
return 0, err
}
var objectType ObjectType
err = errorResult(syscall.SyscallN(addr,
uintptr(fd),
uintptr(info),
uintptr(unsafe.Pointer(info_size)),
uintptr(unsafe.Pointer(&objectType)),
))
return objectType, err
}
// ebpf_result_t ebpf_object_unpin(_In_z_ const char* path)
var ebpfObjectUnpinProc = newProc("ebpf_object_unpin")
func EbpfObjectUnpin(path string) error {
addr, err := ebpfObjectUnpinProc.Find()
if err != nil {
return err
}
pathBytes, err := windows.ByteSliceFromString(path)
if err != nil {
return err
}
return errorResult(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&pathBytes[0]))))
}
/*
Retrieve the next pinned object path.
ebpf_result_t ebpf_get_next_pinned_object_path(
_In_opt_z_ const char* start_path,
_Out_writes_z_(next_path_len) char* next_path,
size_t next_path_len,
_Inout_opt_ ebpf_object_type_t* type)
*/
var ebpfGetNextPinnedObjectPath = newProc("ebpf_get_next_pinned_object_path")
func EbpfGetNextPinnedObjectPath(startPath string, objectType ObjectType) (string, ObjectType, error) {
addr, err := ebpfGetNextPinnedObjectPath.Find()
if err != nil {
return "", 0, err
}
ptr, err := windows.BytePtrFromString(startPath)
if err != nil {
return "", 0, err
}
tmp := make([]byte, _EBPF_MAX_PIN_PATH_LENGTH)
err = errorResult(syscall.SyscallN(addr,
uintptr(unsafe.Pointer(ptr)),
uintptr(unsafe.Pointer(&tmp[0])),
uintptr(len(tmp)),
uintptr(unsafe.Pointer(&objectType)),
))
return windows.ByteSliceToString(tmp), objectType, err
}
/*
Canonicalize a path using filesystem canonicalization rules.
_Must_inspect_result_ ebpf_result_t
ebpf_canonicalize_pin_path(_Out_writes_(output_size) char* output, size_t output_size, _In_z_ const char* input)
*/
var ebpfCanonicalizePinPath = newProc("ebpf_canonicalize_pin_path")
func EbpfCanonicalizePinPath(input string) (string, error) {
addr, err := ebpfCanonicalizePinPath.Find()
if err != nil {
return "", err
}
inputBytes, err := windows.ByteSliceFromString(input)
if err != nil {
return "", err
}
output := make([]byte, _EBPF_MAX_PIN_PATH_LENGTH)
err = errorResult(syscall.SyscallN(addr,
uintptr(unsafe.Pointer(&output[0])),
uintptr(len(output)),
uintptr(unsafe.Pointer(&inputBytes[0])),
))
return windows.ByteSliceToString(output), err
}

50
go/vendor/github.com/cilium/ebpf/internal/efw/proc.go generated vendored Normal file
View File

@@ -0,0 +1,50 @@
//go:build windows
package efw
import (
"errors"
"fmt"
"syscall"
"golang.org/x/sys/windows"
)
/*
The BPF syscall wrapper which is ABI compatible with Linux.
int bpf(int cmd, union bpf_attr* attr, unsigned int size)
*/
var BPF = newProc("bpf")
type proc struct {
proc *windows.LazyProc
}
func newProc(name string) proc {
return proc{module.NewProc(name)}
}
// Find resolves the procedure's address in ebpfapi.dll, returning a friendlier
// error when the module itself is missing.
func (p proc) Find() (uintptr, error) {
	if err := p.proc.Find(); err != nil {
		if errors.Is(err, windows.ERROR_MOD_NOT_FOUND) {
			// The DLL isn't installed at all; say so instead of leaking
			// the raw Windows loader error.
			return 0, fmt.Errorf("load %s: not found", module.Name)
		}
		return 0, err
	}
	return p.proc.Addr(), nil
}
// uint32Result wraps a function which returns a uint32_t.
func uint32Result(r1, _ uintptr, _ syscall.Errno) uint32 {
return uint32(r1)
}
// errorResult wraps a function which returns ebpf_result_t.
func errorResult(r1, _ uintptr, errNo syscall.Errno) error {
err := resultToError(Result(r1))
if err != nil && errNo != 0 {
return fmt.Errorf("%w (errno: %v)", err, errNo)
}
return err
}

View File

@@ -0,0 +1,39 @@
//go:build windows
package efw
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
/*
Attach a program.
ebpf_result_t ebpf_program_attach_by_fds(
fd_t program_fd,
_In_opt_ const ebpf_attach_type_t* attach_type,
_In_reads_bytes_opt_(attach_parameters_size) void* attach_parameters,
size_t attach_parameters_size,
_Out_ fd_t* link)
*/
var ebpfProgramAttachByFdsProc = newProc("ebpf_program_attach_by_fds")
func EbpfProgramAttachFds(fd int, attachType windows.GUID, params unsafe.Pointer, params_size uintptr) (int, error) {
addr, err := ebpfProgramAttachByFdsProc.Find()
if err != nil {
return 0, err
}
var link FD
err = errorResult(syscall.SyscallN(addr,
uintptr(fd),
uintptr(unsafe.Pointer(&attachType)),
uintptr(params),
params_size,
uintptr(unsafe.Pointer(&link)),
))
return int(link), err
}

View File

@@ -0,0 +1,57 @@
//go:build windows
package efw
// See https://github.com/microsoft/ebpf-for-windows/blob/main/include/ebpf_result.h
type Result int32
//go:generate go run golang.org/x/tools/cmd/stringer@latest -tags windows -output result_string_windows.go -type=Result
const (
EBPF_SUCCESS Result = iota
EBPF_VERIFICATION_FAILED
EBPF_JIT_COMPILATION_FAILED
EBPF_PROGRAM_LOAD_FAILED
EBPF_INVALID_FD
EBPF_INVALID_OBJECT
EBPF_INVALID_ARGUMENT
EBPF_OBJECT_NOT_FOUND
EBPF_OBJECT_ALREADY_EXISTS
EBPF_FILE_NOT_FOUND
EBPF_ALREADY_PINNED
EBPF_NOT_PINNED
EBPF_NO_MEMORY
EBPF_PROGRAM_TOO_LARGE
EBPF_RPC_EXCEPTION
EBPF_ALREADY_INITIALIZED
EBPF_ELF_PARSING_FAILED
EBPF_FAILED
EBPF_OPERATION_NOT_SUPPORTED
EBPF_KEY_NOT_FOUND
EBPF_ACCESS_DENIED
EBPF_BLOCKED_BY_POLICY
EBPF_ARITHMETIC_OVERFLOW
EBPF_EXTENSION_FAILED_TO_LOAD
EBPF_INSUFFICIENT_BUFFER
EBPF_NO_MORE_KEYS
EBPF_KEY_ALREADY_EXISTS
EBPF_NO_MORE_TAIL_CALLS
EBPF_PENDING
EBPF_OUT_OF_SPACE
EBPF_CANCELED
EBPF_INVALID_POINTER
EBPF_TIMEOUT
EBPF_STALE_ID
EBPF_INVALID_STATE
)
func (r Result) Error() string {
return r.String()
}
// resultToError converts an ebpf_result_t into a Go error, mapping
// EBPF_SUCCESS to nil. Result implements error, so non-success values are
// returned as-is.
func resultToError(res Result) error {
	if res == EBPF_SUCCESS {
		return nil
	}
	return res
}

View File

@@ -0,0 +1,57 @@
// Code generated by "stringer -tags windows -output result_string_windows.go -type=Result"; DO NOT EDIT.
package efw
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[EBPF_SUCCESS-0]
_ = x[EBPF_VERIFICATION_FAILED-1]
_ = x[EBPF_JIT_COMPILATION_FAILED-2]
_ = x[EBPF_PROGRAM_LOAD_FAILED-3]
_ = x[EBPF_INVALID_FD-4]
_ = x[EBPF_INVALID_OBJECT-5]
_ = x[EBPF_INVALID_ARGUMENT-6]
_ = x[EBPF_OBJECT_NOT_FOUND-7]
_ = x[EBPF_OBJECT_ALREADY_EXISTS-8]
_ = x[EBPF_FILE_NOT_FOUND-9]
_ = x[EBPF_ALREADY_PINNED-10]
_ = x[EBPF_NOT_PINNED-11]
_ = x[EBPF_NO_MEMORY-12]
_ = x[EBPF_PROGRAM_TOO_LARGE-13]
_ = x[EBPF_RPC_EXCEPTION-14]
_ = x[EBPF_ALREADY_INITIALIZED-15]
_ = x[EBPF_ELF_PARSING_FAILED-16]
_ = x[EBPF_FAILED-17]
_ = x[EBPF_OPERATION_NOT_SUPPORTED-18]
_ = x[EBPF_KEY_NOT_FOUND-19]
_ = x[EBPF_ACCESS_DENIED-20]
_ = x[EBPF_BLOCKED_BY_POLICY-21]
_ = x[EBPF_ARITHMETIC_OVERFLOW-22]
_ = x[EBPF_EXTENSION_FAILED_TO_LOAD-23]
_ = x[EBPF_INSUFFICIENT_BUFFER-24]
_ = x[EBPF_NO_MORE_KEYS-25]
_ = x[EBPF_KEY_ALREADY_EXISTS-26]
_ = x[EBPF_NO_MORE_TAIL_CALLS-27]
_ = x[EBPF_PENDING-28]
_ = x[EBPF_OUT_OF_SPACE-29]
_ = x[EBPF_CANCELED-30]
_ = x[EBPF_INVALID_POINTER-31]
_ = x[EBPF_TIMEOUT-32]
_ = x[EBPF_STALE_ID-33]
_ = x[EBPF_INVALID_STATE-34]
}
const _Result_name = "EBPF_SUCCESSEBPF_VERIFICATION_FAILEDEBPF_JIT_COMPILATION_FAILEDEBPF_PROGRAM_LOAD_FAILEDEBPF_INVALID_FDEBPF_INVALID_OBJECTEBPF_INVALID_ARGUMENTEBPF_OBJECT_NOT_FOUNDEBPF_OBJECT_ALREADY_EXISTSEBPF_FILE_NOT_FOUNDEBPF_ALREADY_PINNEDEBPF_NOT_PINNEDEBPF_NO_MEMORYEBPF_PROGRAM_TOO_LARGEEBPF_RPC_EXCEPTIONEBPF_ALREADY_INITIALIZEDEBPF_ELF_PARSING_FAILEDEBPF_FAILEDEBPF_OPERATION_NOT_SUPPORTEDEBPF_KEY_NOT_FOUNDEBPF_ACCESS_DENIEDEBPF_BLOCKED_BY_POLICYEBPF_ARITHMETIC_OVERFLOWEBPF_EXTENSION_FAILED_TO_LOADEBPF_INSUFFICIENT_BUFFEREBPF_NO_MORE_KEYSEBPF_KEY_ALREADY_EXISTSEBPF_NO_MORE_TAIL_CALLSEBPF_PENDINGEBPF_OUT_OF_SPACEEBPF_CANCELEDEBPF_INVALID_POINTEREBPF_TIMEOUTEBPF_STALE_IDEBPF_INVALID_STATE"
var _Result_index = [...]uint16{0, 12, 36, 63, 87, 102, 121, 142, 163, 189, 208, 227, 242, 256, 278, 296, 320, 343, 354, 382, 400, 418, 440, 464, 493, 517, 534, 557, 580, 592, 609, 622, 642, 654, 667, 685}
func (i Result) String() string {
if i < 0 || i >= Result(len(_Result_index)-1) {
return "Result(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Result_name[_Result_index[i]:_Result_index[i+1]]
}

View File

@@ -0,0 +1,36 @@
//go:build windows
package efw
import "golang.org/x/sys/windows"
// https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L366
const _BPF_OBJ_NAME_LEN = 64
// See https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L372-L386
type BpfMapInfo struct {
_ uint32 ///< Map ID.
_ uint32 ///< Type of map.
_ uint32 ///< Size in bytes of a map key.
_ uint32 ///< Size in bytes of a map value.
_ uint32 ///< Maximum number of entries allowed in the map.
Name [_BPF_OBJ_NAME_LEN]byte ///< Null-terminated map name.
_ uint32 ///< Map flags.
_ uint32 ///< ID of inner map template.
_ uint32 ///< Number of pinned paths.
}
// See https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L396-L410
type BpfProgInfo struct {
_ uint32 ///< Program ID.
_ uint32 ///< Program type, if a cross-platform type.
_ uint32 ///< Number of maps associated with this program.
_ uintptr ///< Pointer to caller-allocated array to fill map IDs into.
Name [_BPF_OBJ_NAME_LEN]byte ///< Null-terminated map name.
_ windows.GUID ///< Program type UUID.
_ windows.GUID ///< Attach type UUID.
_ uint32 ///< Number of pinned paths.
_ uint32 ///< Number of attached links.
}

102
go/vendor/github.com/cilium/ebpf/internal/elf.go generated vendored Normal file
View File

@@ -0,0 +1,102 @@
package internal
import (
"debug/elf"
"fmt"
"io"
)
// SafeELFFile wraps elf.File so that panics during parsing can be converted
// into errors by the methods below.
type SafeELFFile struct {
	*elf.File
}

// NewSafeELFFile reads an ELF safely.
//
// Any panic during parsing is turned into an error. This is necessary since
// there are a bunch of unfixed bugs in debug/elf.
//
// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle
func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) {
	defer func() {
		if p := recover(); p != nil {
			safe, err = nil, fmt.Errorf("reading ELF file panicked: %s", p)
		}
	}()

	file, err := elf.NewFile(r)
	if err != nil {
		return nil, err
	}
	return &SafeELFFile{file}, nil
}

// OpenSafeELFFile reads an ELF from a file.
//
// It works like NewSafeELFFile, with the exception that safe.Close will
// close the underlying file.
func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) {
	defer func() {
		if p := recover(); p != nil {
			safe, err = nil, fmt.Errorf("reading ELF file panicked: %s", p)
		}
	}()

	file, err := elf.Open(path)
	if err != nil {
		return nil, err
	}
	return &SafeELFFile{file}, nil
}

// Symbols is the safe version of elf.File.Symbols.
func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
	defer func() {
		if p := recover(); p != nil {
			syms, err = nil, fmt.Errorf("reading ELF symbols panicked: %s", p)
		}
	}()

	return se.File.Symbols()
}

// DynamicSymbols is the safe version of elf.File.DynamicSymbols.
func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) {
	defer func() {
		if p := recover(); p != nil {
			syms, err = nil, fmt.Errorf("reading ELF dynamic symbols panicked: %s", p)
		}
	}()

	return se.File.DynamicSymbols()
}

// SectionsByType returns all sections in the file with the specified section type.
func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section {
	matches := make([]*elf.Section, 0, 1)
	for _, sec := range se.Sections {
		if sec.Type == typ {
			matches = append(matches, sec)
		}
	}
	return matches
}

View File

@@ -0,0 +1,9 @@
//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64
package internal
import "encoding/binary"
// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
// depending on the host's endianness.
var NativeEndian = binary.BigEndian

View File

@@ -0,0 +1,9 @@
//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64 || wasm
package internal
import "encoding/binary"
// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
// depending on the host's endianness.
var NativeEndian = binary.LittleEndian

179
go/vendor/github.com/cilium/ebpf/internal/errors.go generated vendored Normal file
View File

@@ -0,0 +1,179 @@
package internal
import (
"bytes"
"fmt"
"io"
"strings"
)
// ErrorWithLog wraps err in a VerifierError that includes the parsed verifier
// log buffer.
//
// The default error output is a summary of the full log. The latter can be
// accessed via VerifierError.Log or by formatting the error, see Format.
func ErrorWithLog(source string, err error, log []byte) *VerifierError {
	const whitespace = "\t\r\v\n "

	// The log buffer is a C string: anything past the first NUL byte is
	// uninitialised and must be discarded before treating it as Go text.
	if nul := bytes.IndexByte(log, 0); nul != -1 {
		log = log[:nul]
	}
	if log = bytes.Trim(log, whitespace); len(log) == 0 {
		return &VerifierError{source, err, nil}
	}

	raw := bytes.Split(log, []byte{'\n'})
	lines := make([]string, len(raw))
	for i, line := range raw {
		// Keep leading white space on individual lines: we rely on it
		// when outputting logs.
		lines[i] = string(bytes.TrimRight(line, whitespace))
	}

	return &VerifierError{source, err, lines}
}
// VerifierError includes information from the eBPF verifier.
//
// It summarises the log output, see Format if you want to output the full contents.
type VerifierError struct {
	// source describes which operation produced the error.
	source string
	// The error which caused this error.
	Cause error
	// The verifier output split into lines.
	Log []string
}

// Unwrap returns the wrapped cause, enabling errors.Is and errors.As.
func (le *VerifierError) Unwrap() error {
	return le.Cause
}
// Error returns a one-line summary: the source, the cause, and the last line
// (or two) of the verifier log, noting how many lines were omitted.
func (le *VerifierError) Error() string {
	log := le.Log
	if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") {
		// Get rid of "processed 39 insns (limit 1000000) ..." from summary.
		log = log[:n-1]
	}

	var b strings.Builder
	fmt.Fprintf(&b, "%s: %s", le.source, le.Cause.Error())

	n := len(log)
	if n == 0 {
		return b.String()
	}

	lines := log[n-1:]
	if n >= 2 && includePreviousLine(log[n-1]) {
		// Add one more line of context if it aids understanding the error.
		lines = log[n-2:]
	}

	for _, line := range lines {
		b.WriteString(": ")
		b.WriteString(strings.TrimSpace(line))
	}

	omitted := len(le.Log) - len(lines)
	if omitted > 0 {
		fmt.Fprintf(&b, " (%d line(s) omitted)", omitted)
	}

	return b.String()
}
// includePreviousLine returns true if the given line likely is better
// understood with additional context from the preceding line.
func includePreviousLine(line string) bool {
	// We need to find a good trade off between understandable error messages
	// and too much complexity here. Checking string prefixes is ok; requiring
	// regular expressions to do it would be overkill.
	switch {
	case strings.HasPrefix(line, "\t"):
		// [13] STRUCT drm_rect size=16 vlen=4
		// \tx1 type_id=2
		return true

	case len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9':
		// 0: (95) exit
		// R0 !read_ok
		return true

	case strings.HasPrefix(line, "invalid bpf_context access"):
		// 0: (79) r6 = *(u64 *)(r1 +0)
		// func '__x64_sys_recvfrom' arg0 type FWD is not a struct
		// invalid bpf_context access off=0 size=8
		return true

	default:
		return false
	}
}
// Format the error.
//
// Understood verbs are %s and %v, which are equivalent to calling Error(). %v
// allows outputting additional information using the following flags:
//
//	%+<width>v: Output the first <width> lines, or all lines if no width is given.
//	%-<width>v: Output the last <width> lines, or all lines if no width is given.
//
// Use width to specify how many lines to output. Use the '-' flag to output
// lines from the end of the log instead of the beginning.
func (le *VerifierError) Format(f fmt.State, verb rune) {
	switch verb {
	case 's':
		_, _ = io.WriteString(f, le.Error())

	case 'v':
		// Clamp the requested line count to what we actually have.
		n, haveWidth := f.Width()
		if !haveWidth || n > len(le.Log) {
			n = len(le.Log)
		}

		// Plain %v (no flags) is the same as %s; a width without a
		// direction flag is meaningless.
		if !f.Flag('+') && !f.Flag('-') {
			if haveWidth {
				_, _ = io.WriteString(f, "%!v(BADWIDTH)")
				return
			}

			_, _ = io.WriteString(f, le.Error())
			return
		}

		// '+' and '-' are mutually exclusive directions.
		if f.Flag('+') && f.Flag('-') {
			_, _ = io.WriteString(f, "%!v(BADFLAG)")
			return
		}

		fmt.Fprintf(f, "%s: %s:", le.source, le.Cause.Error())

		omitted := len(le.Log) - n
		lines := le.Log[:n]
		if f.Flag('-') {
			// Print last instead of first lines. The omission marker
			// goes before the lines in this case.
			lines = le.Log[len(le.Log)-n:]
			if omitted > 0 {
				fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted)
			}
		}

		for _, line := range lines {
			fmt.Fprintf(f, "\n\t%s", line)
		}

		if !f.Flag('-') {
			// For '+' the omission marker goes after the lines.
			if omitted > 0 {
				fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted)
			}
		}

	default:
		fmt.Fprintf(f, "%%!%c(BADVERB)", verb)
	}
}

223
go/vendor/github.com/cilium/ebpf/internal/feature.go generated vendored Normal file
View File

@@ -0,0 +1,223 @@
package internal
import (
"errors"
"fmt"
"runtime"
"sync"
"github.com/cilium/ebpf/internal/platform"
)
// ErrNotSupported indicates that a feature is not supported.
var ErrNotSupported = errors.New("not supported")

// ErrNotSupportedOnOS indicates that a feature is not supported on the current
// operating system. It wraps ErrNotSupported, so
// errors.Is(err, ErrNotSupported) also matches.
var ErrNotSupportedOnOS = fmt.Errorf("%w on %s", ErrNotSupported, runtime.GOOS)
// UnsupportedFeatureError is returned by FeatureTest() functions.
//
// It matches ErrNotSupported via errors.Is (see the Is method below).
type UnsupportedFeatureError struct {
	// The minimum version required for this feature.
	//
	// On Linux this refers to the mainline kernel version, on other platforms
	// to the version of the runtime.
	//
	// Used for the error string, and for sanity checking during testing.
	MinimumVersion Version
	// The name of the feature that isn't supported.
	Name string
}
// Error renders a human-readable message, mentioning the minimum required
// version when one is known.
func (ufe *UnsupportedFeatureError) Error() string {
	if !ufe.MinimumVersion.Unspecified() {
		return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion)
	}
	return fmt.Sprintf("%s not supported", ufe.Name)
}
// Is indicates that UnsupportedFeatureError is ErrNotSupported.
//
// This makes errors.Is(err, ErrNotSupported) return true for any
// UnsupportedFeatureError.
func (ufe *UnsupportedFeatureError) Is(target error) bool {
	return target == ErrNotSupported
}
// FeatureTest caches the result of a [FeatureTestFn].
//
// Fields should not be modified after creation.
type FeatureTest struct {
	// The name of the feature being detected.
	Name string
	// Version in the form Major.Minor[.Patch].
	Version string
	// The feature test itself.
	Fn FeatureTestFn

	// mu guards done and result; see execute for the read-then-write
	// locking pattern.
	mu sync.RWMutex
	// done is true once Fn has produced a conclusive, cached outcome.
	done bool
	// result is the cached outcome; nil means the feature is supported.
	result error
}

// FeatureTestFn is used to determine whether the kernel supports
// a certain feature.
//
// The return values have the following semantics:
//
//	err == ErrNotSupported: the feature is not available
//	err == nil: the feature is available
//	err != nil: the test couldn't be executed
type FeatureTestFn func() error
// NewFeatureTest is a convenient way to create a single [FeatureTest].
//
// versions specifies in which version of a BPF runtime a feature appeared.
// The format is "GOOS:Major.Minor[.Patch]". GOOS may be omitted when targeting
// Linux. Returns [ErrNotSupportedOnOS] if there is no version specified for the
// current OS.
func NewFeatureTest(name string, fn FeatureTestFn, versions ...string) func() error {
	version, err := platform.SelectVersion(versions)
	switch {
	case err != nil:
		// Surface the version selection error on every invocation.
		return func() error { return err }
	case version == "":
		// No version listed for this OS: the feature does not exist here.
		// We don't return an UnsupportedFeatureError here, since that will
		// trigger version checks which don't make sense.
		return func() error {
			return fmt.Errorf("%s: %w", name, ErrNotSupportedOnOS)
		}
	}
	test := &FeatureTest{
		Name:    name,
		Version: version,
		Fn:      fn,
	}
	return test.execute
}
// execute the feature test.
//
// The result is cached if the test is conclusive.
//
// See [FeatureTestFn] for the meaning of the returned error.
func (ft *FeatureTest) execute() error {
	// Fast path: check for a cached result under the read lock.
	ft.mu.RLock()
	result, done := ft.result, ft.done
	ft.mu.RUnlock()
	if done {
		return result
	}
	ft.mu.Lock()
	defer ft.mu.Unlock()
	// The test may have been executed by another caller while we were
	// waiting to acquire ft.mu.
	if ft.done {
		return ft.result
	}
	err := ft.Fn()
	if err == nil {
		// Feature is available: cache the success.
		ft.done = true
		return nil
	}
	if errors.Is(err, ErrNotSupported) {
		// Feature is conclusively unavailable: cache an
		// UnsupportedFeatureError that carries the minimum version, if known.
		var v Version
		if ft.Version != "" {
			v, err = NewVersion(ft.Version)
			if err != nil {
				// Malformed version string: not conclusive, not cached.
				return fmt.Errorf("feature %s: %w", ft.Name, err)
			}
		}
		ft.done = true
		ft.result = &UnsupportedFeatureError{
			MinimumVersion: v,
			Name:           ft.Name,
		}
		return ft.result
	}
	// We couldn't execute the feature test to a point
	// where it could make a determination.
	// Don't cache the result, just return it.
	return fmt.Errorf("detect support for %s: %w", ft.Name, err)
}
// FeatureMatrix groups multiple related feature tests into a map.
//
// Useful when there is a small number of discrete features which are known
// at compile time.
//
// It must not be modified concurrently with calling [FeatureMatrix.Result].
type FeatureMatrix[K comparable] map[K]*FeatureTest

// Result returns the outcome of the feature test for the given key.
//
// It's safe to call this function concurrently.
//
// Always returns [ErrNotSupportedOnOS] on Windows.
func (fm FeatureMatrix[K]) Result(key K) error {
	feature, known := fm[key]
	if !known {
		return fmt.Errorf("no feature probe for %v", key)
	}
	if platform.IsWindows {
		return fmt.Errorf("%s: %w", feature.Name, ErrNotSupportedOnOS)
	}
	return feature.execute()
}
// FeatureCache caches a potentially unlimited number of feature probes.
//
// Useful when there is a high cardinality for a feature test.
type FeatureCache[K comparable] struct {
	// mu guards features; newTest is immutable after construction.
	mu      sync.RWMutex
	newTest func(K) *FeatureTest
	// features holds one probe per key, created lazily by retrieve.
	features map[K]*FeatureTest
}

// NewFeatureCache creates a FeatureCache which constructs probes for unseen
// keys using newTest.
func NewFeatureCache[K comparable](newTest func(K) *FeatureTest) *FeatureCache[K] {
	return &FeatureCache[K]{
		newTest:  newTest,
		features: make(map[K]*FeatureTest),
	}
}

// Result returns the outcome of the feature test for key, creating and
// executing the probe on first use.
//
// Always returns [ErrNotSupportedOnOS] on Windows.
func (fc *FeatureCache[K]) Result(key K) error {
	if platform.IsWindows {
		return fmt.Errorf("feature probe for %v: %w", key, ErrNotSupportedOnOS)
	}
	// NB: Executing the feature test happens without fc.mu taken.
	return fc.retrieve(key).execute()
}

// retrieve returns the cached probe for key, creating it if necessary.
// It checks under the read lock first, then re-checks under the write lock,
// so concurrent callers never create two probes for the same key.
func (fc *FeatureCache[K]) retrieve(key K) *FeatureTest {
	fc.mu.RLock()
	ft := fc.features[key]
	fc.mu.RUnlock()
	if ft != nil {
		return ft
	}
	fc.mu.Lock()
	defer fc.mu.Unlock()
	if ft := fc.features[key]; ft != nil {
		// Lost the race: another goroutine created the probe first.
		return ft
	}
	ft = fc.newTest(key)
	fc.features[key] = ft
	return ft
}

128
go/vendor/github.com/cilium/ebpf/internal/io.go generated vendored Normal file
View File

@@ -0,0 +1,128 @@
package internal
import (
"bufio"
"bytes"
"compress/gzip"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sync"
)
// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized
// buffered reader. It is a convenience function for reading subsections of
// ELF sections while minimizing the amount of read() syscalls made.
//
// Syscall overhead is non-negligible in continuous integration context
// where ELFs might be accessed over virtual filesystems with poor random
// access performance. Buffering reads makes sense because (sub)sections
// end up being read completely anyway.
//
// Use instead of the r.Seek() + io.LimitReader() pattern.
func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader {
// Clamp the size of the buffer to one page to avoid slurping large parts
// of a file into memory. bufio.NewReader uses a hardcoded default buffer
// of 4096. Allow arches with larger pages to allocate more, but don't
// allocate a fixed 4k buffer if we only need to read a small segment.
buf := n
if ps := int64(os.Getpagesize()); n > ps {
buf = ps
}
return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf))
}
// DiscardZeroes is an io.Writer that accepts only zero bytes, returning an
// error as soon as a non-zero byte is written.
type DiscardZeroes struct{}

// Write discards p after verifying that every byte in it is zero.
func (DiscardZeroes) Write(p []byte) (int, error) {
	for i := range p {
		if p[i] != 0 {
			return 0, errors.New("encountered non-zero byte")
		}
	}
	return len(p), nil
}
// ReadAllCompressed decompresses a gzipped file into memory.
func ReadAllCompressed(file string) ([]byte, error) {
fh, err := os.Open(file)
if err != nil {
return nil, err
}
defer fh.Close()
gz, err := gzip.NewReader(fh)
if err != nil {
return nil, err
}
defer gz.Close()
return io.ReadAll(gz)
}
// ReadUint64FromFile reads a uint64 from a file.
//
// format specifies the contents of the file in fmt.Scanf syntax.
func ReadUint64FromFile(format string, path ...string) (uint64, error) {
filename := filepath.Join(path...)
data, err := os.ReadFile(filename)
if err != nil {
return 0, fmt.Errorf("reading file %q: %w", filename, err)
}
var value uint64
n, err := fmt.Fscanf(bytes.NewReader(data), format, &value)
if err != nil {
return 0, fmt.Errorf("parsing file %q: %w", filename, err)
}
if n != 1 {
return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n)
}
return value, nil
}
// uint64FromFileKey identifies one memoized ReadUint64FromFile invocation:
// the scan format plus the fully joined file path.
type uint64FromFileKey struct {
	format, path string
}

// uint64FromFileCache memoizes successful ReadUint64FromFile results,
// guarded by the embedded RWMutex. Used by ReadUint64FromFileOnce.
var uint64FromFileCache = struct {
	sync.RWMutex
	values map[uint64FromFileKey]uint64
}{
	values: map[uint64FromFileKey]uint64{},
}
// ReadUint64FromFileOnce is like readUint64FromFile but memoizes the result.
//
// Only successful reads are cached; errors are returned without caching.
func ReadUint64FromFileOnce(format string, path ...string) (uint64, error) {
	filename := filepath.Join(path...)
	key := uint64FromFileKey{format, filename}
	// Fast path: return a previously cached value under the read lock.
	uint64FromFileCache.RLock()
	if value, ok := uint64FromFileCache.values[key]; ok {
		uint64FromFileCache.RUnlock()
		return value, nil
	}
	uint64FromFileCache.RUnlock()
	// Read outside the lock; concurrent callers may both read the file,
	// but the first writer wins below.
	value, err := ReadUint64FromFile(format, filename)
	if err != nil {
		return 0, err
	}
	uint64FromFileCache.Lock()
	defer uint64FromFileCache.Unlock()
	if value, ok := uint64FromFileCache.values[key]; ok {
		// Someone else got here before us, use what is cached.
		return value, nil
	}
	uint64FromFileCache.values[key] = value
	return value, nil
}

View File

@@ -0,0 +1,20 @@
package kallsyms
import "sync"
// cache is a concurrency-safe, type-safe memoization map built on sync.Map.
type cache[K, V comparable] struct {
	m sync.Map
}

// Load returns the value stored for key, reporting whether it was present.
// The zero value of V is returned for absent keys.
func (c *cache[K, V]) Load(key K) (value V, _ bool) {
	if v, ok := c.m.Load(key); ok {
		return v.(V), true
	}
	return value, false
}

// Store records value under key, overwriting any previous entry.
func (c *cache[K, V]) Store(key K, value V) {
	c.m.Store(key, value)
}

View File

@@ -0,0 +1,293 @@
package kallsyms
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"slices"
"strconv"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/platform"
)
// errAmbiguousKsym is returned when a requested symbol appears more than once
// in /proc/kallsyms.
var errAmbiguousKsym = errors.New("multiple kernel symbols with the same name")

// symAddrs caches symbol name -> address lookups; 0 means not present.
var symAddrs cache[string, uint64]

// symModules caches symbol name -> module lookups; "" means the symbol is
// missing or is not provided by a module.
var symModules cache[string, string]
// Module returns the kernel module providing the given symbol in the kernel, if
// any. Returns an empty string and no error if the symbol is not present in the
// kernel. Only function symbols are considered. Returns an error if multiple
// symbols with the same name were found.
//
// Consider [AssignModules] if you need to resolve multiple symbols, as it will
// only perform one iteration over /proc/kallsyms.
func Module(name string) (string, error) {
	if name == "" {
		return "", nil
	}
	// Fast path: previously resolved (or previously missed) symbol.
	if mod, cached := symModules.Load(name); cached {
		return mod, nil
	}
	// Delegate to the batch lookup with a single-entry request.
	request := map[string]string{name: ""}
	err := AssignModules(request)
	if err != nil {
		return "", err
	}
	return request[name], nil
}
// AssignModules looks up the kernel module providing each given symbol, if any,
// and assigns them to their corresponding values in the symbols map. Only
// function symbols are considered. Results of all lookups are cached,
// successful or otherwise.
//
// Any symbols missing in the kernel are ignored. Returns an error if multiple
// symbols with a given name were found.
func AssignModules(symbols map[string]string) error {
	if !platform.IsLinux {
		return fmt.Errorf("read /proc/kallsyms: %w", internal.ErrNotSupportedOnOS)
	}
	if len(symbols) == 0 {
		return nil
	}

	// Serve whatever we can from the cache; collect the rest for a single
	// pass over /proc/kallsyms.
	misses := make(map[string]string)
	for name := range symbols {
		mod, cached := symModules.Load(name)
		if cached {
			symbols[name] = mod
		} else {
			misses[name] = ""
		}
	}
	if len(misses) == 0 {
		// All symbols satisfied from cache.
		return nil
	}

	f, err := os.Open("/proc/kallsyms")
	if err != nil {
		return err
	}
	defer f.Close()

	if err := assignModules(f, misses); err != nil {
		return fmt.Errorf("assigning symbol modules: %w", err)
	}

	// Cache every requested symbol, even ones that are missing from the
	// kernel or don't belong to a module, to avoid re-scanning later.
	for name, mod := range misses {
		symModules.Store(name, mod)
		symbols[name] = mod
	}
	return nil
}
// assignModules assigns kernel symbol modules read from f to values requested
// by symbols. Always scans the whole input to make sure the user didn't request
// an ambiguous symbol.
func assignModules(f io.Reader, symbols map[string]string) error {
	if len(symbols) == 0 {
		return nil
	}
	// Track which requested symbols we've already seen, so duplicates can be
	// detected even when a symbol has no associated module.
	found := make(map[string]struct{})
	r := newReader(f)
	for r.Line() {
		// Only look for function symbols in the kernel's text section (tT).
		s, err, skip := parseSymbol(r, []rune{'t', 'T'})
		if err != nil {
			return fmt.Errorf("parsing kallsyms line: %w", err)
		}
		if skip {
			continue
		}
		if _, requested := symbols[string(s.name)]; !requested {
			continue
		}
		if _, ok := found[string(s.name)]; ok {
			// We've already seen this symbol. Return an error to avoid silently
			// attaching to a symbol in the wrong module. libbpf also rejects
			// referring to ambiguous symbols.
			//
			// We can't simply check if we already have a value for the given symbol,
			// since many won't have an associated kernel module.
			return fmt.Errorf("symbol %s: duplicate found at address 0x%x (module %q): %w",
				s.name, s.addr, s.mod, errAmbiguousKsym)
		}
		symbols[string(s.name)] = string(s.mod)
		found[string(s.name)] = struct{}{}
	}
	if err := r.Err(); err != nil {
		return fmt.Errorf("reading kallsyms: %w", err)
	}
	return nil
}
// Address returns the address of the given symbol in the kernel. Returns 0 and
// no error if the symbol is not present. Returns an error if multiple addresses
// were found for a symbol.
//
// Consider [AssignAddresses] if you need to resolve multiple symbols, as it
// will only perform one iteration over /proc/kallsyms.
func Address(symbol string) (uint64, error) {
	if symbol == "" {
		return 0, nil
	}
	// Fast path: previously resolved (or previously missed) symbol.
	if addr, cached := symAddrs.Load(symbol); cached {
		return addr, nil
	}
	// Delegate to the batch lookup with a single-entry request.
	request := map[string]uint64{symbol: 0}
	err := AssignAddresses(request)
	if err != nil {
		return 0, err
	}
	return request[symbol], nil
}
// AssignAddresses looks up the addresses of the requested symbols in the kernel
// and assigns them to their corresponding values in the symbols map. Results
// of all lookups are cached, successful or otherwise.
//
// Any symbols missing in the kernel are ignored. Returns an error if multiple
// addresses were found for a symbol.
func AssignAddresses(symbols map[string]uint64) error {
	if !platform.IsLinux {
		return fmt.Errorf("read /proc/kallsyms: %w", internal.ErrNotSupportedOnOS)
	}
	if len(symbols) == 0 {
		return nil
	}
	// Attempt to fetch symbols from cache; misses are collected for a
	// single pass over /proc/kallsyms below.
	request := make(map[string]uint64)
	for name := range symbols {
		if addr, ok := symAddrs.Load(name); ok {
			symbols[name] = addr
			continue
		}
		// Mark the symbol to be read from /proc/kallsyms.
		request[name] = 0
	}
	if len(request) == 0 {
		// All symbols satisfied from cache.
		return nil
	}
	f, err := os.Open("/proc/kallsyms")
	if err != nil {
		return err
	}
	defer f.Close()
	if err := assignAddresses(f, request); err != nil {
		return fmt.Errorf("loading symbol addresses: %w", err)
	}
	// Update the cache with the new symbols. Cache all requested symbols even if
	// they weren't found, to avoid repeated lookups.
	for name, addr := range request {
		symAddrs.Store(name, addr)
		symbols[name] = addr
	}
	return nil
}
// assignAddresses assigns kernel symbol addresses read from f to values
// requested by symbols. Always scans the whole input to make sure the user
// didn't request an ambiguous symbol.
func assignAddresses(f io.Reader, symbols map[string]uint64) error {
	if len(symbols) == 0 {
		return nil
	}
	r := newReader(f)
	for r.Line() {
		// No type filter here: symbols of all types are considered.
		s, err, skip := parseSymbol(r, nil)
		if err != nil {
			return fmt.Errorf("parsing kallsyms line: %w", err)
		}
		if skip {
			continue
		}
		existing, requested := symbols[string(s.name)]
		if existing != 0 {
			// Multiple addresses for a symbol have been found. Return a friendly
			// error to avoid silently attaching to the wrong symbol. libbpf also
			// rejects referring to ambiguous symbols.
			//
			// NOTE(review): duplicates are only detected once a non-zero address
			// has been recorded. When addresses are hidden (all zeros, e.g. under
			// kptr_restrict), duplicates pass silently — presumably intentional,
			// confirm against upstream.
			return fmt.Errorf("symbol %s(0x%x): duplicate found at address 0x%x: %w", s.name, existing, s.addr, errAmbiguousKsym)
		}
		if requested {
			symbols[string(s.name)] = s.addr
		}
	}
	if err := r.Err(); err != nil {
		return fmt.Errorf("reading kallsyms: %w", err)
	}
	return nil
}
// ksym is a single parsed line of /proc/kallsyms.
type ksym struct {
	// addr is the symbol address, parsed from the leading hex field.
	addr uint64
	// name is the symbol name. NOTE(review): assigned from r.Bytes() in
	// parseSymbol, which may alias the reader's buffer — confirm before
	// retaining beyond the current line.
	name []byte
	// mod is the providing kernel module without its surrounding brackets;
	// empty for built-in (vmlinux) symbols.
	mod []byte
}
// parseSymbol parses a line from /proc/kallsyms into an address, type, name and
// module. Skip will be true if the symbol doesn't match any of the given symbol
// types. See `man 1 nm` for all available types.
//
// Only yields symbols whose type is contained in types. An empty value for types
// disables this filtering.
//
// Example line: `ffffffffc1682010 T nf_nat_init\t[nf_nat]`
func parseSymbol(r *reader, types []rune) (s ksym, err error, skip bool) {
	// Fields are consumed word by word: address, type, name, [module].
	for i := 0; r.Word(); i++ {
		switch i {
		// Address of the symbol.
		case 0:
			s.addr, err = strconv.ParseUint(r.Text(), 16, 64)
			if err != nil {
				return s, fmt.Errorf("parsing address: %w", err), false
			}
		// Type of the symbol. Assume the character is ASCII-encoded by converting
		// it directly to a rune, since it's a fixed field controlled by the kernel.
		case 1:
			if len(types) > 0 && !slices.Contains(types, rune(r.Bytes()[0])) {
				// Not one of the requested types: tell the caller to skip it.
				return s, nil, true
			}
		// Name of the symbol.
		case 2:
			s.name = r.Bytes()
		// Kernel module the symbol is provided by.
		case 3:
			s.mod = bytes.Trim(r.Bytes(), "[]")
		// Ignore any future fields.
		default:
			return
		}
	}
	// Naked return: s, err and skip carry their values from the loop above.
	return
}

Some files were not shown because too many files have changed in this diff Show More