Skip to content

Commit

Permalink
fixup: generate perf_event and kprobe programs at compile time
Browse files Browse the repository at this point in the history
Signed-off-by: Florian Lehner <florian.lehner@elastic.co>
  • Loading branch information
florianl committed Nov 8, 2024
1 parent 3ae4021 commit c1724aa
Show file tree
Hide file tree
Showing 13 changed files with 50 additions and 22 deletions.
3 changes: 2 additions & 1 deletion support/ebpf/dotnet_tracer.ebpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -244,7 +244,7 @@ ErrorCode unwind_one_dotnet_frame(PerCPURecord *record, DotnetProcInfo *vi, bool
// unwind_dotnet is the entry point for tracing when invoked from the native tracer
// or interpreter dispatcher. It does not reset the trace object and will append the
// dotnet stack frames to the trace object for the current CPU.
SEC("perf_event/unwind_dotnet")
static inline __attribute__((__always_inline__))
int unwind_dotnet(struct pt_regs *ctx) {
PerCPURecord *record = get_per_cpu_record();
if (!record) {
Expand Down Expand Up @@ -289,3 +289,4 @@ int unwind_dotnet(struct pt_regs *ctx) {
DEBUG_PRINT("dotnet: tail call for next frame unwinder (%d) failed", unwinder);
return -1;
}
MULTI_USE_FUNC(unwind_dotnet)
3 changes: 2 additions & 1 deletion support/ebpf/hotspot_tracer.ebpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -890,7 +890,7 @@ static ErrorCode hotspot_unwind_one_frame(PerCPURecord *record, HotspotProcInfo
// unwind_hotspot is the entry point for tracing when invoked from the native tracer
// and it recursive unwinds all HotSpot frames and then jumps back to unwind further
// native frames that follow.
SEC("perf_event/unwind_hotspot")
static inline __attribute__((__always_inline__))
int unwind_hotspot(struct pt_regs *ctx) {
PerCPURecord *record = get_per_cpu_record();
if (!record)
Expand Down Expand Up @@ -927,3 +927,4 @@ int unwind_hotspot(struct pt_regs *ctx) {
DEBUG_PRINT("jvm: tail call for next frame unwinder (%d) failed", unwinder);
return -1;
}
MULTI_USE_FUNC(unwind_hotspot)
4 changes: 3 additions & 1 deletion support/ebpf/interpreter_dispatcher.ebpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,8 @@ void maybe_add_apm_info(Trace *trace) {
trace->apm_transaction_id.as_int, corr_buf.trace_flags);
}

SEC("perf_event/unwind_stop")
// unwind_stop is the tail call destination for PROG_UNWIND_STOP.
static inline __attribute__((__always_inline__))
int unwind_stop(struct pt_regs *ctx) {
PerCPURecord *record = get_per_cpu_record();
if (!record)
Expand Down Expand Up @@ -238,6 +239,7 @@ int unwind_stop(struct pt_regs *ctx) {

return 0;
}
MULTI_USE_FUNC(unwind_stop)

char _license[] SEC("license") = "GPL";
// this number will be interpreted by the elf loader
Expand Down
4 changes: 3 additions & 1 deletion support/ebpf/native_stack_trace.ebpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -571,7 +571,8 @@ static ErrorCode unwind_one_frame(u64 pid, u32 frame_idx, struct UnwindState *st
#error unsupported architecture
#endif

SEC("perf_event/unwind_native")
// unwind_native is the tail call destination for PROG_UNWIND_NATIVE.
static inline __attribute__((__always_inline__))
int unwind_native(struct pt_regs *ctx) {
PerCPURecord *record = get_per_cpu_record();
if (!record)
Expand Down Expand Up @@ -632,3 +633,4 @@ int native_tracer_entry(struct bpf_perf_event_data *ctx) {
u64 ts = bpf_ktime_get_ns();
return collect_trace((struct pt_regs*) &ctx->regs, TRACE_SAMPLING, pid, tid, ts, 0);
}
MULTI_USE_FUNC(unwind_native)
3 changes: 2 additions & 1 deletion support/ebpf/perl_tracer.ebpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -356,7 +356,7 @@ int walk_perl_stack(PerCPURecord *record, const PerlProcInfo *perlinfo) {
// unwind_perl is the entry point for tracing when invoked from the native tracer
// or interpreter dispatcher. It does not reset the trace object and will append the
// Perl stack frames to the trace object for the current CPU.
SEC("perf_event/unwind_perl")
static inline __attribute__((__always_inline__))
int unwind_perl(struct pt_regs *ctx) {
PerCPURecord *record = get_per_cpu_record();
if (!record) {
Expand Down Expand Up @@ -426,3 +426,4 @@ int unwind_perl(struct pt_regs *ctx) {
tail_call(ctx, unwinder);
return -1;
}
MULTI_USE_FUNC(unwind_perl)
4 changes: 3 additions & 1 deletion support/ebpf/php_tracer.ebpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,8 @@ int walk_php_stack(PerCPURecord *record, PHPProcInfo *phpinfo, bool is_jitted) {
return unwinder;
}

SEC("perf_event/unwind_php")
// unwind_php is the tail call destination for PROG_UNWIND_PHP.
static inline __attribute__((__always_inline__))
int unwind_php(struct pt_regs *ctx) {
PerCPURecord *record = get_per_cpu_record();
if (!record)
Expand Down Expand Up @@ -239,3 +240,4 @@ int unwind_php(struct pt_regs *ctx) {
tail_call(ctx, unwinder);
return -1;
}
MULTI_USE_FUNC(unwind_php)
3 changes: 2 additions & 1 deletion support/ebpf/python_tracer.ebpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -276,7 +276,7 @@ ErrorCode get_PyFrame(const PyProcInfo *pyinfo, void **frame) {
// unwind_python is the entry point for tracing when invoked from the native tracer
// or interpreter dispatcher. It does not reset the trace object and will append the
// Python stack frames to the trace object for the current CPU.
SEC("perf_event/unwind_python")
static inline __attribute__((__always_inline__))
int unwind_python(struct pt_regs *ctx) {
PerCPURecord *record = get_per_cpu_record();
if (!record)
Expand Down Expand Up @@ -318,3 +318,4 @@ int unwind_python(struct pt_regs *ctx) {
tail_call(ctx, unwinder);
return -1;
}
MULTI_USE_FUNC(unwind_python)
4 changes: 3 additions & 1 deletion support/ebpf/ruby_tracer.ebpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,8 @@ ErrorCode walk_ruby_stack(PerCPURecord *record, const RubyProcInfo *rubyinfo,
return ERR_OK;
}

SEC("perf_event/unwind_ruby")
// unwind_ruby is the tail call destination for PROG_UNWIND_RUBY.
static inline __attribute__((__always_inline__))
int unwind_ruby(struct pt_regs *ctx) {
PerCPURecord *record = get_per_cpu_record();
if (!record)
Expand Down Expand Up @@ -273,3 +274,4 @@ int unwind_ruby(struct pt_regs *ctx) {
tail_call(ctx, unwinder);
return -1;
}
MULTI_USE_FUNC(unwind_ruby)
14 changes: 14 additions & 0 deletions support/ebpf/tracemgmt.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,20 @@
#include "types.h"
#include "errors.h"

// MULTI_USE_FUNC generates perf event and kprobe eBPF programs
// for a given function. The wrapped function is expected to be
// defined elsewhere as static inline __attribute__((__always_inline__))
// with the signature `int func_name(struct pt_regs *ctx)`, so its body
// is inlined into both generated entry points at compile time. The
// generated programs are named `perf_<func_name>` (ELF section
// "perf_event/<func_name>") and `kprobe_<func_name>` (ELF section
// "kprobe/<func_name>"); the userspace loader selects the variant that
// matches the attachment type by this name prefix.
// NOTE(review): callers invoke this macro without a trailing semicolon —
// the expansion ends in a function body's closing brace.
#define MULTI_USE_FUNC(func_name) \
SEC("perf_event/"#func_name) \
int perf_##func_name(struct pt_regs *ctx) { \
return func_name(ctx); \
} \
\
SEC("kprobe/"#func_name) \
int kprobe_##func_name(struct pt_regs *ctx) { \
return func_name(ctx); \
}


// increment_metric increments the value of the given metricID by 1
static inline __attribute__((__always_inline__))
void increment_metric(u32 metricID) {
Expand Down
Binary file modified support/ebpf/tracer.ebpf.release.amd64
Binary file not shown.
Binary file modified support/ebpf/tracer.ebpf.release.arm64
Binary file not shown.
3 changes: 2 additions & 1 deletion support/ebpf/v8_tracer.ebpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -284,7 +284,7 @@ ErrorCode unwind_one_v8_frame(PerCPURecord *record, V8ProcInfo *vi, bool top) {
// unwind_v8 is the entry point for tracing when invoked from the native tracer
// or interpreter dispatcher. It does not reset the trace object and will append the
// V8 stack frames to the trace object for the current CPU.
SEC("perf_event/unwind_v8")
static inline __attribute__((__always_inline__))
int unwind_v8(struct pt_regs *ctx) {
PerCPURecord *record = get_per_cpu_record();
if (!record) {
Expand Down Expand Up @@ -328,3 +328,4 @@ int unwind_v8(struct pt_regs *ctx) {
DEBUG_PRINT("v8: tail call for next frame unwinder (%d) failed", unwinder);
return -1;
}
MULTI_USE_FUNC(unwind_v8)
27 changes: 14 additions & 13 deletions tracer/tracer.go
Original file line number Diff line number Diff line change
Expand Up @@ -602,9 +602,14 @@ func loadPerfUnwinders(coll *cebpf.CollectionSpec, ebpfProgs map[string]*cebpf.P
continue
}

progSpec, ok := coll.Programs[unwindProg.name]
unwindProgName := unwindProg.name
if !unwindProg.noTailCallTarget {
unwindProgName = "perf_" + unwindProg.name
}

progSpec, ok := coll.Programs[unwindProgName]
if !ok {
return fmt.Errorf("program %s does not exist", unwindProg.name)
return fmt.Errorf("program %s does not exist", unwindProgName)
}

if err := loadProgram(ebpfProgs, tailcallMap, unwindProg.progID, progSpec,
Expand Down Expand Up @@ -666,9 +671,14 @@ func loadKProbeUnwinders(coll *cebpf.CollectionSpec, ebpfProgs map[string]*cebpf
continue
}

progSpec, ok := coll.Programs[unwindProg.name]
unwindProgName := unwindProg.name
if !unwindProg.noTailCallTarget {
unwindProgName = "kprobe_" + unwindProg.name
}

progSpec, ok := coll.Programs[unwindProgName]
if !ok {
return fmt.Errorf("program %s does not exist", unwindProg.name)
return fmt.Errorf("program %s does not exist", unwindProgName)
}

// Replace the prog array for the tail calls.
Expand All @@ -679,15 +689,6 @@ func loadKProbeUnwinders(coll *cebpf.CollectionSpec, ebpfProgs map[string]*cebpf
}
}

// All the tail call targets are perf event programs. To be able to tail call them
// from a kprobe, adjust their specification.
if !unwindProg.noTailCallTarget {
// Adjust program type
progSpec.Type = cebpf.Kprobe

// Adjust program name for easier debugging
progSpec.Name = "kp_" + progSpec.Name
}
if err := loadProgram(ebpfProgs, tailcallMap, unwindProg.progID, progSpec,
programOptions, unwindProg.noTailCallTarget); err != nil {
return err
Expand Down

0 comments on commit c1724aa

Please sign in to comment.