diff --git a/support/ebpf/dotnet_tracer.ebpf.c b/support/ebpf/dotnet_tracer.ebpf.c
index ba8d6ce8..c2f3c0c6 100644
--- a/support/ebpf/dotnet_tracer.ebpf.c
+++ b/support/ebpf/dotnet_tracer.ebpf.c
@@ -246,7 +246,7 @@ ErrorCode unwind_one_dotnet_frame(PerCPURecord *record, DotnetProcInfo *vi, bool
 // or interpreter dispatcher. It does not reset the trace object and will append the
 // dotnet stack frames to the trace object for the current CPU.
 BPF_PROBE(unwind_dotnet)
-int unwind_dotnet(BPF_CONTEXT)
+int unwind_dotnet(struct pt_regs *ctx)
 {
   PerCPURecord *record = get_per_cpu_record();
   if (!record) {
diff --git a/support/ebpf/helpers.h b/support/ebpf/helpers.h
index 049c79ed..ddaf83cf 100644
--- a/support/ebpf/helpers.h
+++ b/support/ebpf/helpers.h
@@ -6,12 +6,8 @@
 // Macros for BPF program type and context handling.
 #ifdef EXTERNAL_TRIGGER
 #define BPF_PROBE(name) SEC("kprobe/"#name)
-#define BPF_CONTEXT struct pt_regs *ctx
-#define GET_REGS(ctx) (ctx)
 #else
 #define BPF_PROBE(name) SEC("perf_event/"#name)
-#define BPF_CONTEXT struct bpf_perf_event_data *ctx
-#define GET_REGS(ctx) ((struct pt_regs *)&ctx->regs)
 #endif
 
 #endif // OPTI_HELPERS_H
\ No newline at end of file
diff --git a/support/ebpf/hotspot_tracer.ebpf.c b/support/ebpf/hotspot_tracer.ebpf.c
index f57b1293..514bc2ce 100644
--- a/support/ebpf/hotspot_tracer.ebpf.c
+++ b/support/ebpf/hotspot_tracer.ebpf.c
@@ -886,7 +886,7 @@ static ErrorCode hotspot_unwind_one_frame(PerCPURecord *record, HotspotProcInfo
 // and it recursive unwinds all HotSpot frames and then jumps back to unwind further
 // native frames that follow.
 BPF_PROBE(unwind_hotspot)
-int unwind_hotspot(BPF_CONTEXT)
+int unwind_hotspot(struct pt_regs *ctx)
 {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
diff --git a/support/ebpf/interpreter_dispatcher.ebpf.c b/support/ebpf/interpreter_dispatcher.ebpf.c
index 1e8f8fbc..d9271e4e 100644
--- a/support/ebpf/interpreter_dispatcher.ebpf.c
+++ b/support/ebpf/interpreter_dispatcher.ebpf.c
@@ -174,7 +174,7 @@ void maybe_add_apm_info(Trace *trace) {
 }
 
 BPF_PROBE(unwind_stop)
-int unwind_stop(BPF_CONTEXT)
+int unwind_stop(struct pt_regs *ctx)
 {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
diff --git a/support/ebpf/native_stack_trace.ebpf.c b/support/ebpf/native_stack_trace.ebpf.c
index ce82b8c6..7959d697 100644
--- a/support/ebpf/native_stack_trace.ebpf.c
+++ b/support/ebpf/native_stack_trace.ebpf.c
@@ -749,7 +749,7 @@ static inline ErrorCode get_usermode_regs(struct pt_regs *ctx,
 #endif
 
 BPF_PROBE(unwind_native)
-int unwind_native(BPF_CONTEXT)
+int unwind_native(struct pt_regs *ctx)
 {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
@@ -860,8 +860,7 @@ int collect_trace(struct pt_regs *ctx) {
 }
 
 BPF_PROBE(native_tracer_entry)
-int native_tracer_entry(BPF_CONTEXT)
+int native_tracer_entry(struct bpf_perf_event_data *ctx)
 {
-  struct pt_regs *regs = GET_REGS(ctx);
-  return collect_trace(regs);
+  return collect_trace((struct pt_regs *)&ctx->regs);
 }
diff --git a/support/ebpf/perl_tracer.ebpf.c b/support/ebpf/perl_tracer.ebpf.c
index 1f59c67c..50493308 100644
--- a/support/ebpf/perl_tracer.ebpf.c
+++ b/support/ebpf/perl_tracer.ebpf.c
@@ -358,7 +358,7 @@ int walk_perl_stack(PerCPURecord *record, const PerlProcInfo *perlinfo) {
 // or interpreter dispatcher. It does not reset the trace object and will append the
 // Perl stack frames to the trace object for the current CPU.
 BPF_PROBE(unwind_perl)
-int unwind_perl(BPF_CONTEXT)
+int unwind_perl(struct pt_regs *ctx)
 {
   PerCPURecord *record = get_per_cpu_record();
   if (!record) {
diff --git a/support/ebpf/php_tracer.ebpf.c b/support/ebpf/php_tracer.ebpf.c
index f0c5c115..2afd29d0 100644
--- a/support/ebpf/php_tracer.ebpf.c
+++ b/support/ebpf/php_tracer.ebpf.c
@@ -184,7 +184,7 @@ int walk_php_stack(PerCPURecord *record, PHPProcInfo *phpinfo, bool is_jitted) {
 }
 
 BPF_PROBE(unwind_php)
-int unwind_php(BPF_CONTEXT)
+int unwind_php(struct pt_regs *ctx)
 {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
diff --git a/support/ebpf/python_tracer.ebpf.c b/support/ebpf/python_tracer.ebpf.c
index e047b1cb..d945b565 100644
--- a/support/ebpf/python_tracer.ebpf.c
+++ b/support/ebpf/python_tracer.ebpf.c
@@ -278,7 +278,7 @@ ErrorCode get_PyFrame(const PyProcInfo *pyinfo, void **frame) {
 // or interpreter dispatcher. It does not reset the trace object and will append the
 // Python stack frames to the trace object for the current CPU.
 BPF_PROBE(unwind_python)
-int unwind_python(BPF_CONTEXT)
+int unwind_python(struct pt_regs *ctx)
 {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
diff --git a/support/ebpf/ruby_tracer.ebpf.c b/support/ebpf/ruby_tracer.ebpf.c
index a062b319..c601d5d3 100644
--- a/support/ebpf/ruby_tracer.ebpf.c
+++ b/support/ebpf/ruby_tracer.ebpf.c
@@ -218,7 +218,7 @@ ErrorCode walk_ruby_stack(PerCPURecord *record, const RubyProcInfo *rubyinfo,
 }
 
 BPF_PROBE(unwind_ruby)
-int unwind_ruby(BPF_CONTEXT)
+int unwind_ruby(struct pt_regs *ctx)
 {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
diff --git a/support/ebpf/v8_tracer.ebpf.c b/support/ebpf/v8_tracer.ebpf.c
index 1e03a6e2..6f92cf69 100644
--- a/support/ebpf/v8_tracer.ebpf.c
+++ b/support/ebpf/v8_tracer.ebpf.c
@@ -286,7 +286,7 @@ ErrorCode unwind_one_v8_frame(PerCPURecord *record, V8ProcInfo *vi, bool top) {
 // or interpreter dispatcher. It does not reset the trace object and will append the
 // V8 stack frames to the trace object for the current CPU.
 BPF_PROBE(unwind_v8)
-int unwind_v8(BPF_CONTEXT)
+int unwind_v8(struct pt_regs *ctx)
 {
   PerCPURecord *record = get_per_cpu_record();
   if (!record) {
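
Note on the perf_event entry point: with BPF_CONTEXT and GET_REGS removed, only
native_tracer_entry still receives the perf-event wrapper context, and the cast
it now performs inline relies on struct bpf_perf_event_data from the kernel
UAPI (<linux/bpf_perf_event.h>) placing the register snapshot as its first
member. Below is a minimal standalone sketch of that pattern, assuming libbpf
headers; the program and helper names are hypothetical and not part of this
patch.

    #include <linux/bpf.h>
    #include <linux/bpf_perf_event.h>
    #include <linux/ptrace.h>
    #include <bpf/bpf_helpers.h>

    // Hypothetical stand-in for collect_trace(): takes the plain register view.
    static int handle_regs(struct pt_regs *regs)
    {
      return 0;
    }

    SEC("perf_event/example_entry")
    int example_entry(struct bpf_perf_event_data *ctx)
    {
      // regs is the first member of struct bpf_perf_event_data, so this cast
      // is exactly what GET_REGS(ctx) expanded to in the perf_event build
      // before this patch removed the macro.
      return handle_regs((struct pt_regs *)&ctx->regs);
    }

    char _license[] SEC("license") = "GPL";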