Skip to content

Commit 15a65b2

Browse files
Luo Gengkun authored and smb49 committed
perf/core: Fix WARN in perf_cgroup_switch()
BugLink: https://bugs.launchpad.net/bugs/2120812 [ Upstream commit 3172fb986666dfb71bf483b6d3539e1e587fa197 ] There may be concurrency between perf_cgroup_switch and perf_cgroup_event_disable. Consider the following scenario: after a new perf cgroup event is created on CPU0, the new event may not trigger a reprogramming, causing ctx->is_active to be 0. In this case, when CPU1 disables this perf event, it executes __perf_remove_from_context->list_del_event->perf_cgroup_event_disable on CPU1, which causes a race with perf_cgroup_switch running on CPU0. The following describes the details of this concurrency scenario: CPU0 CPU1 perf_cgroup_switch: ... # cpuctx->cgrp is not NULL here if (READ_ONCE(cpuctx->cgrp) == NULL) return; perf_remove_from_context: ... raw_spin_lock_irq(&ctx->lock); ... # ctx->is_active == 0 because reprogramming is not # triggered, so CPU1 can do __perf_remove_from_context # for CPU0 __perf_remove_from_context: perf_cgroup_event_disable: ... if (--ctx->nr_cgroups) ... # this warning will happen because CPU1 changed # ctx.nr_cgroups to 0. WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); [peterz: use guard instead of goto unlock] Fixes: db4a835 ("perf/core: Set cgroup in CPU contexts for new cgroup events") Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lkml.kernel.org/r/20250604033924.3914647-3-luogengkun@huaweicloud.com Signed-off-by: Sasha Levin <sashal@kernel.org> Signed-off-by: Noah Wager <noah.wager@canonical.com> Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
1 parent fd057be commit 15a65b2

File tree

1 file changed

+20
-2
lines changed

1 file changed

+20
-2
lines changed

kernel/events/core.c

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -206,6 +206,19 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
206206
__perf_ctx_unlock(&cpuctx->ctx);
207207
}
208208

209+
typedef struct {
210+
struct perf_cpu_context *cpuctx;
211+
struct perf_event_context *ctx;
212+
} class_perf_ctx_lock_t;
213+
214+
static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
215+
{ perf_ctx_unlock(_T->cpuctx, _T->ctx); }
216+
217+
static inline class_perf_ctx_lock_t
218+
class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
219+
struct perf_event_context *ctx)
220+
{ perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; }
221+
209222
#define TASK_TOMBSTONE ((void *)-1L)
210223

211224
static bool is_kernel_event(struct perf_event *event)
@@ -903,7 +916,13 @@ static void perf_cgroup_switch(struct task_struct *task)
903916
if (READ_ONCE(cpuctx->cgrp) == cgrp)
904917
return;
905918

906-
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
919+
guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
920+
/*
921+
* Re-check, could've raced vs perf_remove_from_context().
922+
*/
923+
if (READ_ONCE(cpuctx->cgrp) == NULL)
924+
return;
925+
907926
perf_ctx_disable(&cpuctx->ctx, true);
908927

909928
ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
@@ -921,7 +940,6 @@ static void perf_cgroup_switch(struct task_struct *task)
921940
ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
922941

923942
perf_ctx_enable(&cpuctx->ctx, true);
924-
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
925943
}
926944

927945
static int perf_cgroup_ensure_storage(struct perf_event *event,

0 commit comments

Comments (0)