
Commit fd057be

Peter Zijlstra authored and smb49 committed
perf: Fix cgroup state vs ERROR
BugLink: https://bugs.launchpad.net/bugs/2120812

[ Upstream commit 61988e36dc5457cdff7ae7927e8d9ad1419ee998 ]

While chasing down a missing perf_cgroup_event_disable() elsewhere, Leo Yan
found that both perf_put_aux_event() and perf_remove_sibling_event() were
also missing one.

Specifically, the rule is that events that switch to OFF,ERROR need to call
perf_cgroup_event_disable().

Unify the disable paths to ensure this.

Fixes: ab43762 ("perf: Allow normal events to output AUX data")
Fixes: 9f0c4fa ("perf/core: Add a new PERF_EV_CAP_SIBLING event capability")
Reported-by: Leo Yan <leo.yan@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250605123343.GD35970@noisy.programming.kicks-ass.net
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Noah Wager <noah.wager@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
1 parent aaec913 commit fd057be
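
To make the rule in the commit message concrete, here is a minimal sketch as a stand-alone userspace toy model, not the real kernel/events/core.c code: the struct layout, the one-argument helper signatures and the cgroup_active field are illustrative assumptions; only the enum values and the call sequence mirror the patch below. It shows why folding the cgroup step into a single __event_disable() helper keeps the OFF/ERROR transition and perf_cgroup_event_disable() together.

/* Toy model of the rule: every path that moves an event to OFF or ERROR
 * must also run the cgroup-disable step.  All names besides the enum values
 * are simplified stand-ins, not the real kernel symbols. */
#include <stdio.h>

enum perf_event_state {
	PERF_EVENT_STATE_ERROR    = -2,
	PERF_EVENT_STATE_OFF      = -1,
	PERF_EVENT_STATE_INACTIVE =  0,
	PERF_EVENT_STATE_ACTIVE   =  1,
};

struct perf_event {
	enum perf_event_state state;
	int cgroup_active;	/* stand-in for the per-cgroup bookkeeping */
};

static void event_sched_out(struct perf_event *e)
{
	if (e->state == PERF_EVENT_STATE_ACTIVE)
		e->state = PERF_EVENT_STATE_INACTIVE;
}

static void perf_cgroup_event_disable(struct perf_event *e)
{
	e->cgroup_active = 0;
}

static void perf_event_set_state(struct perf_event *e, enum perf_event_state state)
{
	e->state = state;
}

/* The unified helper: sched-out, cgroup bookkeeping and the final state
 * change always happen together, whatever the target state is. */
static void __event_disable(struct perf_event *e, enum perf_event_state state)
{
	event_sched_out(e);
	perf_cgroup_event_disable(e);
	perf_event_set_state(e, state);
}

int main(void)
{
	struct perf_event ev = { PERF_EVENT_STATE_ACTIVE, 1 };

	/* Pre-patch, paths such as perf_put_aux_event() did the sched-out
	 * and the ERROR transition but skipped the cgroup step, leaving
	 * cgroup_active stale. */
	__event_disable(&ev, PERF_EVENT_STATE_ERROR);
	printf("state=%d cgroup_active=%d\n", ev.state, ev.cgroup_active);
	return 0;
}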

File tree

1 file changed: +30 -21 lines changed

kernel/events/core.c

Lines changed: 30 additions & 21 deletions
@@ -2112,8 +2112,9 @@ perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
 }
 
 static void put_event(struct perf_event *event);
-static void event_sched_out(struct perf_event *event,
-			    struct perf_event_context *ctx);
+static void __event_disable(struct perf_event *event,
+			    struct perf_event_context *ctx,
+			    enum perf_event_state state);
 
 static void perf_put_aux_event(struct perf_event *event)
 {
@@ -2146,8 +2147,7 @@ static void perf_put_aux_event(struct perf_event *event)
 		 * state so that we don't try to schedule it again. Note
 		 * that perf_event_enable() will clear the ERROR status.
 		 */
-		event_sched_out(iter, ctx);
-		perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+		__event_disable(iter, ctx, PERF_EVENT_STATE_ERROR);
 	}
 }

@@ -2205,18 +2205,6 @@ static inline struct list_head *get_event_list(struct perf_event *event)
 		&event->pmu_ctx->flexible_active;
 }
 
-/*
- * Events that have PERF_EV_CAP_SIBLING require being part of a group and
- * cannot exist on their own, schedule them out and move them into the ERROR
- * state. Also see _perf_event_enable(), it will not be able to recover
- * this ERROR state.
- */
-static inline void perf_remove_sibling_event(struct perf_event *event)
-{
-	event_sched_out(event, event->ctx);
-	perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
-}
-
 static void perf_group_detach(struct perf_event *event)
 {
 	struct perf_event *leader = event->group_leader;
@@ -2252,8 +2240,15 @@ static void perf_group_detach(struct perf_event *event)
 	 */
 	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
 
+		/*
+		 * Events that have PERF_EV_CAP_SIBLING require being part of
+		 * a group and cannot exist on their own, schedule them out
+		 * and move them into the ERROR state. Also see
+		 * _perf_event_enable(), it will not be able to recover this
+		 * ERROR state.
+		 */
 		if (sibling->event_caps & PERF_EV_CAP_SIBLING)
-			perf_remove_sibling_event(sibling);
+			__event_disable(sibling, ctx, PERF_EVENT_STATE_ERROR);
 
 		sibling->group_leader = sibling;
 		list_del_init(&sibling->sibling_list);
@@ -2513,6 +2508,15 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 	event_function_call(event, __perf_remove_from_context, (void *)flags);
 }
 
+static void __event_disable(struct perf_event *event,
+			    struct perf_event_context *ctx,
+			    enum perf_event_state state)
+{
+	event_sched_out(event, ctx);
+	perf_cgroup_event_disable(event, ctx);
+	perf_event_set_state(event, state);
+}
+
 /*
  * Cross CPU call to disable a performance event
  */
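
With __event_disable() defined above, the three paths that move an event to OFF or ERROR (perf_put_aux_event(), the PERF_EV_CAP_SIBLING handling in perf_group_detach(), and __perf_event_disable() below) all go through the same sched-out, perf_cgroup_event_disable() and state-change sequence; this is the unification the commit message describes.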
@@ -2527,13 +2531,18 @@ static void __perf_event_disable(struct perf_event *event,
 	perf_pmu_disable(event->pmu_ctx->pmu);
 	ctx_time_update_event(ctx, event);
 
+	/*
+	 * When disabling a group leader, the whole group becomes ineligible
+	 * to run, so schedule out the full group.
+	 */
 	if (event == event->group_leader)
 		group_sched_out(event, ctx);
-	else
-		event_sched_out(event, ctx);
 
-	perf_event_set_state(event, PERF_EVENT_STATE_OFF);
-	perf_cgroup_event_disable(event, ctx);
+	/*
+	 * But only mark the leader OFF; the siblings will remain
+	 * INACTIVE.
+	 */
+	__event_disable(event, ctx, PERF_EVENT_STATE_OFF);
 
 	perf_pmu_enable(event->pmu_ctx->pmu);
 }
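
Note that in the rewritten __perf_event_disable() a group leader still has its whole group scheduled out via group_sched_out(), but only the leader itself is marked OFF; as the new comment says, the siblings remain INACTIVE. Non-leader events now take only the __event_disable() path, which keeps the old event_sched_out() plus OFF behaviour, with perf_cgroup_event_disable() running before the state change instead of after it.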
