Skip to content
This repository has been archived by the owner on Jun 19, 2024. It is now read-only.

kernel: sched: update from P615XXS7FXA1 #22

Merged
merged 1 commit into from
May 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions include/linux/ems.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
#include <linux/sched/idle.h>
#include <linux/sched/topology.h>

struct rq;

struct gb_qos_request {
struct plist_node node;
char *name;
Expand Down Expand Up @@ -50,6 +52,10 @@ extern int exynos_need_active_balance(enum cpu_idle_type idle,
/* wakeup balance */
extern int
exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int sync);
extern void update_last_waked_ns_task(struct task_struct *p);

/* load balance */
extern int frt_idle_pull_tasks(struct rq *dst_rq);

/* ontime migration */
extern void ontime_migration(void);
Expand Down Expand Up @@ -86,6 +92,12 @@ exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int sync
return -1;
}

/* !CONFIG_SCHED_EMS stub: last-wakeup timestamp tracking is compiled out. */
static inline void update_last_waked_ns_task(struct task_struct *p) { }
struct rq;

/*
 * !CONFIG_SCHED_EMS stub: FRT idle-pull balancing is compiled out.
 * Returns -1 ("nothing pulled") so callers fall through to the
 * generic idle-balance path.
 */
static inline int frt_idle_pull_tasks(struct rq *dst_rq)
{
	return -1;
}
/* !CONFIG_SCHED_EMS stub: ontime task migration is compiled out. */
static inline void ontime_migration(void)
{
}
static inline int ontime_can_migration(struct task_struct *p, int cpu)
{
Expand Down
1 change: 1 addition & 0 deletions include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -725,6 +725,7 @@ struct task_struct {
#endif
#ifdef CONFIG_SCHED_USE_FLUID_RT
int victim_flag;
u64 last_waked_ns;
#endif

#ifdef CONFIG_SCHED_EMS
Expand Down
128 changes: 128 additions & 0 deletions include/trace/events/ems.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,134 @@
#include <linux/sched.h>
#include <linux/tracepoint.h>

/*
 * Tracepoint: snapshot of one CPU's load state during load balancing.
 * 'label' is a free-form reason string; it is truncated to 63 chars.
 */
TRACE_EVENT(lb_cpu_util,

	TP_PROTO(int cpu, char *label),

	TP_ARGS(cpu, label),

	TP_STRUCT__entry(
		__field( int, cpu )
		__field( int, active_balance )
		__field( int, idle )
		__field( int, nr_running )
		__field( int, cfs_nr_running )
		__field( unsigned long, cpu_util )
		__field( unsigned long, capacity_orig )
		__array( char, label, 64 )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->active_balance = cpu_rq(cpu)->active_balance;
		__entry->idle = idle_cpu(cpu);
		__entry->nr_running = cpu_rq(cpu)->nr_running;
		__entry->cfs_nr_running = cpu_rq(cpu)->cfs.h_nr_running;
		__entry->cpu_util = cpu_util(cpu);
		__entry->capacity_orig = capacity_orig_of(cpu);
		strncpy(__entry->label, label, 63);
		/*
		 * strncpy() does not NUL-terminate (and leaves byte 63
		 * uninitialized) when strlen(label) >= 63; terminate
		 * explicitly so no garbage reaches the trace buffer.
		 */
		__entry->label[63] = '\0';
	),

	TP_printk("cpu=%d ab=%d idle=%d nr_running=%d cfs_nr_running=%d cpu_util=%lu capacity=%lu reason=%s",
		__entry->cpu, __entry->active_balance, __entry->idle, __entry->nr_running,
		__entry->cfs_nr_running, __entry->cpu_util, __entry->capacity_orig, __entry->label)
);

/*
 * Tracepoint: outcome of one newidle-balance attempt on this_cpu.
 * 'busy_cpu' is the CPU that was probed for pullable work, 'pulled'
 * whether a task was actually migrated, and 'short_idle' the caller's
 * estimate that the idle period is too short to be worth balancing.
 * The remaining fields are sampled from this_cpu's runqueue at trace time.
 */
TRACE_EVENT(lb_newidle_balance,

TP_PROTO(int this_cpu, int busy_cpu, int pulled, bool short_idle),

TP_ARGS(this_cpu, busy_cpu, pulled, short_idle),

TP_STRUCT__entry(
__field(int, cpu)
__field(int, busy_cpu)
__field(int, pulled)
__field(unsigned int, nr_running)
__field(unsigned int, rt_nr_running)
__field(int, nr_iowait)
__field(u64, avg_idle)
__field(bool, short_idle)
__field(int, overload)
),

TP_fast_assign(
__entry->cpu = this_cpu;
__entry->busy_cpu = busy_cpu;
__entry->pulled = pulled;
__entry->nr_running = cpu_rq(this_cpu)->nr_running;
__entry->rt_nr_running = cpu_rq(this_cpu)->rt.rt_nr_running;
__entry->nr_iowait = atomic_read(&(cpu_rq(this_cpu)->nr_iowait));
__entry->avg_idle = cpu_rq(this_cpu)->avg_idle;
__entry->short_idle = short_idle;
__entry->overload = cpu_rq(this_cpu)->rd->overload;
),

TP_printk("cpu=%d busy_cpu=%d pulled=%d nr_run=%u rt_nr_run=%u nr_iowait=%d avg_idle=%llu short_idle=%d overload=%d",
__entry->cpu, __entry->busy_cpu, __entry->pulled,
__entry->nr_running, __entry->rt_nr_running,
__entry->nr_iowait, __entry->avg_idle,
__entry->short_idle, __entry->overload)
);

/*
 * Tracepoint: active (forced) migration of a task from prev_cpu to
 * new_cpu during load balancing. 'label' is a free-form reason string,
 * truncated to 63 chars.
 *
 * NOTE(review): the original declared a u64 'misfit' field that was
 * never assigned in TP_fast_assign nor printed, so every record
 * exported uninitialized bytes to the trace buffer; the dead field is
 * dropped here.
 */
TRACE_EVENT(lb_active_migration,

	TP_PROTO(struct task_struct *p, int prev_cpu, int new_cpu, char *label),

	TP_ARGS(p, prev_cpu, new_cpu, label),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, util )
		__field( int, prev_cpu )
		__field( int, new_cpu )
		__array( char, label, 64 )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->util = task_util(p);
		__entry->prev_cpu = prev_cpu;
		__entry->new_cpu = new_cpu;
		strncpy(__entry->label, label, 63);
		/*
		 * strncpy() does not NUL-terminate when strlen(label) >= 63;
		 * terminate explicitly to keep the record well-formed.
		 */
		__entry->label[63] = '\0';
	),

	TP_printk("comm=%s pid=%d util=%llu prev_cpu=%d new_cpu=%d reason=%s",
		__entry->comm, __entry->pid, __entry->util,
		__entry->prev_cpu, __entry->new_cpu, __entry->label)
);

/*
 * Tracepoint: result of a can_migrate_task()-style eligibility check.
 * 'migrate' is the verdict (non-zero = may migrate) and 'label' the
 * free-form reason string, truncated to 63 chars.
 */
TRACE_EVENT(lb_can_migrate_task,

	TP_PROTO(struct task_struct *tsk, int dst_cpu, int migrate, char *label),

	TP_ARGS(tsk, dst_cpu, migrate, label),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, src_cpu )
		__field( int, dst_cpu )
		__field( int, migrate )
		__array( char, label, 64 )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->src_cpu = task_cpu(tsk);
		__entry->dst_cpu = dst_cpu;
		__entry->migrate = migrate;
		strncpy(__entry->label, label, 63);
		/*
		 * strncpy() does not NUL-terminate when strlen(label) >= 63;
		 * terminate explicitly so the trace record never carries an
		 * unterminated string (byte 63 would otherwise be garbage).
		 */
		__entry->label[63] = '\0';
	),

	TP_printk("comm=%s pid=%d src_cpu=%d dst_cpu=%d migrate=%d reason=%s",
		__entry->comm, __entry->pid, __entry->src_cpu, __entry->dst_cpu,
		__entry->migrate, __entry->label)
);
/*
* Tracepoint for selecting eco cpu
*/
Expand Down
27 changes: 27 additions & 0 deletions include/trace/events/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -731,6 +731,33 @@ TRACE_EVENT(sched_fluid_stat,
__entry->util_avg,
__entry->selectby)
);

/*
 * Tracepoint: an FRT (fluid RT) idle-pull migrated task 'tsk' from
 * src_cpu to dst_cpu. Records the task's comm and pid alongside the
 * two CPUs involved.
 */
TRACE_EVENT(sched_frt_idle_pull_tasks,

TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

TP_ARGS(tsk, src_cpu, dst_cpu),

TP_STRUCT__entry(
__array( char, name, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, src_cpu )
__field( int, dst_cpu )
),

TP_fast_assign(
/* comm is a fixed TASK_COMM_LEN array, always NUL-terminated by the kernel */
memcpy(__entry->name, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->src_cpu = src_cpu;
__entry->dst_cpu = dst_cpu;
),
TP_printk("frt: comm=%s pid=%d src_cpu=%d dst_cpu=%d",
__entry->name,
__entry->pid,
__entry->src_cpu,
__entry->dst_cpu)
);

/*
* Tracepoint for accounting sched averages for tasks.
*/
Expand Down
3 changes: 3 additions & 0 deletions kernel/sched/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -2116,6 +2116,9 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,

walt_try_to_wake_up(p);

if (sched_feat(EXYNOS_MS))
update_last_waked_ns_task(p);

p->sched_contributes_to_load = !!task_contributes_to_load(p);
p->state = TASK_WAKING;

Expand Down
2 changes: 1 addition & 1 deletion kernel/sched/ems/band.c
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@
#include <linux/sched/signal.h>
#include <trace/events/ems.h>

#include "ems.h"
#include "../sched.h"
#include "ems.h"

static struct task_band *lookup_band(struct task_struct *p)
{
Expand Down
Loading