Skip to content

Commit

Permalink
Revert "sched: Improve the scheduler"
Browse files Browse the repository at this point in the history
This reverts commit 4e41a7c.
  • Loading branch information
mizdrake7 committed Dec 23, 2023
1 parent 309a544 commit 2f9cdb0
Show file tree
Hide file tree
Showing 4 changed files with 7 additions and 70 deletions.
1 change: 0 additions & 1 deletion include/linux/sched/sysctl.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,6 @@ extern unsigned int sysctl_sched_walt_rotate_big_tasks;
extern unsigned int sysctl_sched_min_task_util_for_boost;
extern unsigned int sysctl_sched_min_task_util_for_colocation;
extern unsigned int sysctl_sched_little_cluster_coloc_fmin_khz;
extern unsigned int sysctl_sched_asym_cap_sibling_freq_match_pct;

extern int
walt_proc_update_handler(struct ctl_table *table, int write,
Expand Down
55 changes: 6 additions & 49 deletions kernel/sched/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -2226,7 +2226,7 @@ u64 freq_policy_load(struct rq *rq);
extern u64 walt_load_reported_window;

static inline unsigned long
__cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
{
u64 util, util_unboosted;
struct rq *rq = cpu_rq(cpu);
Expand Down Expand Up @@ -2264,41 +2264,6 @@ __cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
return (util >= capacity) ? capacity : util;
}

/*
 * Take the max of a CPU's own utilization and a sibling's utilization
 * scaled by x percent (mult_frac(other, x, 100) == other * x / 100).
 * Used to keep asym-capacity sibling CPUs' frequency requests aligned.
 */
#define ADJUSTED_ASYM_CAP_CPU_UTIL(orig, other, x) \
	(max(orig, mult_frac(other, x, 100)))

/*
 * cpu_util_freq_walt() - WALT utilization of @cpu for frequency selection.
 * @cpu:       CPU whose utilization is requested.
 * @walt_load: out-parameter; prev_window_util, nl and pl are updated here
 *             when @cpu is an asym-capacity sibling.
 *
 * For CPUs outside asym_cap_sibling_cpus this is a plain pass-through to
 * __cpu_util_freq_walt(). For asym-capacity siblings, @cpu's utilization
 * is floored by its sibling's utilization scaled by the
 * sysctl_sched_asym_cap_sibling_freq_match_pct percentage, so the
 * siblings' frequency demands track each other.
 *
 * NOTE(review): the loop reads only one "other" sibling's load into
 * wl_other; with more than two siblings util_other would keep only the
 * last one visited — presumably exactly two CPUs are in
 * asym_cap_sibling_cpus. Confirm against where the mask is populated.
 *
 * Return: the adjusted utilization, clamped to capacity_orig_of(@cpu).
 */
static inline unsigned long
cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
{
	struct sched_walt_cpu_load wl_other = {0};
	unsigned long util = 0, util_other = 0;
	unsigned long capacity = capacity_orig_of(cpu);
	int i, mpct = sysctl_sched_asym_cap_sibling_freq_match_pct;

	/* Non-sibling CPUs need no cross-CPU adjustment. */
	if (!cpumask_test_cpu(cpu, &asym_cap_sibling_cpus))
		return __cpu_util_freq_walt(cpu, walt_load);

	/* Gather our own load and the sibling's load in one pass. */
	for_each_cpu(i, &asym_cap_sibling_cpus) {
		if (i == cpu)
			util = __cpu_util_freq_walt(cpu, walt_load);
		else
			util_other = __cpu_util_freq_walt(i, &wl_other);
	}

	/*
	 * The last (highest-numbered) sibling always matches the other's
	 * demand at 100%, regardless of the sysctl percentage.
	 */
	if (cpu == cpumask_last(&asym_cap_sibling_cpus))
		mpct = 100;

	util = ADJUSTED_ASYM_CAP_CPU_UTIL(util, util_other, mpct);
	walt_load->prev_window_util = util;

	/* Apply the same floor to the new-task and predicted loads. */
	walt_load->nl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->nl, wl_other.nl,
						   mpct);
	walt_load->pl = ADJUSTED_ASYM_CAP_CPU_UTIL(walt_load->pl, wl_other.pl,
						   mpct);

	/* Never report more than the CPU's original capacity. */
	return (util >= capacity) ? capacity : util;
}

static inline unsigned long
cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
{
Expand Down Expand Up @@ -3146,9 +3111,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
if (src_cpu == dst_cpu)
return 1;

if (asym_cap_siblings(src_cpu, dst_cpu))
return 1;

return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
}

Expand Down Expand Up @@ -3306,16 +3268,11 @@ static inline bool is_min_capacity_cpu(int cpu)
return -1;
}

/*
 * Stub: no cluster bookkeeping in this configuration — presumably the
 * !CONFIG_SCHED_WALT fallback; confirm against the surrounding #ifdef.
 * Callers must tolerate a NULL cluster.
 */
static inline struct sched_cluster *rq_cluster(struct rq *rq)
{
	return NULL;
}

/* Stub: no asym-capacity sibling tracking in this configuration; no two CPUs are ever siblings. */
static inline int asym_cap_siblings(int cpu1, int cpu2) { return 0; }

static inline u64 scale_load_to_cpu(u64 load, int cpu)
{
return load;
return unlikely(min_cpu == -1) ||
capacity_orig_of(cpu) == capacity_orig_of(min_cpu);
#else
return true;
#endif
}

#ifdef CONFIG_SMP
Expand Down
12 changes: 1 addition & 11 deletions kernel/sched/walt.c
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,6 @@ __read_mostly unsigned int sysctl_sched_cpu_high_irqload = TICK_NSEC;
unsigned int sysctl_sched_walt_rotate_big_tasks;
unsigned int walt_rotation_enabled;

__read_mostly unsigned int sysctl_sched_asym_cap_sibling_freq_match_pct = 100;
__read_mostly unsigned int sched_ravg_hist_size = 5;

static __read_mostly unsigned int sched_io_is_busy = 1;
Expand Down Expand Up @@ -3321,7 +3320,7 @@ void walt_irq_work(struct irq_work *irq_work)
struct rq *rq;
int cpu;
u64 wc;
bool is_migration = false, is_asym_migration = false;
bool is_migration = false;
u64 total_grp_load = 0, min_cluster_grp_load = 0;
int level = 0;

Expand Down Expand Up @@ -3352,11 +3351,6 @@ void walt_irq_work(struct irq_work *irq_work)
account_load_subtractions(rq);
aggr_grp_load += rq->grp_time.prev_runnable_sum;
}
if (is_migration && rq->notif_pending &&
cpumask_test_cpu(cpu, &asym_cap_sibling_cpus)) {
is_asym_migration = true;
rq->notif_pending = false;
}
}

cluster->aggr_grp_load = aggr_grp_load;
Expand Down Expand Up @@ -3398,10 +3392,6 @@ void walt_irq_work(struct irq_work *irq_work)
}
}

if (is_asym_migration && cpumask_test_cpu(cpu,
&asym_cap_sibling_cpus))
flag |= SCHED_CPUFREQ_INTERCLUSTER_MIG;

if (i == num_cpus)
cpufreq_update_util(cpu_rq(cpu), flag);
else
Expand Down
9 changes: 0 additions & 9 deletions kernel/sysctl.c
Original file line number Diff line number Diff line change
Expand Up @@ -498,15 +498,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = sched_boost_handler,
.extra1 = &neg_three,
.extra2 = &three,

{
.procname = "sched_asym_cap_sibling_freq_match_pct",
.data = &sysctl_sched_asym_cap_sibling_freq_match_pct,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &one,
.extra2 = &one_hundred,
},
#endif
{
Expand Down

0 comments on commit 2f9cdb0

Please sign in to comment.