diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
index ee20da56d5d230..840009dc1e8dc1 100644
--- a/kernel/sched/bmq.h
+++ b/kernel/sched/bmq.h
@@ -71,14 +71,16 @@ inline int task_running_nice(struct task_struct *p)
 }
 
 static inline void sched_update_rq_clock(struct rq *rq) {}
-static inline void sched_task_renew(struct task_struct *p, const struct rq *rq) {}
-static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
 
-static void sched_task_fork(struct task_struct *p, struct rq *rq)
+static inline void sched_task_renew(struct task_struct *p, const struct rq *rq)
 {
-	p->boost_prio = MAX_PRIORITY_ADJ;
+	if (rq_switch_time(rq) > sysctl_sched_base_slice)
+		deboost_task(p);
 }
 
+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
+static void sched_task_fork(struct task_struct *p, struct rq *rq) {}
+
 static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
 {
 	p->boost_prio = MAX_PRIORITY_ADJ;
@@ -92,10 +94,6 @@ static inline void sched_task_ttwu(struct task_struct *p)
 
 static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
 {
-	u64 switch_ns = rq_switch_time(rq);
-
-	if (switch_ns < boost_threshold(p))
+	if (rq_switch_time(rq) < boost_threshold(p))
 		boost_task(p);
-	else if (switch_ns > sysctl_sched_base_slice)
-		deboost_task(p);
 }
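
Net effect of the patch: deboosting moves off the deactivate (sleep) path and onto the slice-renewal path. Previously a CPU-bound task was only deboosted when it eventually deactivated, so a hog that never slept was never penalized; now it is deboosted each time it runs past sysctl_sched_base_slice and has its slice renewed. Deactivation is left to do one job, boosting tasks that slept after a short run, and sched_task_fork no longer resets boost_prio, so as far as this file is concerned the child keeps whatever adjustment it inherited. Below is a minimal user-space sketch of the resulting policy, assuming the usual BMQ semantics (boost_prio is a signed priority adjustment, smaller meaning more interactive, stepped within [-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ]); the struct layout, the threshold value, the ran_ns parameter standing in for rq_switch_time(rq), and main() are hypothetical, for illustration only.

/*
 * Illustrative model of the boost/deboost policy after this change.
 * Helper names mirror bmq.h; everything else is an assumption.
 */
#include <stdio.h>

#define MAX_PRIORITY_ADJ	4
#define BASE_SLICE_NS		4000000ULL	/* stand-in for sysctl_sched_base_slice */

struct task {
	int boost_prio;		/* signed adjustment; lower == more interactive */
};

/* Assumed threshold: how long a task may run and still count as interactive. */
static unsigned long long boost_threshold(const struct task *p)
{
	return BASE_SLICE_NS >> 1;
}

static void boost_task(struct task *p)
{
	if (p->boost_prio > -MAX_PRIORITY_ADJ)
		p->boost_prio--;
}

static void deboost_task(struct task *p)
{
	if (p->boost_prio < MAX_PRIORITY_ADJ)
		p->boost_prio++;
}

/* New behavior: a task that exhausts its slice is deboosted at renewal... */
static void sched_task_renew(struct task *p, unsigned long long ran_ns)
{
	if (ran_ns > BASE_SLICE_NS)
		deboost_task(p);
}

/* ...while deactivation now only ever boosts tasks that slept early. */
static void sched_task_deactivate(struct task *p, unsigned long long ran_ns)
{
	if (ran_ns < boost_threshold(p))
		boost_task(p);
}

int main(void)
{
	struct task interactive = { 0 }, hog = { 0 };

	/* Interactive task: runs 0.5 ms, then sleeps. */
	sched_task_deactivate(&interactive, 500000ULL);
	/* CPU hog: burns through a full slice and gets it renewed. */
	sched_task_renew(&hog, 4100000ULL);

	printf("interactive boost_prio: %d\n", interactive.boost_prio);	/* -1 */
	printf("hog boost_prio:         %d\n", hog.boost_prio);		/*  1 */
	return 0;
}

With this split, each decision is taken where the information actually exists: only the renewal path knows the task burned its whole slice, and only the deactivate path knows it voluntarily slept early, which also lets sched_task_deactivate drop its switch_ns temporary.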