diff --git a/include/linux/sched.h b/include/linux/sched.h
index 16dfa4084e1763..1a5bbb0b3bace6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1511,6 +1511,7 @@ struct sched_dl_entity {
 	 * code.
 	 */
 	int dl_throttled, dl_boosted, dl_yielded, dl_non_contending;
+	int dl_overrun;
 
 	/*
 	 * Bandwidth enforcement timer. Each -deadline task has its
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 39827e30061357..8544cab36debc2 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -49,6 +49,12 @@
  */
 #define SCHED_FLAG_RESET_ON_FORK	0x01
 #define SCHED_FLAG_RECLAIM		0x02
-#define SCHED_FLAG_SPECIAL		0x04
+#define SCHED_FLAG_DL_OVERRUN		0x04
+#define SCHED_FLAG_SPECIAL		0x08
+
+#define SCHED_FLAG_ALL	(SCHED_FLAG_RESET_ON_FORK	| \
+			 SCHED_FLAG_RECLAIM		| \
+			 SCHED_FLAG_SPECIAL		| \
+			 SCHED_FLAG_DL_OVERRUN)
 
 #endif /* _UAPI_LINUX_SCHED_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 10324a516bc080..a07639e4953f7d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2233,6 +2233,7 @@ void __dl_clear_params(struct task_struct *p)
 	dl_se->dl_throttled = 0;
 	dl_se->dl_yielded = 0;
 	dl_se->dl_non_contending = 0;
+	dl_se->dl_overrun = 0;
 }
 
 /*
@@ -4006,14 +4007,29 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
 }
 
 static void
-__getparam_dl(struct task_struct *p, struct sched_attr *attr)
+__getparam_dl(struct task_struct *p, struct sched_attr *attr, unsigned int flags)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
 
 	attr->sched_priority = p->rt_priority;
-	attr->sched_runtime = dl_se->dl_runtime;
-	attr->sched_deadline = dl_se->dl_deadline;
+
+	if (flags == 1 && p == current) {
+		update_curr_dl(task_rq(p));
+
+		/*
+		 * sched_runtime can never be negative because, since this
+		 * operation can be performed by the task on its own
+		 * sched_attr, if the bandwidth is <= 0, then the task is
+		 * throttled and therefore cannot perform the syscall.
+		 */
+		attr->sched_runtime = dl_se->runtime;
+		attr->sched_deadline = dl_se->deadline;
+	} else {
+		attr->sched_runtime = dl_se->dl_runtime;
+		attr->sched_deadline = dl_se->dl_deadline;
+	}
 	attr->sched_period = dl_se->dl_period;
+
 	attr->sched_flags = dl_se->flags;
 }
 
@@ -4119,10 +4135,7 @@ static int __sched_setscheduler(struct task_struct *p,
 			return -EINVAL;
 	}
 
-	if (attr->sched_flags &
-	    ~(SCHED_FLAG_RESET_ON_FORK |
-	      SCHED_FLAG_RECLAIM |
-	      SCHED_FLAG_SPECIAL))
+	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
 		return -EINVAL;
 
 	/*
@@ -4281,7 +4294,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	p->sched_reset_on_fork = reset_on_fork;
 	oldprio = p->prio;
 
-	if (pi) {
+	if (!dl_task(p) && pi) {
 		/*
 		 * Take priority boosted tasks into account. If the new
 		 * effective priority is unchanged, we just store the new
@@ -4683,7 +4696,7 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	int retval;
 
 	if (!uattr || pid < 0 || size > PAGE_SIZE ||
-	    size < SCHED_ATTR_SIZE_VER0 || flags)
+	    size < SCHED_ATTR_SIZE_VER0)
 		return -EINVAL;
 
 	rcu_read_lock();
@@ -4700,7 +4713,7 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	if (p->sched_reset_on_fork)
 		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
 	if (task_has_dl_policy(p))
-		__getparam_dl(p, &attr);
+		__getparam_dl(p, &attr, flags);
 	else if (task_has_rt_policy(p))
 		attr.sched_priority = p->rt_priority;
 	else
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d4e91064b72139..dacbe0d42f23b9 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -542,9 +542,13 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
 		.sched_flags = SCHED_FLAG_SPECIAL,
 		.sched_nice = 0,
 		.sched_priority = 0,
-		.sched_runtime = 0,
-		.sched_deadline = 0,
-		.sched_period = 0,
+		/*
+		 * Fake (unused) bandwidth; workaround to "fix"
+		 * priority inheritance.
+		 */
+		.sched_runtime = 1000000,
+		.sched_deadline = 10000000,
+		.sched_period = 10000000,
 	};
 	struct cpufreq_policy *policy = sg_policy->policy;
 	int ret;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b7c6c9791bf656..7b344095b6d64a 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -48,6 +48,9 @@ void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	u64 old = dl_rq->running_bw;
 
+	if (unlikely(dl_entity_is_special(dl_se)))
+		return;
+
 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 	dl_rq->running_bw += dl_se->dl_bw;
 	WARN_ON(dl_rq->running_bw < old); /* overflow */
@@ -61,6 +64,9 @@ void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	u64 old = dl_rq->running_bw;
 
+	if (unlikely(dl_entity_is_special(dl_se)))
+		return;
+
 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 	dl_rq->running_bw -= dl_se->dl_bw;
 	WARN_ON(dl_rq->running_bw > old); /* underflow */
@@ -75,6 +81,9 @@ void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	u64 old = dl_rq->this_bw;
 
+	if (unlikely(dl_entity_is_special(dl_se)))
+		return;
+
 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 	dl_rq->this_bw += dl_se->dl_bw;
 	WARN_ON(dl_rq->this_bw < old); /* overflow */
@@ -85,6 +94,9 @@ void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	u64 old = dl_rq->this_bw;
 
+	if (unlikely(dl_entity_is_special(dl_se)))
+		return;
+
 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 	dl_rq->this_bw -= dl_se->dl_bw;
 	WARN_ON(dl_rq->this_bw > old); /* underflow */
@@ -866,7 +878,7 @@ u64 grub_reclaim(u64 delta, struct rq *rq, u64 u)
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
  */
-static void update_curr_dl(struct rq *rq)
+void update_curr_dl(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_dl_entity *dl_se = &curr->dl;
@@ -914,6 +926,12 @@ static void update_curr_dl(struct rq *rq)
 throttle:
 	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
 		dl_se->dl_throttled = 1;
+
+		/* If requested, inform the user about runtime overruns. */
+		if (dl_runtime_exceeded(dl_se) &&
+		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
+			dl_se->dl_overrun = 1;
+
 		__dequeue_task_dl(rq, curr, 0);
 		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f02ae598482fff..796c39eb9a877c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -84,6 +84,8 @@ static inline void update_cpu_load_active(struct rq *this_rq) { }
  */
 #define RUNTIME_INF	((u64)~0ULL)
 
+void update_curr_dl(struct rq *rq);
+
 static inline int idle_policy(int policy)
 {
 	return policy == SCHED_IDLE;
@@ -186,7 +188,7 @@ struct dl_bandwidth {
 
 static inline int dl_bandwidth_enabled(void)
 {
-	return sysctl_sched_rt_runtime >= 0;
+	return 0;
 }
 
 extern struct dl_bw *dl_bw_of(int i);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 404eb294b2277a..26682fa916d147 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * Called after updating RLIMIT_CPU to run cpu timer and update
@@ -851,6 +852,14 @@ check_timers_list(struct list_head *timers,
 	return 0;
 }
 
+static inline void check_dl_overrun(struct task_struct *tsk)
+{
+	if (tsk->dl.dl_overrun) {
+		tsk->dl.dl_overrun = 0;
+		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+	}
+}
+
 /*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
@@ -865,6 +874,9 @@ static void check_thread_timers(struct task_struct *tsk,
 	unsigned long long expires;
 	unsigned long soft;
 
+	if (dl_task(tsk))
+		check_dl_overrun(tsk);
+
 	/*
 	 * If cputime_expires is zero, then there are no active
 	 * per thread CPU timers.
@@ -969,6 +981,9 @@ static void check_process_timers(struct task_struct *tsk,
 	struct task_cputime cputime;
 	unsigned long soft;
 
+	if (dl_task(tsk))
+		check_dl_overrun(tsk);
+
 	/*
 	 * If cputimer is not running, then there are no active
 	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
@@ -1175,6 +1190,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 			return 1;
 	}
 
+	if (dl_task(tsk) && tsk->dl.dl_overrun)
+		return 1;
+
 	return 0;
 }
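
For reference, a minimal userspace sketch (not part of the patch) of how the new
SCHED_FLAG_DL_OVERRUN flag could be consumed: the task requests SCHED_DEADLINE
with the flag set and installs a SIGXCPU handler, which the posix-cpu-timers path
above fires whenever dl_overrun is set. The struct sched_attr layout and the
sched_setattr() wrapper are spelled out locally because glibc does not provide
them; SCHED_DEADLINE == 6 and the 0x04 flag value are taken from this patch's
uapi header, and SYS_sched_setattr is assumed to be exposed by the installed
kernel headers.

#define _GNU_SOURCE
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#define SCHED_DEADLINE		6
#define SCHED_FLAG_DL_OVERRUN	0x04	/* value introduced by this patch */

/* Mirrors include/uapi/linux/sched.h (SCHED_ATTR_SIZE_VER0 layout). */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static volatile sig_atomic_t overrun_seen;

static void on_sigxcpu(int sig)
{
	(void)sig;
	overrun_seen = 1;	/* kernel reported a runtime overrun */
}

int main(void)
{
	struct sigaction sa = { .sa_handler = on_sigxcpu };
	struct sched_attr attr;

	sigaction(SIGXCPU, &sa, NULL);

	memset(&attr, 0, sizeof(attr));
	attr.size           = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_flags    = SCHED_FLAG_DL_OVERRUN;	/* ask for SIGXCPU on overrun */
	attr.sched_runtime  = 10ULL * 1000 * 1000;	/*  10 ms */
	attr.sched_deadline = 100ULL * 1000 * 1000;	/* 100 ms */
	attr.sched_period   = 100ULL * 1000 * 1000;	/* 100 ms */

	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}

	for (;;) {
		/* Burn CPU past the 10 ms runtime to provoke an overrun. */
		if (overrun_seen) {
			write(1, "runtime overrun\n", 16);
			overrun_seen = 0;
		}
	}
}

Running the sketch needs the usual SCHED_DEADLINE admission privileges
(root or CAP_SYS_NICE), and without the flag the same overrun would only
throttle the task silently.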