Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dl walleye 4.4 ab #1

Closed
wants to merge 5 commits (source and target branch names missing from page extraction)
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -1511,6 +1511,7 @@ struct sched_dl_entity {
* code.
*/
int dl_throttled, dl_boosted, dl_yielded, dl_non_contending;
int dl_overrun;

/*
* Bandwidth enforcement timer. Each -deadline task has its
Expand Down
8 changes: 7 additions & 1 deletion include/uapi/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,12 @@
*/
#define SCHED_FLAG_RESET_ON_FORK 0x01
#define SCHED_FLAG_RECLAIM 0x02
#define SCHED_FLAG_SPECIAL 0x04
#define SCHED_FLAG_DL_OVERRUN 0x04
#define SCHED_FLAG_SPECIAL 0x08

#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \
SCHED_FLAG_RECLAIM | \
SCHED_FLAG_SPECIAL | \
SCHED_FLAG_DL_OVERRUN)

#endif /* _UAPI_LINUX_SCHED_H */
33 changes: 23 additions & 10 deletions kernel/sched/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -2233,6 +2233,7 @@ void __dl_clear_params(struct task_struct *p)
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
dl_se->dl_overrun = 0;
}

/*
Expand Down Expand Up @@ -4006,14 +4007,29 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
}

static void
__getparam_dl(struct task_struct *p, struct sched_attr *attr)
__getparam_dl(struct task_struct *p, struct sched_attr *attr, unsigned int flags)
{
struct sched_dl_entity *dl_se = &p->dl;

attr->sched_priority = p->rt_priority;
attr->sched_runtime = dl_se->dl_runtime;
attr->sched_deadline = dl_se->dl_deadline;

if (flags == 1 && p == current) {
update_curr_dl(task_rq(p));

/*
* sched_runtime can never be negative because, since this
* operation can be performed by the task on its own
* sched_attr, if the bandwidth is <= 0, then the task is
* throttled and therefore cannot perform the syscall.
*/
attr->sched_runtime = dl_se->runtime;
attr->sched_deadline = dl_se->deadline;
} else {
attr->sched_runtime = dl_se->dl_runtime;
attr->sched_deadline = dl_se->dl_deadline;
}
attr->sched_period = dl_se->dl_period;

attr->sched_flags = dl_se->flags;
}

Expand Down Expand Up @@ -4119,10 +4135,7 @@ static int __sched_setscheduler(struct task_struct *p,
return -EINVAL;
}

if (attr->sched_flags &
~(SCHED_FLAG_RESET_ON_FORK |
SCHED_FLAG_RECLAIM |
SCHED_FLAG_SPECIAL))
if (attr->sched_flags & ~(SCHED_FLAG_ALL))
return -EINVAL;

/*
Expand Down Expand Up @@ -4281,7 +4294,7 @@ static int __sched_setscheduler(struct task_struct *p,
p->sched_reset_on_fork = reset_on_fork;
oldprio = p->prio;

if (pi) {
if (!dl_task(p) && pi) {
/*
* Take priority boosted tasks into account. If the new
* effective priority is unchanged, we just store the new
Expand Down Expand Up @@ -4683,7 +4696,7 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
int retval;

if (!uattr || pid < 0 || size > PAGE_SIZE ||
size < SCHED_ATTR_SIZE_VER0 || flags)
size < SCHED_ATTR_SIZE_VER0)
return -EINVAL;

rcu_read_lock();
Expand All @@ -4700,7 +4713,7 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
if (p->sched_reset_on_fork)
attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
if (task_has_dl_policy(p))
__getparam_dl(p, &attr);
__getparam_dl(p, &attr, flags);
else if (task_has_rt_policy(p))
attr.sched_priority = p->rt_priority;
else
Expand Down
10 changes: 7 additions & 3 deletions kernel/sched/cpufreq_schedutil.c
Original file line number Diff line number Diff line change
Expand Up @@ -542,9 +542,13 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
.sched_flags = SCHED_FLAG_SPECIAL,
.sched_nice = 0,
.sched_priority = 0,
.sched_runtime = 0,
.sched_deadline = 0,
.sched_period = 0,
/*
* Fake (unused) bandwidth; workaround to "fix"
* priority inheritance.
*/
.sched_runtime = 1000000,
.sched_deadline = 10000000,
.sched_period = 10000000,
};
struct cpufreq_policy *policy = sg_policy->policy;
int ret;
Expand Down
20 changes: 19 additions & 1 deletion kernel/sched/deadline.c
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,9 @@ void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;

if (unlikely(dl_entity_is_special(dl_se)))
return;

lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq->running_bw += dl_se->dl_bw;
WARN_ON(dl_rq->running_bw < old); /* overflow */
Expand All @@ -61,6 +64,9 @@ void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;

if (unlikely(dl_entity_is_special(dl_se)))
return;

lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq->running_bw -= dl_se->dl_bw;
WARN_ON(dl_rq->running_bw > old); /* underflow */
Expand All @@ -75,6 +81,9 @@ void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;

if (unlikely(dl_entity_is_special(dl_se)))
return;

lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq->this_bw += dl_se->dl_bw;
WARN_ON(dl_rq->this_bw < old); /* overflow */
Expand All @@ -85,6 +94,9 @@ void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;

if (unlikely(dl_entity_is_special(dl_se)))
return;

lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq->this_bw -= dl_se->dl_bw;
WARN_ON(dl_rq->this_bw > old); /* underflow */
Expand Down Expand Up @@ -866,7 +878,7 @@ u64 grub_reclaim(u64 delta, struct rq *rq, u64 u)
* Update the current task's runtime statistics (provided it is still
* a -deadline task and has not been removed from the dl_rq).
*/
static void update_curr_dl(struct rq *rq)
void update_curr_dl(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_dl_entity *dl_se = &curr->dl;
Expand Down Expand Up @@ -914,6 +926,12 @@ static void update_curr_dl(struct rq *rq)
throttle:
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
dl_se->dl_throttled = 1;

/* If requested, inform the user about runtime overruns. */
if (dl_runtime_exceeded(dl_se) &&
(dl_se->flags & SCHED_FLAG_DL_OVERRUN))
dl_se->dl_overrun = 1;

__dequeue_task_dl(rq, curr, 0);
if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
Expand Down
4 changes: 3 additions & 1 deletion kernel/sched/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,8 @@ static inline void update_cpu_load_active(struct rq *this_rq) { }
*/
#define RUNTIME_INF ((u64)~0ULL)

void update_curr_dl(struct rq *rq);

static inline int idle_policy(int policy)
{
return policy == SCHED_IDLE;
Expand Down Expand Up @@ -186,7 +188,7 @@ struct dl_bandwidth {

static inline int dl_bandwidth_enabled(void)
{
return sysctl_sched_rt_runtime >= 0;
return 0;
}

extern struct dl_bw *dl_bw_of(int i);
Expand Down
18 changes: 18 additions & 0 deletions kernel/time/posix-cpu-timers.c
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include <linux/random.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/deadline.h>

/*
* Called after updating RLIMIT_CPU to run cpu timer and update
Expand Down Expand Up @@ -851,6 +852,14 @@ check_timers_list(struct list_head *timers,
return 0;
}

static inline void check_dl_overrun(struct task_struct *tsk)
{
if (tsk->dl.dl_overrun) {
tsk->dl.dl_overrun = 0;
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
}
}

/*
* Check for any per-thread CPU timers that have fired and move them off
* the tsk->cpu_timers[N] list onto the firing list. Here we update the
Expand All @@ -865,6 +874,9 @@ static void check_thread_timers(struct task_struct *tsk,
unsigned long long expires;
unsigned long soft;

if (dl_task(tsk))
check_dl_overrun(tsk);

/*
* If cputime_expires is zero, then there are no active
* per thread CPU timers.
Expand Down Expand Up @@ -969,6 +981,9 @@ static void check_process_timers(struct task_struct *tsk,
struct task_cputime cputime;
unsigned long soft;

if (dl_task(tsk))
check_dl_overrun(tsk);

/*
* If cputimer is not running, then there are no active
* process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
Expand Down Expand Up @@ -1175,6 +1190,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
return 1;
}

if (dl_task(tsk) && tsk->dl.dl_overrun)
return 1;

return 0;
}

Expand Down