scx_lavd: Do not set task's time slice to zero at the ops.enqueue() path
Note that on the ops.enqueue() path, setting a task's time slice to zero
is risky because we do not know the exact status of the task, so it can
trigger a zero-time-slice error such as the following:

   [ 8271.001818] sched_ext: ksoftirqd/1[70] has zero slice in pick_task_scx()

Hitting this zero-slice condition is harmful because the sched_ext core
then resets the task's time slice to SCX_SLICE_DFL (20 msec), which
increases latency spikes.
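
For reference, when the core later finds the zero slice (e.g., in
pick_task_scx()), its fallback roughly amounts to the following paraphrased
sketch (not verbatim kernel code):

   if (unlikely(!p->scx.slice)) {
           /* Warn about the zero slice ... */
           printk_deferred(KERN_WARNING
                           "sched_ext: %s[%d] has zero slice in %s()\n",
                           p->comm, p->pid, __func__);
           /* ... then refill with the 20 msec default, hurting latency. */
           p->scx.slice = SCX_SLICE_DFL;
   }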

Thus, we no longer set the time slice to 0 on the ops.enqueue() path and
instead rely on scx_bpf_kick_cpu(SCX_KICK_PREEMPT) all the time. Also, we
use 1 (instead of 0) as the marker indicating that scx_bpf_kick_cpu()
should be performed.
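
Conceptually, the preemption path now works like the sketch below (a
simplified, hypothetical helper; the real logic lives in try_kick_cpu()
in preempt.bpf.c in this commit):

   static void mark_and_kick_victim(struct task_struct *p, s32 victim_cpu)
   {
           u64 old = p->scx.slice;

           /*
            * Use 1 (not 0) as the "already victimized" marker so the
            * sched_ext core never observes a zero slice for this task.
            */
           if (old != 1 && old != 0)
                   __sync_bool_compare_and_swap(&p->scx.slice, old, 1);

           /*
            * Preempt via an explicit kick instead of relying on a zero
            * slice being noticed at the next tick.
            */
           scx_bpf_kick_cpu(victim_cpu, SCX_KICK_PREEMPT);
   }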

This should solve the following issue:

   #1283

Signed-off-by: Changwoo Min <changwoo@igalia.com>
Changwoo Min committed Feb 1, 2025
1 parent 57c7129 commit e812896
Showing 2 changed files with 17 additions and 46 deletions.
18 changes: 9 additions & 9 deletions scheds/rust/scx_lavd/src/bpf/main.bpf.c
@@ -1024,16 +1024,16 @@ static u64 find_proper_dsq(struct task_ctx *taskc, struct cpu_ctx *cpuc)
return cpuc->cpdom_alt_id;
}

static bool try_kick_task_idle_cpu(struct task_struct *p, struct task_ctx *taskc)
static bool try_kick_task_idle_cpu(struct task_struct *p,
struct task_ctx *taskc, s32 prev_cpu)
{
bool found_idle = false;
s32 prev_cpu, cpu;
s32 cpu;

/*
* Find an idle cpu but do not reserve the idle cpu. That is because
* there is no guarantee the idle cpu will be picked up at this point.
*/
prev_cpu = scx_bpf_task_cpu(p);
cpu = find_idle_cpu(p, taskc, prev_cpu, 0, false, &found_idle);
if (found_idle && cpu >= 0) {
scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
@@ -1082,7 +1082,7 @@ void BPF_STRUCT_OPS(lavd_enqueue, struct task_struct *p, u64 enq_flags)
{
struct cpu_ctx *cpuc_task, *cpuc_cur;
struct task_ctx *taskc;
s32 cpu_id;
s32 prev_cpu;
u64 dsq_id, now;

/*
@@ -1096,9 +1096,9 @@ void BPF_STRUCT_OPS(lavd_enqueue, struct task_struct *p, u64 enq_flags)
* always put the task to the global DSQ, so any idle CPU can pick it
* up.
*/
cpu_id = scx_bpf_task_cpu(p);
taskc = get_task_ctx(p);
cpuc_task = get_cpu_ctx_id(cpu_id);
prev_cpu = scx_bpf_task_cpu(p);
cpuc_task = get_cpu_ctx_id(prev_cpu);
cpuc_cur = get_cpu_ctx();
if (!cpuc_cur || !cpuc_task || !taskc)
return;
@@ -1115,8 +1115,8 @@ void BPF_STRUCT_OPS(lavd_enqueue, struct task_struct *p, u64 enq_flags)
*/
dsq_id = find_proper_dsq(taskc, cpuc_task);
now = scx_bpf_now();
if (can_direct_dispatch(p, taskc, cpuc_task, cpu_id, &enq_flags, now)) {
scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu_id,
if (can_direct_dispatch(p, taskc, cpuc_task, prev_cpu, &enq_flags, now)) {
scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | prev_cpu,
p->scx.slice, enq_flags);
return;
}
@@ -1128,7 +1128,7 @@ void BPF_STRUCT_OPS(lavd_enqueue, struct task_struct *p, u64 enq_flags)
* If there is an idle cpu for the task, try to kick it up now
* so it can consume the task immediately.
*/
if (try_kick_task_idle_cpu(p, taskc))
if (try_kick_task_idle_cpu(p, taskc, prev_cpu))
return;

/*
45 changes: 8 additions & 37 deletions scheds/rust/scx_lavd/src/bpf/preempt.bpf.c
@@ -130,18 +130,8 @@ static struct cpu_ctx *find_victim_cpu(const struct cpumask *cpumask,
cur_cpu = cpuc->cpu_id;

/*
* First, test the current CPU since it can skip the expensive IPI.
*/
if (can_cpu_be_kicked(now, cpuc) &&
bpf_cpumask_test_cpu(cur_cpu, cpumask) &&
can_cpu1_kick_cpu2(&prm_task, &prm_cpus[0], cpuc)) {
victim_cpu = &prm_task;
goto bingo_out;
}

/*
* If the current CPU cannot be a victim, let's check if it is worth to
* try to kick other CPU at the expense of IPI.
* First check if it is worth to try to kick other CPU
* at the expense of IPI.
*/
if (!is_worth_kick_other_task(taskc))
goto null_out;
@@ -216,11 +206,6 @@ static struct cpu_ctx *find_victim_cpu(const struct cpumask *cpumask,
return NULL;
}

static void kick_current_cpu(struct task_struct *p)
{
WRITE_ONCE(p->scx.slice, 0);
}

static bool try_kick_cpu(struct task_struct *p, struct cpu_ctx *cpuc_cur,
struct cpu_ctx *victim_cpuc)
{
@@ -234,29 +219,15 @@ static bool try_kick_cpu(struct task_struct *p, struct cpu_ctx *cpuc_cur,
u64 old;
bool ret = false;

/*
* If the current CPU is a victim, we just reset the current task's
* time slice as an optimization. Othewise, kick the remote CPU for
* preemption.
*
* Resetting task's time slice to zero does not trigger an immediate
* preemption. However, the cost of self-IPI is prohibitively expensive
* for some scenarios. The actual preemption will happen at the next
* ops.tick().
*/
if (cpuc_cur->cpu_id == victim_cpuc->cpu_id) {
struct task_struct *tsk = bpf_get_current_task_btf();
kick_current_cpu(tsk);
return true;
}

/*
* Kick a victim CPU if it is not victimized yet by another
* concurrent kick task.
*
*
*/
old = p->scx.slice;
if (old != 0)
ret = __sync_bool_compare_and_swap(&p->scx.slice, old, 0);
if (old != 1 && old != 0)
ret = __sync_bool_compare_and_swap(&p->scx.slice, old, 1);

/*
* Kick the remote CPU for preemption.
@@ -329,7 +300,7 @@ static bool try_yield_current_cpu(struct task_struct *p_run,
* give up its extended time slice for fairness.
*/
if (taskc_run->lock_holder_xted) {
kick_current_cpu(p_run);
p_run->scx.slice = 0;
return true;
}

@@ -367,7 +338,7 @@ static bool try_yield_current_cpu(struct task_struct *p_run,
bpf_rcu_read_unlock();

if (ret)
kick_current_cpu(p_run);
p_run->scx.slice = 0;

return ret;
}
