scx: Replace scx_cpu_online percpu var with SCX_RQ_ONLINE flag
No need for this to be a separate variable.

- As this removes the symbol name collision, rename test_rq_online() to
  scx_rq_online().

- The [un]likely() annotations are moved from the callers into
  scx_rq_online().

- The on/offline status should agree with ops->cpu_on/offline(). In the
  existing code, the two states could deviate when rq_on/offline_scx() were
  called for sched domain updates. Fix it so that they always agree.
htejun committed May 17, 2024
1 parent 3baae12 commit 3a44769
Showing 2 changed files with 20 additions and 23 deletions.
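As a quick orientation before the diffs: the change swaps a standalone percpu
bool for a flag bit in the per-rq state, and folds the branch-prediction hint
into the helper so every caller inherits it. The sketch below models this in
plain userspace C; fake_rq and the likely() macro here are illustrative
stand-ins for the kernel's definitions, not the kernel code itself.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins; the real code uses struct rq and the
     * kernel's likely() macro, which wraps __builtin_expect as below. */
    enum { SCX_RQ_ONLINE = 1 << 0 };

    struct fake_rq { unsigned int flags; };

    #define likely(x) __builtin_expect(!!(x), 1)

    /* New-style helper: the hint lives here, so call sites stay as
     * plain "if (!scx_rq_online(rq))" with no per-site annotation. */
    static bool scx_rq_online(struct fake_rq *rq)
    {
            return likely(rq->flags & SCX_RQ_ONLINE);
    }

    int main(void)
    {
            struct fake_rq rq = { .flags = SCX_RQ_ONLINE };

            if (!scx_rq_online(&rq))
                    puts("would fall back to the local DSQ");
            else
                    puts("CPU is online from scx's perspective");

            rq.flags &= ~SCX_RQ_ONLINE;
            printf("after offline: %d\n", scx_rq_online(&rq));
            return 0;
    }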
kernel/sched/ext.c (12 additions & 21 deletions)
@@ -921,16 +921,6 @@ static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
  */
 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
 
-/*
- * Has the current CPU been onlined from the perspective of scx?
- *
- * A hotplugged CPU may begin scheduling tasks before the core scheduler will
- * call into rq_online_scx(). Track whether we've had a chance to invoke
- * ops.cpu_online() so we can skip invoking ops.enqueue() or ops.dispatch() on
- * that CPU until the scheduler knows about the hotplug event.
- */
-static DEFINE_PER_CPU(bool, scx_cpu_online);
-
 /* dispatch queues */
 static struct scx_dispatch_q __cacheline_aligned_in_smp scx_dsq_global;
 
@@ -1804,9 +1794,9 @@ static void direct_dispatch(struct task_struct *p, u64 enq_flags)
         dispatch_enqueue(dsq, p, enq_flags);
 }
 
-static bool test_rq_online(struct rq *rq)
+static bool scx_rq_online(struct rq *rq)
 {
-        return per_cpu(scx_cpu_online, cpu_of(rq));
+        return likely(rq->scx.flags & SCX_RQ_ONLINE);
 }
 
 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
@@ -1826,7 +1816,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
          * offline. We're just trying to on/offline the CPU. Don't bother the
          * BPF scheduler.
          */
-        if (unlikely(!test_rq_online(rq)))
+        if (!scx_rq_online(rq))
                 goto local;
 
         if (scx_ops_bypassing()) {
@@ -2228,7 +2218,7 @@ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq)
                 return false;
         if (!(p->flags & PF_KTHREAD) && unlikely(!task_cpu_possible(cpu, p)))
                 return false;
-        if (unlikely(!test_rq_online(rq)))
+        if (!scx_rq_online(rq))
                 return false;
         return true;
 }
@@ -2383,7 +2373,7 @@ dispatch_to_local_dsq(struct rq *rq, struct rq_flags *rf, u64 dsq_id,
          * instead, which should always be safe. As this is an allowed
          * behavior, don't trigger an ops error.
          */
-        if (unlikely(!test_rq_online(dst_rq)))
+        if (!scx_rq_online(dst_rq))
                 dst_rq = src_rq;
 
         if (src_rq == dst_rq) {
@@ -2583,8 +2573,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev,
         if (consume_dispatch_q(rq, rf, &scx_dsq_global))
                 goto has_tasks;
 
-        if (!SCX_HAS_OP(dispatch) || scx_ops_bypassing() ||
-            unlikely(!test_rq_online(rq)))
+        if (!SCX_HAS_OP(dispatch) || scx_ops_bypassing() || !scx_rq_online(rq))
                 goto out;
 
         dspc->rq = rq;
@@ -3223,16 +3212,18 @@ static void handle_hotplug(struct rq *rq, bool online)
 
 static void rq_online_scx(struct rq *rq, enum rq_onoff_reason reason)
 {
-        if (reason == RQ_ONOFF_HOTPLUG)
+        if (reason == RQ_ONOFF_HOTPLUG) {
                 handle_hotplug(rq, true);
-        per_cpu(scx_cpu_online, cpu_of(rq)) = true;
+                rq->scx.flags |= SCX_RQ_ONLINE;
+        }
 }
 
 static void rq_offline_scx(struct rq *rq, enum rq_onoff_reason reason)
 {
-        per_cpu(scx_cpu_online, cpu_of(rq)) = false;
-        if (reason == RQ_ONOFF_HOTPLUG)
+        if (reason == RQ_ONOFF_HOTPLUG) {
+                rq->scx.flags &= ~SCX_RQ_ONLINE;
                 handle_hotplug(rq, false);
+        }
 }
 
 #else /* CONFIG_SMP */
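To see why the last hunk fixes the state deviation called out in the commit
message, here is a tiny userspace model of the old and new rq_offline_scx()
orderings. All names (HOTPLUG, SCHED_DOMAIN, flag_online, ops_sees_online)
are made-up stand-ins for illustration, not kernel identifiers.

    #include <assert.h>
    #include <stdbool.h>

    enum onoff_reason { HOTPLUG, SCHED_DOMAIN };

    static bool flag_online;     /* models rq->scx.flags & SCX_RQ_ONLINE */
    static bool ops_sees_online; /* models what ops.cpu_on/offline() was told */

    /* Old ordering: the online state was cleared for every reason, but
     * ops.cpu_offline() only ran for hotplug, so the two could deviate. */
    static void old_rq_offline(enum onoff_reason reason)
    {
            flag_online = false;               /* unconditional clear */
            if (reason == HOTPLUG)
                    ops_sees_online = false;   /* handle_hotplug() */
    }

    /* New ordering: both transitions are gated on the same condition, so
     * the flag always agrees with what the BPF scheduler has been told. */
    static void new_rq_offline(enum onoff_reason reason)
    {
            if (reason == HOTPLUG) {
                    flag_online = false;
                    ops_sees_online = false;   /* handle_hotplug() */
            }
    }

    int main(void)
    {
            flag_online = ops_sees_online = true;
            old_rq_offline(SCHED_DOMAIN);      /* e.g. sched domain update */
            assert(flag_online != ops_sees_online);   /* states deviated */

            flag_online = ops_sees_online = true;
            new_rq_offline(SCHED_DOMAIN);
            assert(flag_online == ops_sees_online);   /* states agree */
            return 0;
    }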
kernel/sched/sched.h (8 additions & 2 deletions)
@@ -721,8 +721,14 @@ struct cfs_rq {
 #ifdef CONFIG_SCHED_CLASS_EXT
 /* scx_rq->flags, protected by the rq lock */
 enum scx_rq_flags {
-        SCX_RQ_BALANCING        = 1 << 0,
-        SCX_RQ_CAN_STOP_TICK    = 1 << 1,
+        /*
+         * A hotplugged CPU starts scheduling before rq_online_scx(). Track
+         * ops.cpu_on/offline() state so that ops.enqueue/dispatch() are called
+         * only while the BPF scheduler considers the CPU to be online.
+         */
+        SCX_RQ_ONLINE           = 1 << 0,
+        SCX_RQ_BALANCING        = 1 << 1,
+        SCX_RQ_CAN_STOP_TICK    = 1 << 2,
 };
 
 struct scx_rq {
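Finally, a trivial self-contained illustration of the renumbered bits above:
SCX_RQ_ONLINE takes bit 0 and the existing flags shift up by one. This is
userspace code for illustration only; in the kernel these accesses are
serialized by the rq lock, as the comment in the diff notes.

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors enum scx_rq_flags from the diff above. */
    enum scx_rq_flags {
            SCX_RQ_ONLINE        = 1 << 0,
            SCX_RQ_BALANCING     = 1 << 1,
            SCX_RQ_CAN_STOP_TICK = 1 << 2,
    };

    int main(void)
    {
            unsigned int flags = 0;

            flags |= SCX_RQ_ONLINE;               /* as in rq_online_scx()  */
            bool online = flags & SCX_RQ_ONLINE;  /* as in scx_rq_online()  */
            flags &= ~SCX_RQ_ONLINE;              /* as in rq_offline_scx() */

            printf("online while set: %d, after clear: %d\n",
                   online, !!(flags & SCX_RQ_ONLINE));
            return 0;
    }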
