Skip to content

Commit 0409819

Browse files
DispatchCode authored and Alexei Starovoitov committed
bpf: replace use of system_unbound_wq with system_dfl_wq
Currently if a user enqueue a work item using schedule_delayed_work() the used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to schedule_work() that is using system_wq and queue_work(), that makes use again of WORK_CPU_UNBOUND. This lack of consistentcy cannot be addressed without refactoring the API. system_unbound_wq should be the default workqueue so as not to enforce locality constraints for random work whenever it's not required. Adding system_dfl_wq to encourage its use when unbound work should be used. queue_work() / queue_delayed_work() / mod_delayed_work() will now use the new unbound wq: whether the user still use the old wq a warn will be printed along with a wq redirect to the new one. The old system_unbound_wq will be kept for a few release cycles. Suggested-by: Tejun Heo <tj@kernel.org> Signed-off-by: Marco Crivellari <marco.crivellari@suse.com> Link: https://lore.kernel.org/r/20250905085309.94596-3-marco.crivellari@suse.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent 34f8608 commit 0409819

File tree

3 files changed

+4
-4
lines changed

3 files changed

+4
-4
lines changed

kernel/bpf/helpers.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1594,7 +1594,7 @@ void bpf_timer_cancel_and_free(void *val)
15941594
* timer callback.
15951595
*/
15961596
if (this_cpu_read(hrtimer_running)) {
1597-
queue_work(system_unbound_wq, &t->cb.delete_work);
1597+
queue_work(system_dfl_wq, &t->cb.delete_work);
15981598
return;
15991599
}
16001600

@@ -1607,7 +1607,7 @@ void bpf_timer_cancel_and_free(void *val)
16071607
if (hrtimer_try_to_cancel(&t->timer) >= 0)
16081608
kfree_rcu(t, cb.rcu);
16091609
else
1610-
queue_work(system_unbound_wq, &t->cb.delete_work);
1610+
queue_work(system_dfl_wq, &t->cb.delete_work);
16111611
} else {
16121612
bpf_timer_delete_work(&t->cb.delete_work);
16131613
}

kernel/bpf/memalloc.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -736,7 +736,7 @@ static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
736736
/* Defer barriers into worker to let the rest of map memory to be freed */
737737
memset(ma, 0, sizeof(*ma));
738738
INIT_WORK(&copy->work, free_mem_alloc_deferred);
739-
queue_work(system_unbound_wq, &copy->work);
739+
queue_work(system_dfl_wq, &copy->work);
740740
}
741741

742742
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)

kernel/bpf/syscall.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -905,7 +905,7 @@ static void bpf_map_free_in_work(struct bpf_map *map)
905905
/* Avoid spawning kworkers, since they all might contend
906906
* for the same mutex like slab_mutex.
907907
*/
908-
queue_work(system_unbound_wq, &map->work);
908+
queue_work(system_dfl_wq, &map->work);
909909
}
910910

911911
static void bpf_map_free_rcu_gp(struct rcu_head *rcu)

0 commit comments

Comments (0)