bpf: wq: add bpf_wq_init
We need to teach the verifier about the second argument, which is declared
as void * but is actually of type KF_ARG_PTR_TO_MAP. We could have dropped
this extra case if we had declared the second argument as struct bpf_map *,
but that would force users to add an extra cast for their programs to
compile.

We also need to duplicate the timer code that checks whether the map
argument matches the provided workqueue.

Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
Link: https://lore.kernel.org/r/20240420-bpf_wq-v2-11-6c986a5a741f@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
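
The practical effect of keeping the second argument as void * shows up on the BPF program side: the map that owns the workqueue is passed directly, with no cast to struct bpf_map *. Below is a minimal sketch of such a caller; the map layout, section name, and kfunc declaration are illustrative assumptions, not part of this commit.

/* Hypothetical BPF program fragment showing the intended calling convention. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Kfunc prototype as a program would declare it (assumed). */
extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __ksym;

struct elem {
	struct bpf_wq work;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

SEC("tc")
int init_wq(void *ctx)
{
	struct elem *val;
	int key = 0;

	val = bpf_map_lookup_elem(&array, &key);
	if (!val)
		return 0;

	/* The map is passed as-is: since the kfunc argument is void *
	 * (treated by the verifier as KF_ARG_PTR_TO_MAP), no cast is needed.
	 */
	bpf_wq_init(&val->work, &array, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";

Passing any map other than the one whose value embeds the struct bpf_wq is rejected at load time by the verifier check described above.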
Benjamin Tissoires authored and Alexei Starovoitov committed Apr 24, 2024
1 parent b4abee7 commit eb48f6c
Showing 1 changed file with 102 additions and 2 deletions.
kernel/bpf/helpers.c
@@ -1109,11 +1109,18 @@ struct bpf_hrtimer {
struct hrtimer timer;
};

-/* the actual struct hidden inside uapi struct bpf_timer */
struct bpf_work {
struct bpf_async_cb cb;
struct work_struct work;
struct work_struct delete_work;
};

/* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
struct bpf_async_kern {
union {
struct bpf_async_cb *cb;
struct bpf_hrtimer *timer;
struct bpf_work *work;
};
/* bpf_spin_lock is used here instead of spinlock_t to make
* sure that it always fits into space reserved by struct bpf_timer
@@ -1124,6 +1131,7 @@ struct bpf_async_kern {

enum bpf_async_type {
BPF_ASYNC_TYPE_TIMER = 0,
BPF_ASYNC_TYPE_WQ,
};

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
@@ -1167,11 +1175,64 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
return HRTIMER_NORESTART;
}

static void bpf_wq_work(struct work_struct *work)
{
struct bpf_work *w = container_of(work, struct bpf_work, work);
struct bpf_tramp_run_ctx __maybe_unused run_ctx;
struct bpf_async_cb *cb = &w->cb;
struct bpf_prog *prog = cb->prog;
struct bpf_map *map = cb->map;
bpf_callback_t callback_fn;
void *value = cb->value;
void *key;
u32 idx;

BTF_TYPE_EMIT(struct bpf_wq);

callback_fn = READ_ONCE(cb->callback_fn);
if (!callback_fn || !prog)
return;

if (map->map_type == BPF_MAP_TYPE_ARRAY) {
struct bpf_array *array = container_of(map, struct bpf_array, map);

/* compute the key */
idx = ((char *)value - array->value) / array->elem_size;
key = &idx;
} else { /* hash or lru */
key = value - round_up(map->key_size, 8);
}

run_ctx.bpf_cookie = 0;

if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
/* recursion detected */
__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
return;
}

callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
/* The verifier checked that return value is zero. */

__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
&run_ctx);
}

static void bpf_wq_delete_work(struct work_struct *work)
{
struct bpf_work *w = container_of(work, struct bpf_work, delete_work);

cancel_work_sync(&w->work);

kfree_rcu(w, cb.rcu);
}

static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
enum bpf_async_type type)
{
struct bpf_async_cb *cb;
struct bpf_hrtimer *t;
struct bpf_work *w;
clockid_t clockid;
size_t size;
int ret = 0;
@@ -1183,6 +1244,9 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u
case BPF_ASYNC_TYPE_TIMER:
size = sizeof(struct bpf_hrtimer);
break;
case BPF_ASYNC_TYPE_WQ:
size = sizeof(struct bpf_work);
break;
default:
return -EINVAL;
}
@@ -1201,13 +1265,22 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u
goto out;
}

-if (type == BPF_ASYNC_TYPE_TIMER) {
switch (type) {
case BPF_ASYNC_TYPE_TIMER:
clockid = flags & (MAX_CLOCKS - 1);
t = (struct bpf_hrtimer *)cb;

hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
t->timer.function = bpf_timer_cb;
cb->value = (void *)async - map->record->timer_off;
break;
case BPF_ASYNC_TYPE_WQ:
w = (struct bpf_work *)cb;

INIT_WORK(&w->work, bpf_wq_work);
INIT_WORK(&w->delete_work, bpf_wq_delete_work);
cb->value = (void *)async - map->record->wq_off;
break;
}
cb->map = map;
cb->prog = NULL;
@@ -1473,7 +1546,19 @@ void bpf_timer_cancel_and_free(void *val)
*/
void bpf_wq_cancel_and_free(void *val)
{
struct bpf_work *work;

BTF_TYPE_EMIT(struct bpf_wq);

work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
if (!work)
return;
/* Trigger cancel of the sleepable work, but *do not* wait for
* it to finish if it was running as we might not be in a
* sleepable context.
* kfree will be called once the work has finished.
*/
schedule_work(&work->delete_work);
}

BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
@@ -2612,6 +2697,20 @@ __bpf_kfunc void bpf_throw(u64 cookie)
WARN(1, "A call to BPF exception callback should never return\n");
}

__bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
{
struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
struct bpf_map *map = p__map;

BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq));
BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq));

if (flags)
return -EINVAL;

return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(generic_btf_ids)
@@ -2689,6 +2788,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
BTF_ID_FLAGS(func, bpf_dynptr_size)
BTF_ID_FLAGS(func, bpf_dynptr_clone)
BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
BTF_ID_FLAGS(func, bpf_wq_init)
BTF_KFUNCS_END(common_btf_ids)

static const struct btf_kfunc_id_set common_kfunc_set = {
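The verifier-side counterpart mentioned in the commit message (checking that the map passed to bpf_wq_init is the one that actually contains the struct bpf_wq) lives in kernel/bpf/verifier.c and is not part of the helpers.c hunk shown here. As a rough, non-authoritative sketch, the duplicated timer-style check can be pictured as follows; the function name, the meta fields, and the error message are assumptions modeled on the existing process_timer_func() path, not the literal patch contents.

/* Sketch only: mirrors the timer path; names and fields are assumptions.
 * Preliminary checks on the register and the map's btf_record are omitted.
 */
static int process_wq_func(struct bpf_verifier_env *env, int regno,
			   struct bpf_kfunc_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	struct bpf_map *map = reg->map_ptr;
	u64 val = reg->var_off.value;

	/* The bpf_wq must sit at the offset recorded for this map, i.e. the
	 * wq argument and the map argument must belong to each other.
	 */
	if (map->record->wq_off != val + reg->off) {
		verbose(env, "off %lld doesn't point to 'struct bpf_wq' that is at %d\n",
			val + reg->off, map->record->wq_off);
		return -EINVAL;
	}

	meta->map.uid = reg->map_uid;
	meta->map.ptr = map;
	return 0;
}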
