
Commit 18e027b

Alexei Starovoitov authored and Daniel Borkmann committed
bpf: Factor out inc/dec of active flag into helpers.
Factor out local_inc/dec_return(&c->active) into helpers. No functional changes.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-6-alexei.starovoitov@gmail.com
1 parent 05ae686 commit 18e027b

1 file changed

kernel/bpf/memalloc.c

Lines changed: 18 additions & 12 deletions
@@ -154,31 +154,41 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
-static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
 {
-	unsigned long flags;
-
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		/* In RT irq_work runs in per-cpu kthread, so disable
 		 * interrupts to avoid preemption and interrupts and
 		 * reduce the chance of bpf prog executing on this cpu
 		 * when active counter is busy.
 		 */
-		local_irq_save(flags);
+		local_irq_save(*flags);
 	/* alloc_bulk runs from irq_work which will not preempt a bpf
 	 * program that does unit_alloc/unit_free since IRQs are
 	 * disabled there. There is no race to increment 'active'
 	 * counter. It protects free_llist from corruption in case NMI
 	 * bpf prog preempted this loop.
 	 */
 	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
-	__llist_add(obj, &c->free_llist);
-	c->free_cnt++;
+}
+
+static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
+{
 	local_dec(&c->active);
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_irq_restore(flags);
 }
 
+static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+{
+	unsigned long flags;
+
+	inc_active(c, &flags);
+	__llist_add(obj, &c->free_llist);
+	c->free_cnt++;
+	dec_active(c, flags);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
@@ -300,17 +310,13 @@ static void free_bulk(struct bpf_mem_cache *c)
 	int cnt;
 
 	do {
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_save(flags);
-		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+		inc_active(c, &flags);
 		llnode = __llist_del_first(&c->free_llist);
 		if (llnode)
 			cnt = --c->free_cnt;
 		else
 			cnt = 0;
-		local_dec(&c->active);
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_restore(flags);
+		dec_active(c, flags);
 		if (llnode)
 			enque_to_free(c, llnode);
 	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
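
The helpers establish a simple bracket around any touch of c->free_llist: inc_active() bumps the per-CPU 'active' counter (disabling interrupts first on PREEMPT_RT), and dec_active() undoes both steps in reverse order. As a minimal sketch of how a caller uses the pair, here is a hypothetical function; the name pop_obj_example and its body are illustrative only, while inc_active(), dec_active(), __llist_del_first(), and the free_llist/free_cnt fields are the ones from the patch:

static void pop_obj_example(struct bpf_mem_cache *c)
{
	unsigned long flags;
	struct llist_node *llnode;

	inc_active(c, &flags);	/* enter the free_llist critical section */
	llnode = __llist_del_first(&c->free_llist);
	if (llnode)
		c->free_cnt--;	/* keep the count in sync with the list */
	dec_active(c, flags);	/* leave the critical section */
}

Note the asymmetry in the signatures: inc_active() takes flags by pointer because local_irq_save() must write the saved IRQ state back into the caller's variable, while dec_active() only needs to read it for local_irq_restore().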
