
Commit 05ae686

Alexei Starovoitov authored and borkmann committed
bpf: Refactor alloc_bulk().
Factor out inner body of alloc_bulk into separate helper.
No functional changes.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-5-alexei.starovoitov@gmail.com
1 parent 9de3e81 commit 05ae686
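
For orientation before reading the diff: this is a pure extract-function refactor. The per-object bookkeeping that used to sit inline in alloc_bulk()'s loop moves into the new add_obj_to_free_list() helper, so the loop body shrinks to one call per allocated object. A simplified sketch of the resulting loop shape follows; get_next_obj() is a hypothetical stand-in, not a real memalloc.c function:

/* Simplified sketch only; get_next_obj() is a hypothetical stand-in
 * for the allocation logic that alloc_bulk() actually uses.
 */
static void alloc_bulk_shape(struct bpf_mem_cache *c, int cnt, int node)
{
	void *obj;
	int i;

	for (i = 0; i < cnt; i++) {
		obj = get_next_obj(c, node);	/* hypothetical helper */
		if (!obj)
			break;
		/* all free_llist bookkeeping now lives in one place */
		add_obj_to_free_list(c, obj);
	}
}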

File tree

1 file changed: +26 / -20 lines

kernel/bpf/memalloc.c

Lines changed: 26 additions & 20 deletions
@@ -154,11 +154,35 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
+static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+{
+	unsigned long flags;
+
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		/* In RT irq_work runs in per-cpu kthread, so disable
+		 * interrupts to avoid preemption and interrupts and
+		 * reduce the chance of bpf prog executing on this cpu
+		 * when active counter is busy.
+		 */
+		local_irq_save(flags);
+	/* alloc_bulk runs from irq_work which will not preempt a bpf
+	 * program that does unit_alloc/unit_free since IRQs are
+	 * disabled there. There is no race to increment 'active'
+	 * counter. It protects free_llist from corruption in case NMI
+	 * bpf prog preempted this loop.
+	 */
+	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+	__llist_add(obj, &c->free_llist);
+	c->free_cnt++;
+	local_dec(&c->active);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_restore(flags);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
 	struct mem_cgroup *memcg = NULL, *old_memcg;
-	unsigned long flags;
 	void *obj;
 	int i;
 
@@ -188,25 +212,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 			if (!obj)
 				break;
 		}
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			/* In RT irq_work runs in per-cpu kthread, so disable
-			 * interrupts to avoid preemption and interrupts and
-			 * reduce the chance of bpf prog executing on this cpu
-			 * when active counter is busy.
-			 */
-			local_irq_save(flags);
-		/* alloc_bulk runs from irq_work which will not preempt a bpf
-		 * program that does unit_alloc/unit_free since IRQs are
-		 * disabled there. There is no race to increment 'active'
-		 * counter. It protects free_llist from corruption in case NMI
-		 * bpf prog preempted this loop.
-		 */
-		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
-		__llist_add(obj, &c->free_llist);
-		c->free_cnt++;
-		local_dec(&c->active);
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_restore(flags);
+		add_obj_to_free_list(c, obj);
 	}
 	set_active_memcg(old_memcg);
 	mem_cgroup_put(memcg);
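
The comments in the new helper carry the interesting reasoning: free_llist is protected by the per-cpu 'active' counter rather than a lock. irq_work cannot preempt a bpf program that is inside unit_alloc()/unit_free() because IRQs are disabled there, so the helper expects to take the counter uncontended (hence the WARN_ON_ONCE). An NMI bpf program can still preempt this loop, and the counter is what stops it from touching free_llist concurrently. A rough sketch of the fast-path side of that guard, written from the description above rather than copied verbatim from memalloc.c, looks like this:

/* Illustrative only: the shape of the fast-path guard that pairs with
 * add_obj_to_free_list(); not the exact kernel code.
 */
static void *unit_alloc_sketch(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		/* Sole holder of 'active' on this cpu, so the
		 * non-atomic __llist_del_first() is safe.
		 */
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			c->free_cnt--;
	}
	/* else: re-entered (e.g. from an NMI bpf prog) while the counter
	 * was held; back off instead of corrupting the list.
	 */
	local_dec(&c->active);
	local_irq_restore(flags);
	return llnode;
}

Because alloc_bulk() mostly runs from irq_work (per the comment above), add_obj_to_free_list() asserts that the counter is uncontended instead of providing a fallback path.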
