
Commit 9774ae0

bpf: Use kmalloc_nolock() in range tree
The range tree uses bpf_mem_alloc(), which is safe to call from all contexts because it serves allocations from a pre-allocated pool of memory. Replace bpf_mem_alloc() with kmalloc_nolock(), which can also be called safely from all contexts and is more scalable than bpf_mem_alloc().

Remove the migrate_disable()/migrate_enable() pairs: they were only needed because bpf_mem_alloc() performs per-CPU operations; kmalloc_nolock() does not.

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
1 parent b54a8e1 commit 9774ae0
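
In short, the three-line alloc/free dance around the per-CPU allocator collapses into a single call on each side. A condensed sketch of the pattern swap, drawn from the diff below (rn stands in for the various range_node pointers in the file):

	/* Before: bpf_mem_alloc() performs per-CPU operations, so every
	 * call site had to pin the task to a CPU around the call.
	 */
	migrate_disable();
	rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
	migrate_enable();
	...
	migrate_disable();
	bpf_mem_free(&bpf_global_ma, rn);
	migrate_enable();

	/* After: kmalloc_nolock() is callable from any context without
	 * pinning; __GFP_ACCOUNT keeps the allocations memcg-accounted.
	 */
	rn = kmalloc_nolock(sizeof(struct range_node), __GFP_ACCOUNT, NUMA_NO_NODE);
	...
	kfree_nolock(rn);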

File tree

1 file changed: 7 additions, 15 deletions

kernel/bpf/range_tree.c

Lines changed: 7 additions & 15 deletions
@@ -2,7 +2,6 @@
 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
 #include <linux/interval_tree_generic.h>
 #include <linux/slab.h>
-#include <linux/bpf_mem_alloc.h>
 #include <linux/bpf.h>
 #include "range_tree.h"
 
@@ -21,7 +20,7 @@
  * in commit 6772fcc8890a ("xfs: convert xbitmap to interval tree").
  *
  * The implementation relies on external lock to protect rbtree-s.
- * The alloc/free of range_node-s is done via bpf_mem_alloc.
+ * The alloc/free of range_node-s is done via kmalloc_nolock().
  *
  * bpf arena is using range_tree to represent unallocated slots.
  * At init time:
@@ -150,9 +149,8 @@ int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
 			range_it_insert(rn, rt);
 
 			/* Add a range */
-			migrate_disable();
-			new_rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
-			migrate_enable();
+			new_rn = kmalloc_nolock(sizeof(struct range_node), __GFP_ACCOUNT,
+						NUMA_NO_NODE);
 			if (!new_rn)
 				return -ENOMEM;
 			new_rn->rn_start = last + 1;
@@ -172,9 +170,7 @@ int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
 		} else {
 			/* in the middle of the clearing range */
 			range_it_remove(rn, rt);
-			migrate_disable();
-			bpf_mem_free(&bpf_global_ma, rn);
-			migrate_enable();
+			kfree_nolock(rn);
 		}
 	}
 	return 0;
@@ -227,9 +223,7 @@ int range_tree_set(struct range_tree *rt, u32 start, u32 len)
 		range_it_remove(right, rt);
 		left->rn_last = right->rn_last;
 		range_it_insert(left, rt);
-		migrate_disable();
-		bpf_mem_free(&bpf_global_ma, right);
-		migrate_enable();
+		kfree_nolock(right);
 	} else if (left) {
 		/* Combine with the left range */
 		range_it_remove(left, rt);
@@ -241,9 +235,7 @@ int range_tree_set(struct range_tree *rt, u32 start, u32 len)
 		right->rn_start = start;
 		range_it_insert(right, rt);
 	} else {
-		migrate_disable();
-		left = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
-		migrate_enable();
+		left = kmalloc_nolock(sizeof(struct range_node), __GFP_ACCOUNT, NUMA_NO_NODE);
 		if (!left)
 			return -ENOMEM;
 		left->rn_start = start;
@@ -259,7 +251,7 @@ void range_tree_destroy(struct range_tree *rt)
 
 	while ((rn = range_it_iter_first(rt, 0, -1U))) {
 		range_it_remove(rn, rt);
-		bpf_mem_free(&bpf_global_ma, rn);
+		kfree_nolock(rn);
 	}
 }
