drgn.helpers.linux.slab: add support for retrieving objects from partial slabs

The current implementation of for_each_allocated_object() is slow as
it iterates through every physical page.

This commit adds the ability to retrieve objects from the per-node
partial lists and the per-cpu partial slab lists, greatly improving
efficiency when searching for the source of vfs caches of dying
cgroups or millions of negative dentries.

Signed-off-by: Jian Wen <wenjianhn@gmail.com>
wenjianhn committed Jul 3, 2023
1 parent c69e5b1 commit 633bfc1
Showing 3 changed files with 162 additions and 0 deletions.
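
As a sketch of the use case described in the commit message, the new helper could be used from an interactive drgn session (where prog is already set up) roughly as follows; the d_inode check for negative dentries is illustrative and not part of this commit:

    from drgn.helpers.linux.slab import (
        find_slab_cache,
        slab_cache_for_each_partial_slab_object,
    )

    # Count negative dentries (no backing inode) sitting on the dentry
    # cache's per-cpu and per-node partial slabs.
    dentry_cache = find_slab_cache(prog, "dentry")
    negative = 0
    for dentry in slab_cache_for_each_partial_slab_object(
        dentry_cache, "struct dentry"
    ):
        if not dentry.d_inode:
            negative += 1
    print(f"negative dentries on partial slabs: {negative}")
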
62 changes: 62 additions & 0 deletions drgn/helpers/linux/slab.py
@@ -51,6 +51,7 @@
"get_slab_cache_aliases",
"print_slab_caches",
"slab_cache_for_each_allocated_object",
"slab_cache_for_each_partial_slab_object",
"slab_cache_is_merged",
"slab_object_info",
)
@@ -231,6 +232,39 @@ def for_each_allocated_object(self, type: Union[str, Type]) -> Iterator[Object]:
if slab.slab_cache == self._slab_cache:
yield from self._page_objects(page, slab, pointer_type)

def for_each_partial_slab_object(self, type: Union[str, Type]) -> Iterator[Object]:
pointer_type = self._prog.pointer_type(self._prog.type(type))
cpu_slab = self._slab_cache.cpu_slab.read_()

# per-cpu partial slabs
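        # struct slab was split out of struct page in Linux v5.17, so the
        # type of the partial slab pointer depends on the kernel version.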
if hasattr(cpu_slab, "slab"):
slab_ctype = "struct slab *"
else:
slab_ctype = "struct page *"

for cpu in for_each_online_cpu(self._prog):
this_cpu_slab = per_cpu_ptr(cpu_slab, cpu)
slab = this_cpu_slab.partial
if slab != NULL(self._prog, slab_ctype):
yield from self._page_objects(
cast("struct page *", slab), slab, pointer_type
)

# per-node partial slabs
if hasattr(cpu_slab, "slab"):
struct = "struct slab"
member = "slab_list"
else:
struct = "struct page"
member = "lru"

for node in range(self._prog["nr_online_nodes"].value_()):
n = self._slab_cache.node[node]
for slab in list_for_each_entry(struct, n.partial.address_of_(), member):
yield from self._page_objects(
cast("struct page *", slab), slab, pointer_type
)

def object_info(
self, page: Object, slab: Object, addr: int
) -> "Optional[SlabObjectInfo]":
@@ -450,6 +484,34 @@ def slab_cache_for_each_allocated_object(
return _get_slab_cache_helper(slab_cache).for_each_allocated_object(type)


def slab_cache_for_each_partial_slab_object(
slab_cache: Object, type: Union[str, Type]
) -> Iterator[Object]:
"""
Iterate over all allocated objects in a given slab cache's
per-node partial slabs and per-cpu partial slabs.
Only the SLUB allocator is supported now.
>>> dentry_cache = find_slab_cache(prog, "dentry")
>>> next(slab_cache_for_each_partial_slab_object(dentry_cache, "struct dentry")).d_name.name
(const unsigned char *)0xffff93390051c038 = "cgroup"
>>> for s in slab_cache_for_each_partial_slab_object(dentry_cache, "struct dentry"):
... print(s.d_name.name)
...
(const unsigned char *)0xffff93390051c038 = "cgroup"
(const unsigned char *)0xffff93390051c0f8 = "cmdline"
(const unsigned char *)0xffff93390051c1b8 = "8:85355"
(const unsigned char *)0xffff93390051c278 = "cmdline"
:param slab_cache: ``struct kmem_cache *``
:param type: Type of object in the slab cache.
:return: Iterator of ``type *`` objects.
"""
return _get_slab_cache_helper(slab_cache).for_each_partial_slab_object(type)


def _find_containing_slab(
prog: Program, addr: int
) -> Optional[Tuple[Object, Object, Object]]:
29 changes: 29 additions & 0 deletions tests/linux_kernel/helpers/test_slab.py
@@ -12,6 +12,7 @@
for_each_slab_cache,
get_slab_cache_aliases,
slab_cache_for_each_allocated_object,
slab_cache_for_each_partial_slab_object,
slab_cache_is_merged,
slab_object_info,
)
@@ -160,6 +161,34 @@ def test_slab_cache_for_each_allocated_object(self):
list(objects),
)

@skip_unless_have_full_mm_support
@skip_unless_have_test_kmod
def test_slab_cache_node_partial_object(self):
self.assertEqual(
sum(
o.value
for o in slab_cache_for_each_partial_slab_object(
self.prog["drgn_test_node_partial_kmem_cache"],
"struct drgn_test_node_partial_slab_object",
)
),
100,
)

@skip_unless_have_full_mm_support
@skip_unless_have_test_kmod
def test_slab_cache_cpu_partial_object(self):
self.assertEqual(
sum(
o.value
for o in slab_cache_for_each_partial_slab_object(
self.prog["drgn_test_cpu_partial_kmem_cache"],
"struct drgn_test_cpu_partial_slab_object",
)
),
300,
)

@skip_unless_have_full_mm_support
@skip_unless_have_test_kmod
def test_slab_object_info(self):
71 changes: 71 additions & 0 deletions tests/linux_kernel/kmod/drgn_test.c
@@ -364,6 +364,24 @@ struct drgn_test_big_slab_object {
struct drgn_test_small_slab_object *drgn_test_small_slab_objects[5];
struct drgn_test_big_slab_object *drgn_test_big_slab_objects[5];

struct kmem_cache *drgn_test_node_partial_kmem_cache;

struct drgn_test_node_partial_slab_object {
unsigned long value;
};

struct drgn_test_node_partial_slab_object *drgn_test_node_partial_slab_object_p;

struct kmem_cache *drgn_test_cpu_partial_kmem_cache;

struct drgn_test_cpu_partial_slab_object {
unsigned long padding[(PAGE_SIZE * 2) / sizeof(unsigned long) - 1
- (sizeof(void *) / sizeof(unsigned long))];
unsigned long value;
};
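
// The padding above sizes the object to 2 * PAGE_SIZE - sizeof(void *)
// (8184 bytes with 4 KiB pages and 8-byte pointers), leaving room for the
// out-of-object free pointer that SLUB adds for caches with a constructor
// (see the note above drgn_test_slab_ctor() below), so each object plus
// its free pointer fills two pages.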

struct drgn_test_cpu_partial_slab_object *drgn_test_cpu_partial_slab_objects[5];

static void drgn_test_slab_exit(void)
{
size_t i;
@@ -386,9 +404,27 @@ static void drgn_test_slab_exit(void)
}
kmem_cache_destroy(drgn_test_small_kmem_cache);
}
if (drgn_test_node_partial_kmem_cache) {
if (drgn_test_node_partial_slab_object_p)
kmem_cache_free(drgn_test_node_partial_kmem_cache,
drgn_test_node_partial_slab_object_p);
kmem_cache_destroy(drgn_test_node_partial_kmem_cache);
}
if (drgn_test_cpu_partial_kmem_cache) {
for (i = 0; i < ARRAY_SIZE(drgn_test_cpu_partial_slab_objects); i++) {
if (drgn_test_cpu_partial_slab_objects[i]) {
kmem_cache_free(drgn_test_cpu_partial_kmem_cache,
drgn_test_cpu_partial_slab_objects[i]);
}
}
kmem_cache_destroy(drgn_test_cpu_partial_kmem_cache);
}
}

// Dummy constructor so test slab caches won't get merged.
// Note that the free pointer is outside of the object of a slab with a constructor.
// As a result, each object costs sizeof(void *) more bytes.
// See https://github.com/torvalds/linux/blob/v6.4/mm/slub.c#L4393
static void drgn_test_slab_ctor(void *arg)
{
}
@@ -426,6 +462,41 @@ static int drgn_test_slab_init(void)
return -ENOMEM;
drgn_test_big_slab_objects[i]->value = i;
}

drgn_test_node_partial_kmem_cache =
kmem_cache_create(
"drgn_test_partial",
sizeof(struct drgn_test_node_partial_slab_object),
__alignof__(struct drgn_test_node_partial_slab_object),
0, drgn_test_slab_ctor);
if (!drgn_test_node_partial_kmem_cache)
return -ENOMEM;
drgn_test_node_partial_slab_object_p = kmem_cache_alloc(
drgn_test_node_partial_kmem_cache, GFP_KERNEL);
drgn_test_node_partial_slab_object_p->value = 100;

// Move the object to the per-node partial list
kmem_cache_shrink(drgn_test_node_partial_kmem_cache);

drgn_test_cpu_partial_kmem_cache = kmem_cache_create(
"drgn_test_cpu_partial_kmem_cache",
sizeof(struct drgn_test_cpu_partial_slab_object),
__alignof__(struct drgn_test_cpu_partial_slab_object),
0, drgn_test_slab_ctor);
if (!drgn_test_cpu_partial_kmem_cache)
return -ENOMEM;

for (i = 0; i < ARRAY_SIZE(drgn_test_cpu_partial_slab_objects); i++) {
drgn_test_cpu_partial_slab_objects[i] = kmem_cache_alloc(
drgn_test_cpu_partial_kmem_cache, GFP_KERNEL);
drgn_test_cpu_partial_slab_objects[i]->value = 100;
}

// Free the first object to make a cpu partial slab
kmem_cache_free(drgn_test_cpu_partial_kmem_cache,
drgn_test_cpu_partial_slab_objects[0]);
drgn_test_cpu_partial_slab_objects[0] = NULL;

return 0;
}

