Commit aa857fd

drgn.helpers.linux.slab: add support for retrieving objects exclusively from per-node partial lists

The current implementation of for_each_allocated_object() is slow because it iterates over every physical page. This commit adds the ability to retrieve objects exclusively from the per-node partial lists, which greatly improves efficiency when, for example, searching for the vfs caches that pin dying cgroups or for the source of millions of negative dentries.

Signed-off-by: Jian Wen <[email protected]>
1 parent e9141fa · commit aa857fd
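
For illustration only (not part of the commit), here is a minimal sketch of how the new flag might be used from a drgn session to chase the negative-dentry case from the commit message. It assumes the interactive `prog` global and a dentry slab cache named "dentry"; `find_slab_cache` and `slab_cache_for_each_allocated_object` are existing drgn helpers.

```python
# Minimal sketch; run inside `drgn` against a live kernel or vmcore.
from drgn.helpers.linux.slab import (
    find_slab_cache,
    slab_cache_for_each_allocated_object,
)

# Assumption: the dentry slab cache is named "dentry" on this kernel.
dentry_cache = find_slab_cache(prog, "dentry")

# Fast path added by this commit: walk only the per-node partial lists
# instead of scanning every physical page.
negative = 0
for dentry in slab_cache_for_each_allocated_object(
    dentry_cache, "struct dentry", node_partial_only=True
):
    if not dentry.d_inode:  # a negative dentry has no backing inode
        negative += 1
print(f"negative dentries on per-node partial lists: {negative}")
```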

File tree

  drgn/helpers/linux/slab.py
  tests/linux_kernel/helpers/test_slab.py
  tests/linux_kernel/kmod/drgn_test.c

3 files changed: +72 −3 lines changed

drgn/helpers/linux/slab.py

Lines changed: 32 additions & 3 deletions
```diff
@@ -217,8 +217,28 @@ def _page_objects(
     ) -> Iterator[Object]:
         raise NotImplementedError()
 
-    def for_each_allocated_object(self, type: Union[str, Type]) -> Iterator[Object]:
+    def for_each_allocated_object(
+        self, type: Union[str, Type], node_partial_only: Optional[bool] = None
+    ) -> Iterator[Object]:
         pointer_type = self._prog.pointer_type(self._prog.type(type))
+
+        if node_partial_only:
+            cpu_slab = self._slab_cache.cpu_slab.read_()
+            if hasattr(cpu_slab, "slab"):
+                struct = 'struct slab'
+                member = 'slab_list'
+            else:
+                struct = 'struct page'
+                member = 'lru'
+
+            for node in range(self._prog['nr_online_nodes'].value_()):
+                n = self._slab_cache.node[node]
+                for slab in list_for_each_entry(
+                        struct, n.partial.address_of_(), member):
+                    yield from self._page_objects(
+                        cast("struct page *", slab), slab, pointer_type)
+            return
+
         slab_type = _get_slab_type(self._prog)
         PG_slab_mask = 1 << self._prog.constant("PG_slab")
         for page in for_each_page(self._prog):
@@ -429,7 +449,8 @@ def _get_slab_cache_helper(slab_cache: Object) -> _SlabCacheHelper:
 
 
 def slab_cache_for_each_allocated_object(
-    slab_cache: Object, type: Union[str, Type]
+    slab_cache: Object, type: Union[str, Type],
+    node_partial_only: Optional[bool] = None
 ) -> Iterator[Object]:
     """
     Iterate over all allocated objects in a given slab cache.
@@ -443,11 +464,19 @@ def slab_cache_for_each_allocated_object(
         ...
     }
 
+    Only objects on a per-node partial slab list
+
+    >>> next(slab_cache_for_each_allocated_object(dentry_cache, "struct dentry", node_partial_only=True))
+    *(struct dentry *)0xffff95b9beebc000 = {
+        ...
+    }
+
     :param slab_cache: ``struct kmem_cache *``
     :param type: Type of object in the slab cache.
+    :param node_partial_only: only objects on a per-node partial slab list
     :return: Iterator of ``type *`` objects.
     """
-    return _get_slab_cache_helper(slab_cache).for_each_allocated_object(type)
+    return _get_slab_cache_helper(slab_cache).for_each_allocated_object(
+        type, node_partial_only)
 
 
 def _find_containing_slab(
```
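
A note on the `hasattr(cpu_slab, "slab")` probe above: since Linux 5.17, SLUB keeps per-slab metadata in a dedicated `struct slab` whose partial-list linkage is `slab_list`, while older kernels overload `struct page` and link through `lru`. The sketch below restates that probe as a standalone helper for clarity; `node_partial_slabs` is an illustrative name, not a drgn API.

```python
# Hedged sketch of the version-agnostic type probe (SLUB only).
from drgn import Object
from drgn.helpers.linux.list import list_for_each_entry


def node_partial_slabs(slab_cache: Object, node: int):
    """Yield the slabs on one node's partial list, on any kernel version."""
    # Probe the debugged kernel's types instead of parsing its version:
    # `struct kmem_cache_cpu` gained a `slab` member when `struct slab`
    # took over slab metadata from `struct page` (around Linux 5.17).
    cpu_slab = slab_cache.cpu_slab.read_()
    if hasattr(cpu_slab, "slab"):
        struct, member = "struct slab", "slab_list"
    else:
        struct, member = "struct page", "lru"
    n = slab_cache.node[node]
    yield from list_for_each_entry(struct, n.partial.address_of_(), member)
```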

tests/linux_kernel/helpers/test_slab.py

Lines changed: 11 additions & 0 deletions
```diff
@@ -160,6 +160,17 @@ def test_slab_cache_for_each_allocated_object(self):
             list(objects),
         )
 
+    @skip_unless_have_full_mm_support
+    @skip_unless_have_test_kmod
+    def test_slab_cache_for_each_allocated_object_on_partial_list(self):
+        self.assertEqual(
+            sum(1 for _ in slab_cache_for_each_allocated_object(
+                self.prog["drgn_test_node_partial_kmem_cache"],
+                "struct drgn_test_node_partial_slab_object", True)
+            ),
+            1
+        )
+
     @skip_unless_have_full_mm_support
     @skip_unless_have_test_kmod
     def test_slab_object_info(self):
```

tests/linux_kernel/kmod/drgn_test.c

Lines changed: 29 additions & 0 deletions
```diff
@@ -364,6 +364,14 @@ struct drgn_test_big_slab_object {
 struct drgn_test_small_slab_object *drgn_test_small_slab_objects[5];
 struct drgn_test_big_slab_object *drgn_test_big_slab_objects[5];
 
+struct kmem_cache *drgn_test_node_partial_kmem_cache;
+
+struct drgn_test_node_partial_slab_object {
+	unsigned long unused;
+};
+
+struct drgn_test_node_partial_slab_object *drgn_test_node_partial_slab_object_p;
+
 static void drgn_test_slab_exit(void)
 {
 	size_t i;
@@ -386,6 +394,12 @@ static void drgn_test_slab_exit(void)
 		}
 		kmem_cache_destroy(drgn_test_small_kmem_cache);
 	}
+	if (drgn_test_node_partial_kmem_cache) {
+		if (drgn_test_node_partial_slab_object_p)
+			kmem_cache_free(drgn_test_node_partial_kmem_cache,
+					drgn_test_node_partial_slab_object_p);
+		kmem_cache_destroy(drgn_test_node_partial_kmem_cache);
+	}
 }
 
 // Dummy constructor so test slab caches won't get merged.
@@ -426,6 +440,21 @@ static int drgn_test_slab_init(void)
 			return -ENOMEM;
 		drgn_test_big_slab_objects[i]->value = i;
 	}
+
+	drgn_test_node_partial_kmem_cache =
+		kmem_cache_create(
+			"drgn_test_partial",
+			sizeof(struct drgn_test_node_partial_slab_object),
+			__alignof__(struct drgn_test_node_partial_slab_object),
+			0, drgn_test_slab_ctor);
+	if (!drgn_test_node_partial_kmem_cache)
+		return -ENOMEM;
+	drgn_test_node_partial_slab_object_p = kmem_cache_alloc(
+		drgn_test_node_partial_kmem_cache, GFP_KERNEL);
+
+	// Move the object to the per-node partial list
+	kmem_cache_shrink(drgn_test_node_partial_kmem_cache);
+
 	return 0;
 }
 
```
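Why the `kmem_cache_shrink()` call matters: a freshly allocated SLUB object sits on its CPU's active slab, which a per-node-partial-list walk would never see. `kmem_cache_shrink()` flushes the per-CPU slabs back to their nodes and frees empty ones, leaving the test's half-full slab on the per-node partial list. A hedged way to check this from a drgn session with the test kmod loaded (assuming the interactive `prog` global):

```python
# Verification sketch, not part of the commit: the single object allocated
# by the kmod should be the only one reachable with node_partial_only=True.
from drgn.helpers.linux.slab import slab_cache_for_each_allocated_object

objs = list(
    slab_cache_for_each_allocated_object(
        prog["drgn_test_node_partial_kmem_cache"],
        "struct drgn_test_node_partial_slab_object",
        node_partial_only=True,
    )
)
assert len(objs) == 1
# It should be the very pointer the kmod stashed in its global variable.
assert objs[0].value_() == prog["drgn_test_node_partial_slab_object_p"].value_()
```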