Commit f6785e0

Merge tag 'slab-for-6.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab fixes from Vlastimil Babka:
 "Fixes for issues introduced in this merge window: kobject memory leak,
  unsuppressed warning and possible lockup in new slub_kunit tests,
  misleading code in kvfree_rcu_queue_batch()"

* tag 'slab-for-6.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  slub/kunit: skip test_kfree_rcu when the slub kunit test is built-in
  mm, slab: suppress warnings in test_leak_destroy kunit test
  rcu/kvfree: Refactor kvfree_rcu_queue_batch()
  mm, slab: fix use of SLAB_SUPPORTS_SYSFS in kmem_cache_release()
2 parents: e1043b6 + cac39b0

5 files changed, +30 -15 lines

kernel/rcu/tree.c

+5 -4

@@ -3607,11 +3607,12 @@ kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
 			}
 
 			// One work is per one batch, so there are three
-			// "free channels", the batch can handle. It can
-			// be that the work is in the pending state when
-			// channels have been detached following by each
-			// other.
+			// "free channels", the batch can handle. Break
+			// the loop since it is done with this CPU thus
+			// queuing an RCU work is _always_ success here.
 			queued = queue_rcu_work(system_unbound_wq, &krwp->rcu_work);
+			WARN_ON_ONCE(!queued);
+			break;
 		}
 	}
 
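The WARN_ON_ONCE() added here leans on the return-value contract of queue_rcu_work(): it returns false only when the given rcu_work is already pending, which the refactored loop rules out by breaking after queuing a single batch per CPU. Below is a minimal sketch of that pattern in isolation; the demo_* names are mine, not taken from kernel/rcu/tree.c.

#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_batch {
	struct rcu_work rcu_work;
	/* ...pointers to be freed once a grace period has elapsed... */
};

static void demo_batch_free(struct work_struct *work)
{
	struct demo_batch *b = container_of(to_rcu_work(work),
					    struct demo_batch, rcu_work);
	kfree(b);
}

static void demo_batch_queue(struct demo_batch *b)
{
	bool queued;

	INIT_RCU_WORK(&b->rcu_work, demo_batch_free);
	/* Runs demo_batch_free() on system_unbound_wq after a grace period. */
	queued = queue_rcu_work(system_unbound_wq, &b->rcu_work);
	/* False would mean this rcu_work was already pending -- a logic bug. */
	WARN_ON_ONCE(!queued);
}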

lib/slub_kunit.c

+12 -6

@@ -164,10 +164,16 @@ struct test_kfree_rcu_struct {
 
 static void test_kfree_rcu(struct kunit *test)
 {
-	struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
-						      sizeof(struct test_kfree_rcu_struct),
-						      SLAB_NO_MERGE);
-	struct test_kfree_rcu_struct *p = kmem_cache_alloc(s, GFP_KERNEL);
+	struct kmem_cache *s;
+	struct test_kfree_rcu_struct *p;
+
+	if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
+		kunit_skip(test, "can't do kfree_rcu() when test is built-in");
+
+	s = test_kmem_cache_create("TestSlub_kfree_rcu",
+				   sizeof(struct test_kfree_rcu_struct),
+				   SLAB_NO_MERGE);
+	p = kmem_cache_alloc(s, GFP_KERNEL);
 
 	kfree_rcu(p, rcu);
 	kmem_cache_destroy(s);
@@ -177,13 +183,13 @@ static void test_kfree_rcu(struct kunit *test)
 
 static void test_leak_destroy(struct kunit *test)
 {
-	struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
 			64, SLAB_NO_MERGE);
 	kmem_cache_alloc(s, GFP_KERNEL);
 
 	kmem_cache_destroy(s);
 
-	KUNIT_EXPECT_EQ(test, 1, slab_errors);
+	KUNIT_EXPECT_EQ(test, 2, slab_errors);
 }
 
 static int test_init(struct kunit *test)
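The skip above is the stock KUnit pattern: IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST) is true only when the suite is compiled into the kernel, and kunit_skip() marks the case as skipped and aborts it before any cache is created, avoiding the possible lockup called out in the merge summary. A minimal sketch of the same pattern follows; CONFIG_DEMO_KUNIT_TEST, demo_cache and demo_rcu_case are illustrative names, not part of the patch.

#include <kunit/test.h>
#include <linux/kconfig.h>
#include <linux/slab.h>

static void demo_rcu_case(struct kunit *test)
{
	struct kmem_cache *s;

	/* Skip rather than risk hanging when the suite is built-in. */
	if (IS_BUILTIN(CONFIG_DEMO_KUNIT_TEST))
		kunit_skip(test, "only supported when the test is a module");

	s = kmem_cache_create("demo_cache", 64, 0, SLAB_NO_MERGE, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, s);
	/* ...kmem_cache_alloc(), kfree_rcu(), then destroy the cache... */
	kmem_cache_destroy(s);
}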

mm/slab.h

+7 -1

@@ -310,7 +310,7 @@ struct kmem_cache {
 };
 
 #if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
-#define SLAB_SUPPORTS_SYSFS
+#define SLAB_SUPPORTS_SYSFS 1
 void sysfs_slab_unlink(struct kmem_cache *s);
 void sysfs_slab_release(struct kmem_cache *s);
 #else
@@ -546,6 +546,12 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla
 	return false;
 }
 
+#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
+bool slab_in_kunit_test(void);
+#else
+static inline bool slab_in_kunit_test(void) { return false; }
+#endif
+
 #ifdef CONFIG_SLAB_OBJ_EXT
 
 /*
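The value matters because the IS_ENABLED()/__is_defined() helpers in <linux/kconfig.h> only treat a macro as set when it is defined to 1. With the previous bare define, a check on SLAB_SUPPORTS_SYSFS in kmem_cache_release() evaluated as disabled, so the sysfs release path was skipped and the per-cache kobject leaked, which is the kobject memory leak named in the merge summary. A tiny illustration of the preprocessor behaviour, using made-up macro names:

#include <linux/build_bug.h>
#include <linux/kconfig.h>

#define DEMO_BARE		/* defined, but with no value */
#define DEMO_ONE	1	/* defined to 1, as Kconfig options are */

static_assert(IS_ENABLED(DEMO_BARE) == 0);	/* looks disabled */
static_assert(IS_ENABLED(DEMO_ONE) == 1);	/* looks enabled */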

mm/slab_common.c

+3 -2

@@ -508,8 +508,9 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	kasan_cache_shutdown(s);
 
 	err = __kmem_cache_shutdown(s);
-	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
-	     __func__, s->name, (void *)_RET_IP_);
+	if (!slab_in_kunit_test())
+		WARN(err, "%s %s: Slab cache still has objects when called from %pS",
+		     __func__, s->name, (void *)_RET_IP_);
 
 	list_del(&s->list);
 

mm/slub.c

+3 -2

@@ -827,7 +827,7 @@ static bool slab_add_kunit_errors(void)
 	return true;
 }
 
-static bool slab_in_kunit_test(void)
+bool slab_in_kunit_test(void)
 {
 	struct kunit_resource *resource;
 
@@ -843,7 +843,6 @@ static bool slab_in_kunit_test(void)
 }
 #else
 static inline bool slab_add_kunit_errors(void) { return false; }
-static inline bool slab_in_kunit_test(void) { return false; }
 #endif
 
 static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -5436,6 +5435,8 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 	for_each_object(p, s, addr, slab->objects) {
 
 		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
+			if (slab_add_kunit_errors())
+				continue;
 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
 			print_tracking(s, p);
 		}
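The body of slab_in_kunit_test() is not shown in these hunks; only its linkage changes so mm/slab_common.c can call it through the new declaration in mm/slab.h. Conceptually it answers one question: is the current task running the slub KUnit suite, so that validation failures are counted in slab_errors (the counter the tests compare against) instead of splatted via WARN()/pr_err(). A rough sketch of such a check, mine rather than the actual implementation, could look like this:

#include <kunit/resource.h>
#include <kunit/test-bug.h>

static bool demo_in_slab_kunit_test(void)
{
	struct kunit_resource *resource;
	struct kunit *test = kunit_get_current_test();

	if (!test)
		return false;

	/* Assumes the suite registered its error counter as "slab_errors". */
	resource = kunit_find_named_resource(test, "slab_errors");
	if (!resource)
		return false;

	kunit_put_resource(resource);
	return true;
}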
