
Commit 130d4df

mm/sl[au]b: rearrange struct slab fields to allow larger rcu_head
Joel reports [1] that increasing the rcu_head size for debugging purposes used to work before struct slab was split from struct page, but now runs into the various SLAB_MATCH() sanity checks of the layout.

This is because the rcu_head in struct page is in union with large sub-structures and has space to grow without exceeding their size, while in struct slab (for SLAB and SLUB) it's in union only with a list_head.

On closer inspection (and after the previous patch) we can put all fields except slab_cache into a union with rcu_head, as slab_cache is sufficient for the rcu freeing callbacks to work and the rest can be overwritten by rcu_head without causing issues.

This is only somewhat complicated by the need to keep SLUB's freelist+counters aligned for cmpxchg_double. As a result the fields need to be reordered so that slab_cache is first (after page flags) and the union with rcu_head follows. For consistency, do that for SLAB as well, although it's not necessary there.

As a result, the rcu_head field in struct page and struct slab is no longer at the same offset, but that doesn't matter as there is no casting that would rely on it in the slab freeing callbacks, so we can just drop the respective SLAB_MATCH() check. Also update the SLAB_MATCH() for compound_head to reflect the new ordering.

While at it, also add a static_assert to check the alignment needed for cmpxchg_double, so that mistakes are found sooner than via a runtime GPF.

[1] https://lore.kernel.org/all/85afd876-d8bb-0804-b2c5-48ed3055e702@joelfernandes.org/

Reported-by: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
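The reason slab_cache alone must survive outside the union is visible in the freeing path itself. As a minimal sketch, paraphrasing SLUB's RCU callback in mm/slub.c (not the verbatim kernel source): the callback receives only the rcu_head, recovers the enclosing slab with container_of(), and reads nothing but slab_cache afterwards, so every field sharing the union with rcu_head is free to be clobbered:

static void rcu_free_slab(struct rcu_head *h)
{
        /* rcu_head may have overwritten slab_list, freelist, counters... */
        struct slab *slab = container_of(h, struct slab, rcu_head);

        /* ...but slab_cache sits before the union, so it is still intact */
        __free_slab(slab->slab_cache, slab);
}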
1 parent 8b88176 commit 130d4df

1 file changed, 32 insertions(+), 22 deletions(-)


Diff for: mm/slab.h

@@ -11,37 +11,43 @@ struct slab {
 
 #if defined(CONFIG_SLAB)
 
+        struct kmem_cache *slab_cache;
         union {
-                struct list_head slab_list;
+                struct {
+                        struct list_head slab_list;
+                        void *freelist; /* array of free object indexes */
+                        void *s_mem;    /* first object */
+                };
                 struct rcu_head rcu_head;
         };
-        struct kmem_cache *slab_cache;
-        void *freelist; /* array of free object indexes */
-        void *s_mem;    /* first object */
         unsigned int active;
 
 #elif defined(CONFIG_SLUB)
 
-        union {
-                struct list_head slab_list;
-                struct rcu_head rcu_head;
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-                struct {
-                        struct slab *next;
-                        int slabs;      /* Nr of slabs left */
-                };
-#endif
-        };
         struct kmem_cache *slab_cache;
-        /* Double-word boundary */
-        void *freelist;         /* first free object */
         union {
-                unsigned long counters;
                 struct {
-                        unsigned inuse:16;
-                        unsigned objects:15;
-                        unsigned frozen:1;
+                        union {
+                                struct list_head slab_list;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+                                struct {
+                                        struct slab *next;
+                                        int slabs;      /* Nr of slabs left */
+                                };
+#endif
+                        };
+                        /* Double-word boundary */
+                        void *freelist;         /* first free object */
+                        union {
+                                unsigned long counters;
+                                struct {
+                                        unsigned inuse:16;
+                                        unsigned objects:15;
+                                        unsigned frozen:1;
+                                };
+                        };
                 };
+                struct rcu_head rcu_head;
         };
         unsigned int __unused;
 
@@ -66,16 +72,20 @@ struct slab {
 #define SLAB_MATCH(pg, sl)                                              \
         static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
 SLAB_MATCH(flags, __page_flags);
-SLAB_MATCH(compound_head, slab_list);   /* Ensure bit 0 is clear */
 #ifndef CONFIG_SLOB
-SLAB_MATCH(rcu_head, rcu_head);
+SLAB_MATCH(compound_head, slab_cache);  /* Ensure bit 0 is clear */
+#else
+SLAB_MATCH(compound_head, slab_list);   /* Ensure bit 0 is clear */
 #endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, memcg_data);
 #endif
 #undef SLAB_MATCH
 static_assert(sizeof(struct slab) <= sizeof(struct page));
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB)
+static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *)));
+#endif
 
 /**
  * folio_slab - Converts from folio to slab.
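Why the new static_assert is worth having: SLUB updates freelist and counters as one unit via cmpxchg_double(), which on x86-64 compiles to cmpxchg16b, an instruction that raises a general protection fault if its 16-byte operand is not 16-byte aligned. Below is a standalone userspace sketch of the same compile-time check; struct slab_demo is a hypothetical stand-in for the reordered struct slab (void * replaces the kmem_cache pointer), not kernel code:

#include <stddef.h>

/* Pointer-sized stand-ins laid out like the reordered struct slab. */
struct slab_demo {
        unsigned long __page_flags;     /* page flags word */
        void *slab_cache;               /* kmem_cache pointer in the kernel */
        /* double-word boundary must fall here */
        void *freelist;                 /* first half of the cmpxchg operand */
        unsigned long counters;         /* second half of the cmpxchg operand */
};

/* Same condition as the IS_ALIGNED() assert above: catch a misaligned
 * freelist at compile time rather than as a runtime GPF. */
_Static_assert(offsetof(struct slab_demo, freelist) % (2 * sizeof(void *)) == 0,
               "freelist+counters not aligned for cmpxchg_double");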
