@@ -1932,7 +1932,68 @@ static inline void free_slab_obj_exts(struct slab *slab)
19321932 kfree (obj_exts );
19331933 slab -> obj_exts = 0 ;
19341934}
1935+
1936+ static inline bool need_slab_obj_ext (void )
1937+ {
1938+ if (mem_alloc_profiling_enabled ())
1939+ return true;
1940+
1941+ /*
1942+ * CONFIG_MEMCG_KMEM creates vector of obj_cgroup objects conditionally
1943+ * inside memcg_slab_post_alloc_hook. No other users for now.
1944+ */
1945+ return false;
1946+ }
1947+
1948+ static inline struct slabobj_ext *
1949+ prepare_slab_obj_exts_hook (struct kmem_cache * s , gfp_t flags , void * p )
1950+ {
1951+ struct slab * slab ;
1952+
1953+ if (!p )
1954+ return NULL ;
1955+
1956+ if (s -> flags & SLAB_NO_OBJ_EXT )
1957+ return NULL ;
1958+
1959+ if (flags & __GFP_NO_OBJ_EXT )
1960+ return NULL ;
1961+
1962+ slab = virt_to_slab (p );
1963+ if (!slab_obj_exts (slab ) &&
1964+ WARN (alloc_slab_obj_exts (slab , s , flags , false),
1965+ "%s, %s: Failed to create slab extension vector!\n" ,
1966+ __func__ , s -> name ))
1967+ return NULL ;
1968+
1969+ return slab_obj_exts (slab ) + obj_to_index (s , slab , p );
1970+ }
1971+
/*
 * Free-path counterpart of the allocation-tag accounting: subtract
 * @objects freed objects of cache @s (all residing on @slab, addresses
 * in @p) from their recorded allocation tags.
 */
static inline void
alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
			     int objects)
{
#ifdef CONFIG_MEM_ALLOC_PROFILING
	struct slabobj_ext *obj_exts;
	int i;

	if (!mem_alloc_profiling_enabled())
		return;

	/* Slab may have no extension vector (e.g. allocated with __GFP_NO_OBJ_EXT). */
	obj_exts = slab_obj_exts(slab);
	if (!obj_exts)
		return;

	for (i = 0; i < objects; i++)
		alloc_tag_sub(&obj_exts[obj_to_index(s, slab, p[i])].ref,
			      s->size);
#endif
}
1994+
19351995#else /* CONFIG_SLAB_OBJ_EXT */
1996+
19361997static int alloc_slab_obj_exts (struct slab * slab , struct kmem_cache * s ,
19371998 gfp_t gfp , bool new_slab )
19381999{
@@ -1942,6 +2003,24 @@ static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
/* No-op: without CONFIG_SLAB_OBJ_EXT there is no extension vector to free. */
static inline void free_slab_obj_exts(struct slab *slab)
{
}
2006+
/* Without CONFIG_SLAB_OBJ_EXT, object extension vectors are never needed. */
static inline bool need_slab_obj_ext(void)
{
	return false;
}
2011+
/* Stub: no slabobj_ext support, so there is never an entry to return. */
static inline struct slabobj_ext *
prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
{
	return NULL;
}
2017+
/* No-op stub: allocation-tag accounting requires CONFIG_SLAB_OBJ_EXT. */
static inline void
alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
			     int objects)
{
}
2023+
19452024#endif /* CONFIG_SLAB_OBJ_EXT */
19462025
19472026#ifdef CONFIG_MEMCG_KMEM
@@ -2370,7 +2449,7 @@ static __always_inline void account_slab(struct slab *slab, int order,
23702449static __always_inline void unaccount_slab (struct slab * slab , int order ,
23712450 struct kmem_cache * s )
23722451{
2373- if (memcg_kmem_online ())
2452+ if (memcg_kmem_online () || need_slab_obj_ext () )
23742453 free_slab_obj_exts (slab );
23752454
23762455 mod_node_page_state (slab_pgdat (slab ), cache_vmstat_idx (s ),
@@ -3823,6 +3902,7 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
38233902 unsigned int orig_size )
38243903{
38253904 unsigned int zero_size = s -> object_size ;
3905+ struct slabobj_ext * obj_exts ;
38263906 bool kasan_init = init ;
38273907 size_t i ;
38283908 gfp_t init_flags = flags & gfp_allowed_mask ;
@@ -3865,6 +3945,18 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
38653945 kmemleak_alloc_recursive (p [i ], s -> object_size , 1 ,
38663946 s -> flags , init_flags );
38673947 kmsan_slab_alloc (s , p [i ], init_flags );
3948+ if (need_slab_obj_ext ()) {
3949+ obj_exts = prepare_slab_obj_exts_hook (s , flags , p [i ]);
3950+ #ifdef CONFIG_MEM_ALLOC_PROFILING
3951+ /*
3952+ * Currently obj_exts is used only for allocation profiling.
3953+ * If other users appear then mem_alloc_profiling_enabled()
3954+ * check should be added before alloc_tag_add().
3955+ */
3956+ if (likely (obj_exts ))
3957+ alloc_tag_add (& obj_exts -> ref , current -> alloc_tag , s -> size );
3958+ #endif
3959+ }
38683960 }
38693961
38703962 memcg_slab_post_alloc_hook (s , objcg , flags , size , p );
@@ -4339,6 +4431,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
43394431 unsigned long addr )
43404432{
43414433 memcg_slab_free_hook (s , slab , & object , 1 );
4434+ alloc_tagging_slab_free_hook (s , slab , & object , 1 );
43424435
43434436 if (likely (slab_free_hook (s , object , slab_want_init_on_free (s ))))
43444437 do_slab_free (s , slab , object , object , 1 , addr );
@@ -4349,6 +4442,7 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
43494442 void * tail , void * * p , int cnt , unsigned long addr )
43504443{
43514444 memcg_slab_free_hook (s , slab , p , cnt );
4445+ alloc_tagging_slab_free_hook (s , slab , p , cnt );
43524446 /*
43534447 * With KASAN enabled slab_free_freelist_hook modifies the freelist
43544448 * to remove objects, whose reuse must be delayed.
0 commit comments