mm, kasan: add GFP flags to KASAN API
Add GFP flags to KASAN hooks for future patches to use.

This patch is based on the "mm: kasan: unified support for SLUB and SLAB
allocators" patch originally prepared by Dmitry Chernenkov.

Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
ramosian-glider authored and torvalds committed Mar 25, 2016
1 parent 7ed2f9e commit 505f5dc
Showing 8 changed files with 48 additions and 42 deletions.
19 changes: 11 additions & 8 deletions include/linux/kasan.h
@@ -55,13 +55,14 @@ void kasan_poison_slab(struct page *page);
 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
 void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 
-void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
 void kasan_kfree_large(const void *ptr);
 void kasan_kfree(void *ptr);
-void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
-void kasan_krealloc(const void *object, size_t new_size);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
+		   gfp_t flags);
+void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
 
-void kasan_slab_alloc(struct kmem_cache *s, void *object);
+void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
 void kasan_slab_free(struct kmem_cache *s, void *object);
 
 struct kasan_cache {
@@ -94,14 +95,16 @@ static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
 				void *object) {}
 
-static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
+static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
 static inline void kasan_kfree_large(const void *ptr) {}
 static inline void kasan_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
-				size_t size) {}
-static inline void kasan_krealloc(const void *object, size_t new_size) {}
+				size_t size, gfp_t flags) {}
+static inline void kasan_krealloc(const void *object, size_t new_size,
+				gfp_t flags) {}
 
-static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
+				gfp_t flags) {}
 static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
4 changes: 2 additions & 2 deletions include/linux/slab.h
@@ -376,7 +376,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
 {
 	void *ret = kmem_cache_alloc(s, flags);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
 	return ret;
 }
 
@@ -387,7 +387,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 {
 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
 #endif /* CONFIG_TRACING */
15 changes: 8 additions & 7 deletions mm/kasan/kasan.c
@@ -434,9 +434,9 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 }
 #endif
 
-void kasan_slab_alloc(struct kmem_cache *cache, void *object)
+void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
-	kasan_kmalloc(cache, object, cache->object_size);
+	kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
 void kasan_slab_free(struct kmem_cache *cache, void *object)
@@ -462,7 +462,8 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 }
 
-void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
+		   gfp_t flags)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
@@ -491,7 +492,7 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
-void kasan_kmalloc_large(const void *ptr, size_t size)
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 {
 	struct page *page;
 	unsigned long redzone_start;
@@ -510,7 +511,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size)
 			KASAN_PAGE_REDZONE);
 }
 
-void kasan_krealloc(const void *object, size_t size)
+void kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
 	struct page *page;
 
@@ -520,9 +521,9 @@ void kasan_krealloc(const void *object, size_t size)
 	page = virt_to_head_page(object);
 
 	if (unlikely(!PageSlab(page)))
-		kasan_kmalloc_large(object, size);
+		kasan_kmalloc_large(object, size, flags);
 	else
-		kasan_kmalloc(page->slab_cache, object, size);
+		kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
 void kasan_kfree(void *ptr)
16 changes: 8 additions & 8 deletions mm/mempool.c
@@ -112,12 +112,12 @@ static void kasan_poison_element(mempool_t *pool, void *element)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
-static void kasan_unpoison_element(mempool_t *pool, void *element)
+static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 {
 	if (pool->alloc == mempool_alloc_slab)
-		kasan_slab_alloc(pool->pool_data, element);
+		kasan_slab_alloc(pool->pool_data, element, flags);
 	if (pool->alloc == mempool_kmalloc)
-		kasan_krealloc(element, (size_t)pool->pool_data);
+		kasan_krealloc(element, (size_t)pool->pool_data, flags);
 	if (pool->alloc == mempool_alloc_pages)
 		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
@@ -130,12 +130,12 @@ static void add_element(mempool_t *pool, void *element)
 	pool->elements[pool->curr_nr++] = element;
 }
 
-static void *remove_element(mempool_t *pool)
+static void *remove_element(mempool_t *pool, gfp_t flags)
 {
 	void *element = pool->elements[--pool->curr_nr];
 
 	BUG_ON(pool->curr_nr < 0);
-	kasan_unpoison_element(pool, element);
+	kasan_unpoison_element(pool, element, flags);
 	check_element(pool, element);
 	return element;
 }
@@ -154,7 +154,7 @@ void mempool_destroy(mempool_t *pool)
 		return;
 
 	while (pool->curr_nr) {
-		void *element = remove_element(pool);
+		void *element = remove_element(pool, GFP_KERNEL);
 		pool->free(element, pool->pool_data);
 	}
 	kfree(pool->elements);
@@ -250,7 +250,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr)
 	spin_lock_irqsave(&pool->lock, flags);
 	if (new_min_nr <= pool->min_nr) {
 		while (new_min_nr < pool->curr_nr) {
-			element = remove_element(pool);
+			element = remove_element(pool, GFP_KERNEL);
 			spin_unlock_irqrestore(&pool->lock, flags);
 			pool->free(element, pool->pool_data);
 			spin_lock_irqsave(&pool->lock, flags);
@@ -347,7 +347,7 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 
 	spin_lock_irqsave(&pool->lock, flags);
 	if (likely(pool->curr_nr)) {
-		element = remove_element(pool);
+		element = remove_element(pool, gfp_temp);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		/* paired with rmb in mempool_free(), read comment there */
 		smp_wmb();
15 changes: 8 additions & 7 deletions mm/slab.c
@@ -3378,7 +3378,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
 
@@ -3444,7 +3444,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 
 	ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
 	return ret;
@@ -3468,7 +3468,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
@@ -3486,7 +3486,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	void *ret;
 
 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
-	kasan_kmalloc(cachep, ret, size);
+
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
@@ -3505,7 +3506,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 
 	return ret;
 }
@@ -3541,7 +3542,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
 
@@ -4323,7 +4324,7 @@ size_t ksize(const void *objp)
 	/* We assume that ksize callers could use the whole allocated area,
 	 * so we need to unpoison this area.
 	 */
-	kasan_krealloc(objp, size);
+	kasan_krealloc(objp, size, GFP_NOWAIT);
 
 	return size;
 }
2 changes: 1 addition & 1 deletion mm/slab.h
@@ -405,7 +405,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 		kmemleak_alloc_recursive(object, s->object_size, 1,
 					 s->flags, flags);
-		kasan_slab_alloc(s, object);
+		kasan_slab_alloc(s, object, flags);
 	}
 	memcg_kmem_put_cache(s);
 }
4 changes: 2 additions & 2 deletions mm/slab_common.c
@@ -1013,7 +1013,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	page = alloc_kmem_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
 	kmemleak_alloc(ret, size, 1, flags);
-	kasan_kmalloc_large(ret, size);
+	kasan_kmalloc_large(ret, size, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
@@ -1192,7 +1192,7 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 	ks = ksize(p);
 
 	if (ks >= new_size) {
-		kasan_krealloc((void *)p, new_size);
+		kasan_krealloc((void *)p, new_size, flags);
 		return (void *)p;
 	}
 
15 changes: 8 additions & 7 deletions mm/slub.c
@@ -1313,7 +1313,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
 	kmemleak_alloc(ptr, size, 1, flags);
-	kasan_kmalloc_large(ptr, size);
+	kasan_kmalloc_large(ptr, size, flags);
 }
 
 static inline void kfree_hook(const void *x)
@@ -2596,7 +2596,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2624,7 +2624,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -3182,7 +3182,8 @@ static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
-	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
+	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
+		      GFP_KERNEL);
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
@@ -3561,7 +3562,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
 
 	return ret;
 }
@@ -3606,7 +3607,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
-	kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
 
 	return ret;
 }
@@ -3635,7 +3636,7 @@ size_t ksize(const void *object)
 	size_t size = __ksize(object);
 	/* We assume that ksize callers could use whole allocated area,
 	   so we need unpoison this area. */
-	kasan_krealloc(object, size);
+	kasan_krealloc(object, size, GFP_NOWAIT);
 	return size;
 }
 EXPORT_SYMBOL(ksize);
