Skip to content

Commit 979ff8d

Browse files
keessmb49
authored and committed
mm: vmalloc: support more granular vrealloc() sizing
BugLink: https://bugs.launchpad.net/bugs/2115252 commit a0309faf1cb0622cac7c820150b7abf2024acff5 upstream. Introduce struct vm_struct::requested_size so that the requested (re)allocation size is retained separately from the allocated area size. This means that KASAN will correctly poison the correct spans of requested bytes. This also means we can support growing the usable portion of an allocation that can already be supported by the existing area's existing allocation. Link: https://lkml.kernel.org/r/20250426001105.it.679-kees@kernel.org Fixes: 3ddc2fe ("mm: vmalloc: implement vrealloc()") Signed-off-by: Kees Cook <kees@kernel.org> Reported-by: Erhard Furtner <erhard_f@mailbox.org> Closes: https://lore.kernel.org/all/20250408192503.6149a816@outsider.home/ Reviewed-by: Danilo Krummrich <dakr@kernel.org> Cc: Michal Hocko <mhocko@suse.com> Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Manuel Diewald <manuel.diewald@canonical.com> Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
1 parent e30fbd9 commit 979ff8d

File tree

2 files changed

+25
-7
lines changed

2 files changed

+25
-7
lines changed

include/linux/vmalloc.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@ struct vm_struct {
6161
unsigned int nr_pages;
6262
phys_addr_t phys_addr;
6363
const void *caller;
64+
unsigned long requested_size;
6465
};
6566

6667
struct vmap_area {

mm/vmalloc.c

Lines changed: 24 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
19401940
{
19411941
vm->flags = flags;
19421942
vm->addr = (void *)va->va_start;
1943-
vm->size = va_size(va);
1943+
vm->size = vm->requested_size = va_size(va);
19441944
vm->caller = caller;
19451945
va->vm = vm;
19461946
}
@@ -3133,6 +3133,7 @@ struct vm_struct *__get_vm_area_node(unsigned long size,
31333133

31343134
area->flags = flags;
31353135
area->caller = caller;
3136+
area->requested_size = requested_size;
31363137

31373138
va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
31383139
if (IS_ERR(va)) {
@@ -4068,6 +4069,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
40684069
*/
40694070
void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
40704071
{
4072+
struct vm_struct *vm = NULL;
4073+
size_t alloced_size = 0;
40714074
size_t old_size = 0;
40724075
void *n;
40734076

@@ -4077,30 +4080,44 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
40774080
}
40784081

40794082
if (p) {
4080-
struct vm_struct *vm;
4081-
40824083
vm = find_vm_area(p);
40834084
if (unlikely(!vm)) {
40844085
WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
40854086
return NULL;
40864087
}
40874088

4088-
old_size = get_vm_area_size(vm);
4089+
alloced_size = get_vm_area_size(vm);
4090+
old_size = vm->requested_size;
4091+
if (WARN(alloced_size < old_size,
4092+
"vrealloc() has mismatched area vs requested sizes (%p)\n", p))
4093+
return NULL;
40894094
}
40904095

40914096
/*
40924097
* TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
40934098
* would be a good heuristic for when to shrink the vm_area?
40944099
*/
40954100
if (size <= old_size) {
4096-
/* Zero out spare memory. */
4097-
if (want_init_on_alloc(flags))
4101+
/* Zero out "freed" memory. */
4102+
if (want_init_on_free())
40984103
memset((void *)p + size, 0, old_size - size);
4104+
vm->requested_size = size;
40994105
kasan_poison_vmalloc(p + size, old_size - size);
4100-
kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
41014106
return (void *)p;
41024107
}
41034108

4109+
/*
4110+
* We already have the bytes available in the allocation; use them.
4111+
*/
4112+
if (size <= alloced_size) {
4113+
kasan_unpoison_vmalloc(p + old_size, size - old_size,
4114+
KASAN_VMALLOC_PROT_NORMAL);
4115+
/* Zero out "alloced" memory. */
4116+
if (want_init_on_alloc(flags))
4117+
memset((void *)p + old_size, 0, size - old_size);
4118+
vm->requested_size = size;
4119+
}
4120+
41044121
/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
41054122
n = __vmalloc_noprof(size, flags);
41064123
if (!n)

0 commit comments

Comments
 (0)