diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index ba7f4c8f5c3e4c..179bfb381bd478 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -36,6 +36,10 @@
 #include <linux/mm.h>
 
+#if defined(CONFIG_XT_CMA_HELPER)
+#include <xen/xt_cma_helper.h>
+#endif
+
 struct start_info _xen_start_info;
 struct start_info *xen_start_info = &_xen_start_info;
 EXPORT_SYMBOL(xen_start_info);
@@ -353,7 +357,11 @@ static int __init xen_guest_init(void)
 	if (efi_enabled(EFI_RUNTIME_SERVICES))
 		xen_efi_runtime_setup();
 
+#if defined(CONFIG_XT_CMA_HELPER)
+	shared_info_page = (struct shared_info *)xt_cma_get_zeroed_page(GFP_KERNEL);
+#else
 	shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL);
+#endif
 
 	if (!shared_info_page) {
 		pr_err("not enough memory\n");
diff --git a/arch/arm/xen/xt_cma_helper.c b/arch/arm/xen/xt_cma_helper.c
new file mode 100644
index 00000000000000..3c126516efe256
--- /dev/null
+++ b/arch/arm/xen/xt_cma_helper.c
@@ -0,0 +1,270 @@
+#include <linux/genalloc.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+#include <xen/xen.h>
+#include <xen/grant_table.h>
+#include <xen/mem-reservation.h>
+#include <xen/xt_cma_helper.h>
+
+/* The size of the boot memory, in bytes, for the page allocator. */
+static int xt_cma_helper_bootmem_page_pool_sz __initdata = SZ_64M;
+
+/* The size of the boot memory, in bytes, for the CMA allocator. */
+static int xt_cma_helper_bootmem_cma_pool_sz __initdata = SZ_128M;
+
+/* Memory pool for non-CMA allocations (page pool). */
+static phys_addr_t xt_cma_helper_bootmem_page_pool_phys;
+static struct gen_pool *xt_cma_helper_bootmem_page_pool;
+
+/* Memory pool for CMA allocations. */
+static phys_addr_t xt_cma_helper_bootmem_cma_pool_phys;
+static struct gen_pool *xt_cma_helper_bootmem_cma_pool;
+
+static int __init xt_cma_helper_bootmem_page_setup(char *p)
+{
+	xt_cma_helper_bootmem_page_pool_sz = memparse(p, &p);
+	return 0;
+}
+early_param("xt_page_pool", xt_cma_helper_bootmem_page_setup);
+
+static int __init xt_cma_helper_bootmem_cma_setup(char *p)
+{
+	xt_cma_helper_bootmem_cma_pool_sz = memparse(p, &p);
+	return 0;
+}
+early_param("xt_cma", xt_cma_helper_bootmem_cma_setup);
+
+void __init xt_cma_helper_init(void)
+{
+	if (!xen_domain())
+		return;
+
+	xt_cma_helper_bootmem_page_pool_phys =
+		memblock_alloc_base(xt_cma_helper_bootmem_page_pool_sz,
+				    SZ_2M, MEMBLOCK_ALLOC_ANYWHERE);
+
+	xt_cma_helper_bootmem_cma_pool_phys =
+		memblock_alloc_base(xt_cma_helper_bootmem_cma_pool_sz,
+				    SZ_2M, MEMBLOCK_ALLOC_ANYWHERE);
+
+	pr_info("Allocated %d bytes for Xen page allocator at %pa\n",
+		xt_cma_helper_bootmem_page_pool_sz,
+		&xt_cma_helper_bootmem_page_pool_phys);
+
+	pr_info("Allocated %d bytes for Xen CMA allocator at %pa\n",
+		xt_cma_helper_bootmem_cma_pool_sz,
+		&xt_cma_helper_bootmem_cma_pool_phys);
+}
+
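+/*
+ * Carve the two memblock regions reserved in xt_cma_helper_init() into
+ * gen_pool allocators: one backing single-page allocations, the other
+ * backing contiguous (CMA-style) DMA buffers. Both pools use best-fit
+ * allocation at PAGE_SHIFT granularity.
+ */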
+static void create_page_alloc_pools(void)
+{
+	void *vaddr = phys_to_virt(xt_cma_helper_bootmem_page_pool_phys);
+	int ret;
+
+	/* Page pool. */
+	xt_cma_helper_bootmem_page_pool = gen_pool_create(PAGE_SHIFT, -1);
+	BUG_ON(!xt_cma_helper_bootmem_page_pool);
+
+	gen_pool_set_algo(xt_cma_helper_bootmem_page_pool,
+			  gen_pool_best_fit, NULL);
+	ret = gen_pool_add_virt(xt_cma_helper_bootmem_page_pool,
+				(unsigned long)vaddr,
+				xt_cma_helper_bootmem_page_pool_phys,
+				xt_cma_helper_bootmem_page_pool_sz, -1);
+	BUG_ON(ret);
+
+	/* CMA pool. */
+	vaddr = phys_to_virt(xt_cma_helper_bootmem_cma_pool_phys);
+
+	xt_cma_helper_bootmem_cma_pool = gen_pool_create(PAGE_SHIFT, -1);
+	BUG_ON(!xt_cma_helper_bootmem_cma_pool);
+
+	gen_pool_set_algo(xt_cma_helper_bootmem_cma_pool,
+			  gen_pool_best_fit, NULL);
+	ret = gen_pool_add_virt(xt_cma_helper_bootmem_cma_pool,
+				(unsigned long)vaddr,
+				xt_cma_helper_bootmem_cma_pool_phys,
+				xt_cma_helper_bootmem_cma_pool_sz, -1);
+	BUG_ON(ret);
+}
+
+struct page *xt_cma_alloc_page(gfp_t gfp_mask)
+{
+	unsigned long va;
+
+	/*
+	 * FIXME: this is first called from xen_guest_init(), which is
+	 * an early init call. We could also install an early init call
+	 * for the pool creation below, but we cannot guarantee it would
+	 * run before xen_guest_init().
+	 */
+	if (unlikely(!xt_cma_helper_bootmem_page_pool))
+		create_page_alloc_pools();
+
+	/* gen_pool_alloc() returns 0, not an ERR_PTR, on failure. */
+	va = gen_pool_alloc(xt_cma_helper_bootmem_page_pool, PAGE_SIZE);
+	if (!va)
+		return NULL;
+
+	return virt_to_page(va);
+}
+
+unsigned long xt_cma_get_zeroed_page(gfp_t gfp_mask)
+{
+	struct page *page = xt_cma_alloc_page(gfp_mask);
+	void *va;
+
+	if (!page)
+		return 0;
+
+	va = page_to_virt(page);
+	memset(va, 0, PAGE_SIZE);
+	return (unsigned long)va;
+}
+
+void xt_cma_free_page(unsigned long addr)
+{
+	gen_pool_free(xt_cma_helper_bootmem_page_pool, addr, PAGE_SIZE);
+}
+
+static int xt_cma_alloc_pages(gfp_t gfp_mask, int count,
+			      struct page **pages)
+{
+	int i;
+
+	/*
+	 * Allocate pages one by one to mimic what the balloon driver does:
+	 * this makes it possible to later free individual pages, which
+	 * would be a problem if we allocated all pages at once from the
+	 * pool.
+	 */
+	for (i = 0; i < count; i++) {
+		pages[i] = xt_cma_alloc_page(gfp_mask);
+		if (!pages[i])
+			goto fail;
+	}
+	return 0;
+
+fail:
+	/* Only unwind the pages that were actually allocated. */
+	while (--i >= 0)
+		xt_cma_free_page((unsigned long)page_to_virt(pages[i]));
+	return -ENOMEM;
+}
+
+static void xt_cma_free_pages(struct page **pages, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		xt_cma_free_page((unsigned long)page_to_virt(pages[i]));
+}
+
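+/*
+ * DMA helpers backed by the CMA gen_pool. The pool was reserved as a
+ * single physically contiguous memblock region, so every range handed
+ * out by gen_pool_alloc() is contiguous as well, and the bus address is
+ * the plain physical address. gfp_mask and the write-combine variants
+ * are accepted for API compatibility only.
+ */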
+void *xt_cma_dma_alloc_coherent(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t gfp_mask)
+{
+	unsigned long va;
+
+	va = gen_pool_alloc(xt_cma_helper_bootmem_cma_pool,
+			    ALIGN(size, PAGE_SIZE));
+	if (!va)
+		return NULL;
+
+	*dma_handle = virt_to_phys((void *)va);
+	return (void *)va;
+}
+EXPORT_SYMBOL(xt_cma_dma_alloc_coherent);
+
+void *xt_cma_dma_alloc_wc(struct device *dev, size_t size,
+			  dma_addr_t *dma_handle, gfp_t gfp_mask)
+{
+	return xt_cma_dma_alloc_coherent(dev, size, dma_handle, gfp_mask);
+}
+EXPORT_SYMBOL(xt_cma_dma_alloc_wc);
+
+void xt_cma_dma_free_coherent(struct device *dev, size_t size,
+			      void *cpu_addr, dma_addr_t dma_handle)
+{
+	gen_pool_free(xt_cma_helper_bootmem_cma_pool,
+		      (unsigned long)cpu_addr, ALIGN(size, PAGE_SIZE));
+}
+EXPORT_SYMBOL(xt_cma_dma_free_coherent);
+
+void xt_cma_dma_free_wc(struct device *dev, size_t size,
+			void *cpu_addr, dma_addr_t dma_handle)
+{
+	xt_cma_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+EXPORT_SYMBOL(xt_cma_dma_free_wc);
+
+int alloc_xenballooned_pages(int nr_pages, struct page **pages)
+{
+	xen_pfn_t *frames;
+	int i, ret;
+
+	ret = xt_cma_alloc_pages(GFP_KERNEL, nr_pages, pages);
+	if (ret < 0)
+		return ret;
+
+	frames = kcalloc(nr_pages, sizeof(*frames), GFP_KERNEL);
+	if (!frames) {
+		pr_debug("Failed to allocate frames to decrease reservation\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0; i < nr_pages; i++) {
+		struct page *page = pages[i];
+
+		frames[i] = xen_page_to_gfn(page);
+		xenmem_reservation_scrub_page(page);
+	}
+
+	xenmem_reservation_va_mapping_reset(nr_pages, pages);
+
+	ret = xenmem_reservation_decrease(nr_pages, frames);
+	if (ret != nr_pages) {
+		pr_debug("Failed to decrease reservation for pages\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	ret = gnttab_pages_set_private(nr_pages, pages);
+	if (ret < 0)
+		goto fail;
+
+	kfree(frames);
+	return 0;
+
+fail:
+	xt_cma_free_pages(pages, nr_pages);
+	kfree(frames);
+	return ret;
+}
+EXPORT_SYMBOL(alloc_xenballooned_pages);
+
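+/*
+ * Return ballooned pages to Xen: increase the domain's reservation for
+ * the backing frames, restore the kernel VA mappings and give the pages
+ * back to the xt_cma page pool. Mirrors free_xenballooned_pages() in
+ * drivers/xen/balloon.c, which is compiled out when XT_CMA_HELPER is set.
+ */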
+void free_xenballooned_pages(int nr_pages, struct page **pages)
+{
+	xen_pfn_t *frames;
+	int i, ret;
+
+	gnttab_pages_clear_private(nr_pages, pages);
+
+	frames = kcalloc(nr_pages, sizeof(*frames), GFP_KERNEL);
+	if (!frames) {
+		pr_debug("Failed to allocate frames to increase reservation\n");
+		return;
+	}
+
+	for (i = 0; i < nr_pages; i++)
+		frames[i] = xen_page_to_gfn(pages[i]);
+
+	ret = xenmem_reservation_increase(nr_pages, frames);
+	if (ret != nr_pages)
+		pr_debug("Failed to increase reservation for pages\n");
+
+	xenmem_reservation_va_mapping_update(nr_pages, pages, frames);
+
+	xt_cma_free_pages(pages, nr_pages);
+
+	kfree(frames);
+}
+EXPORT_SYMBOL(free_xenballooned_pages);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 00e7b900ca4193..fe2d3946607420 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -53,6 +53,10 @@
 #include <asm/tlb.h>
 #include <asm/alternative.h>
 
+#if defined(CONFIG_XT_CMA_HELPER)
+#include <xen/xt_cma_helper.h>
+#endif
+
 /*
  * We need to be able to catch inadvertent references to memstart_addr
  * that occur (potentially in generic code) before arm64_memblock_init()
@@ -478,6 +482,10 @@ void __init arm64_memblock_init(void)
 
 	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 
+#if defined(CONFIG_XT_CMA_HELPER)
+	xt_cma_helper_init();
+#endif
+
 	dma_contiguous_reserve(arm64_dma_phys_limit);
 
 	memblock_allow_resize();
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index 8ff8aa9c62285c..79e3790e683574 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,3 +1,4 @@
 xen-arm-y	+= $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
 obj-y		:= xen-arm.o hypercall.o
-obj-$(CONFIG_XEN_EFI) += $(addprefix ../../arm/xen/, efi.o)
+obj-$(CONFIG_XEN_EFI) += $(addprefix ../../arm/xen/, efi.o)
+obj-$(CONFIG_XT_CMA_HELPER) += $(addprefix ../../arm/xen/, xt_cma_helper.o)
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e12bb256036fbb..76739c1e204081 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -73,7 +73,9 @@
 #include <xen/page.h>
 #include <xen/mem-reservation.h>
 
+#if !defined(CONFIG_XT_CMA_HELPER)
 static int xen_hotplug_unpopulated;
+#endif
 
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
@@ -597,6 +599,7 @@ void balloon_set_new_target(unsigned long target)
 }
 EXPORT_SYMBOL_GPL(balloon_set_new_target);
 
+#if !defined(CONFIG_XT_CMA_HELPER)
 static int add_ballooned_pages(int nr_pages)
 {
 	enum bp_state st;
@@ -692,6 +695,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 	mutex_unlock(&balloon_mutex);
 }
 EXPORT_SYMBOL(free_xenballooned_pages);
+#endif
 
 #ifdef CONFIG_XEN_PV
 static void __init balloon_add_region(unsigned long start_pfn,
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 85eb3134712b47..c4420e3ea0c047 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -63,6 +63,10 @@
 #include <asm/pgtable.h>
 #include <asm/sync_bitops.h>
 
+#if defined(CONFIG_XT_CMA_HELPER)
+#include <xen/xt_cma_helper.h>
+#endif
+
 /* External tools reserve first few grant table entries. */
 #define NR_RESERVED_ENTRIES 8
 #define GNTTAB_LIST_END 0xffffffff
@@ -763,6 +767,16 @@ int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
 	int i, ret;
 
 	size = args->nr_pages << PAGE_SHIFT;
+#if defined(CONFIG_XT_CMA_HELPER)
+	if (args->coherent)
+		args->vaddr = xt_cma_dma_alloc_coherent(args->dev, size,
+							&args->dev_bus_addr,
+							GFP_KERNEL | __GFP_NOWARN);
+	else
+		args->vaddr = xt_cma_dma_alloc_wc(args->dev, size,
+						  &args->dev_bus_addr,
+						  GFP_KERNEL | __GFP_NOWARN);
+#else
 	if (args->coherent)
 		args->vaddr = dma_alloc_coherent(args->dev, size,
 						 &args->dev_bus_addr,
@@ -771,6 +785,7 @@ int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
 		args->vaddr = dma_alloc_wc(args->dev, size,
 					   &args->dev_bus_addr,
 					   GFP_KERNEL | __GFP_NOWARN);
+#endif
 	if (!args->vaddr) {
 		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
 		return -ENOMEM;
@@ -833,12 +848,21 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
 			       args->frames);
 
 	size = args->nr_pages << PAGE_SHIFT;
+#if defined(CONFIG_XT_CMA_HELPER)
+	if (args->coherent)
+		xt_cma_dma_free_coherent(args->dev, size,
+					 args->vaddr, args->dev_bus_addr);
+	else
+		xt_cma_dma_free_wc(args->dev, size,
+				   args->vaddr, args->dev_bus_addr);
+#else
 	if (args->coherent)
 		dma_free_coherent(args->dev, size,
 				  args->vaddr, args->dev_bus_addr);
 	else
 		dma_free_wc(args->dev, size,
 			    args->vaddr, args->dev_bus_addr);
+#endif
 	return ret;
 }
 EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
diff --git a/include/xen/xt_cma_helper.h b/include/xen/xt_cma_helper.h
new file mode 100644
index 00000000000000..725d426aca144c
--- /dev/null
+++ b/include/xen/xt_cma_helper.h
@@ -0,0 +1,23 @@
+#ifndef XT_CMA_HELPER_H
+#define XT_CMA_HELPER_H
+
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+
+void xt_cma_helper_init(void);
+
+unsigned long xt_cma_get_zeroed_page(gfp_t gfp_mask);
+struct page *xt_cma_alloc_page(gfp_t gfp_mask);
+void xt_cma_free_page(unsigned long addr);
+
+void *xt_cma_dma_alloc_coherent(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t gfp_mask);
+void *xt_cma_dma_alloc_wc(struct device *dev, size_t size,
+			  dma_addr_t *dma_handle, gfp_t gfp_mask);
+
+void xt_cma_dma_free_coherent(struct device *dev, size_t size,
+			      void *cpu_addr, dma_addr_t dma_handle);
+void xt_cma_dma_free_wc(struct device *dev, size_t size,
+			void *cpu_addr, dma_addr_t dma_addr);
+
+#endif /* XT_CMA_HELPER_H */
diff --git a/mm/Kconfig b/mm/Kconfig
index 9c4bdddd80c212..547f6cf76bbdeb 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -527,6 +527,15 @@ config CMA_AREAS
 
 	  If unsure, leave the default value "7".
 
+config XT_CMA_HELPER
+	bool "Xen-troops Xen CMA and page allocators"
+	depends on CMA && XEN
+	default y
+	help
+	  Support for the Xen-troops dedicated CMA and page allocators.
+
+	  If unsure, say "y".
+
 config MEM_SOFT_DIRTY
 	bool "Track memory changes"
 	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
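
Usage sketch (illustrative only, not part of the patch): the pool sizes
default to 64 MiB (page pool) and 128 MiB (CMA pool) and can be overridden
with the xt_page_pool= and xt_cma= kernel command-line parameters. The
balloon and grant-table entry points keep their upstream signatures, so
existing callers need no changes; a hypothetical grant DMA user would
still look like this:

	/*
	 * Hypothetical caller; the gnttab_dma_alloc_args fields are the
	 * ones used in drivers/xen/grant-table.c above. With
	 * CONFIG_XT_CMA_HELPER=y the buffer is carved out of the xt_cma
	 * gen_pool instead of coming from dma_alloc_coherent()/dma_alloc_wc().
	 */
	struct gnttab_dma_alloc_args args = {
		.dev = dev,		/* device that will do the DMA */
		.coherent = true,	/* use the coherent allocation path */
		.nr_pages = nr_pages,
		.pages = pages,		/* caller-provided page array */
		.frames = frames,	/* caller-provided xen_pfn_t array */
	};
	int ret;

	ret = gnttab_dma_alloc_pages(&args);
	if (ret)
		return ret;
	/* ... use args.vaddr and args.dev_bus_addr ... */
	ret = gnttab_dma_free_pages(&args);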