arm64, numa: Add NUMA support for arm64 platforms.
Attempt to get the memory and CPU NUMA node information via of_numa.  If
that fails, fall back to the dummy NUMA node and map all memory and CPUs
to node 0.

Tested-by: Shannon Zhao <shannon.zhao@linaro.org>
Reviewed-by: Robert Richter <rrichter@cavium.com>
Signed-off-by: Ganapatrao Kulkarni <gkulkarni@caviumnetworks.com>
Signed-off-by: David Daney <david.daney@cavium.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Ganapatrao Kulkarni authored and wildea01 committed Apr 15, 2016
1 parent 3194ac6 commit 1a2db30
Showing 10 changed files with 538 additions and 5 deletions.
26 changes: 26 additions & 0 deletions arch/arm64/Kconfig
@@ -76,6 +76,7 @@ config ARM64
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP if NUMA
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -98,6 +99,7 @@ config ARM64
select SYSCTL_EXCEPTION_TRACE
select HAVE_CONTEXT_TRACKING
select HAVE_ARM_SMCCC
select OF_NUMA if NUMA && OF
help
ARM 64-bit (AArch64) Linux support.

@@ -546,6 +548,30 @@ config HOTPLUG_CPU
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.

# Common NUMA Features
config NUMA
bool "Numa Memory Allocation and Scheduler Support"
depends on SMP
help
Enable NUMA (Non Uniform Memory Access) support.

The kernel will try to allocate memory used by a CPU on the
local memory of the CPU and add some more
NUMA awareness to the kernel.

config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)"
range 1 10
default "2"
depends on NEED_MULTIPLE_NODES
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.

config USE_PERCPU_NUMA_NODE_ID
def_bool y
depends on NUMA

source kernel/Kconfig.preempt
source kernel/Kconfig.hz

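The NUMA help text above describes the usual payoff: allocations made on behalf of a CPU are satisfied from that CPU's local node where possible. NODES_SHIFT bounds the node count (MAX_NUMNODES is 1 << CONFIG_NODES_SHIFT), and USE_PERCPU_NUMA_NODE_ID lets cpu_to_node() read a per-cpu node id. A minimal, hypothetical consumer of that behaviour (not part of this commit) might look like:

#include <linux/slab.h>
#include <linux/topology.h>

/*
 * Hypothetical example, not part of this commit: allocate a buffer from
 * memory local to the given CPU's NUMA node.  cpu_to_node() resolves via
 * the per-cpu node id that numa_store_cpu_info() sets up at boot.
 */
static void *alloc_cpu_local_buf(unsigned int cpu, size_t size)
{
	return kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
}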
12 changes: 12 additions & 0 deletions arch/arm64/include/asm/mmzone.h
@@ -0,0 +1,12 @@
#ifndef __ASM_MMZONE_H
#define __ASM_MMZONE_H

#ifdef CONFIG_NUMA

#include <asm/numa.h>

extern struct pglist_data *node_data[];
#define NODE_DATA(nid) (node_data[(nid)])

#endif /* CONFIG_NUMA */
#endif /* __ASM_MMZONE_H */
45 changes: 45 additions & 0 deletions arch/arm64/include/asm/numa.h
@@ -0,0 +1,45 @@
#ifndef __ASM_NUMA_H
#define __ASM_NUMA_H

#include <asm/topology.h>

#ifdef CONFIG_NUMA

/* currently, arm64 implements flat NUMA topology */
#define parent_node(node) (node)

int __node_distance(int from, int to);
#define node_distance(a, b) __node_distance(a, b)

extern nodemask_t numa_nodes_parsed __initdata;

/* Mappings between node number and cpus on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
void numa_clear_node(unsigned int cpu);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
const struct cpumask *cpumask_of_node(int node);
#else
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
return node_to_cpumask_map[node];
}
#endif

void __init arm64_numa_init(void);
int __init numa_add_memblk(int nodeid, u64 start, u64 end);
void __init numa_set_distance(int from, int to, int distance);
void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
void numa_store_cpu_info(unsigned int cpu);

#else /* CONFIG_NUMA */

static inline void numa_store_cpu_info(unsigned int cpu) { }
static inline void arm64_numa_init(void) { }
static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }

#endif /* CONFIG_NUMA */

#endif /* __ASM_NUMA_H */
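The functions declared above are implemented in arch/arm64/mm/numa.c, which this view has not loaded (see the note at the end of the diff). As a hedged sketch of the fallback path the commit message describes — helper names such as numa_init() and of_numa_init() are assumed to come from that file — the flow is roughly:

/*
 * Sketch only, assuming numa_init()/of_numa_init() helpers from the
 * not-shown arch/arm64/mm/numa.c: try the devicetree description first;
 * if that fails, fake a single node 0 covering all memblock memory.
 */
static int __init dummy_numa_init(void)
{
	struct memblock_region *mblk;

	for_each_memblock(memory, mblk)
		if (numa_add_memblk(0, mblk->base, mblk->base + mblk->size))
			return -EINVAL;

	node_set(0, numa_nodes_parsed);
	return 0;
}

void __init arm64_numa_init(void)
{
	if (numa_init(of_numa_init))		/* devicetree parse failed */
		numa_init(dummy_numa_init);	/* map everything to node 0 */
}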
10 changes: 10 additions & 0 deletions arch/arm64/include/asm/topology.h
@@ -22,6 +22,16 @@ void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);

#ifdef CONFIG_NUMA

struct pci_bus;
int pcibus_to_node(struct pci_bus *bus);
#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))

#endif /* CONFIG_NUMA */

#include <asm-generic/topology.h>

#endif /* _ASM_ARM_TOPOLOGY_H */
10 changes: 10 additions & 0 deletions arch/arm64/kernel/pci.c
@@ -74,6 +74,16 @@ int raw_pci_write(unsigned int domain, unsigned int bus,
return -ENXIO;
}

#ifdef CONFIG_NUMA

int pcibus_to_node(struct pci_bus *bus)
{
return dev_to_node(&bus->dev);
}
EXPORT_SYMBOL(pcibus_to_node);

#endif

#ifdef CONFIG_ACPI
/* Root bridge scanning */
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
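With pcibus_to_node() exported here and cpumask_of_pcibus() defined in topology.h above, drivers can steer memory and work towards the node a PCI device sits on. A hypothetical consumer, not part of this commit:

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Hypothetical: place a descriptor ring on the device's node.  A return
 * of NUMA_NO_NODE (-1) from pcibus_to_node() means "any node". */
static void *alloc_ring_near(struct pci_dev *pdev, size_t size)
{
	return kzalloc_node(size, GFP_KERNEL, pcibus_to_node(pdev->bus));
}

/* Hypothetical: keep a polling thread on CPUs local to the device. */
static int bind_poller(struct task_struct *tsk, struct pci_bus *bus)
{
	return set_cpus_allowed_ptr(tsk, cpumask_of_pcibus(bus));
}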
4 changes: 4 additions & 0 deletions arch/arm64/kernel/setup.c
@@ -53,6 +53,7 @@
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -319,6 +320,9 @@ static int __init topology_init(void)
{
int i;

for_each_online_node(i)
register_one_node(i);

for_each_possible_cpu(i) {
struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
cpu->hotpluggable = 1;
4 changes: 4 additions & 0 deletions arch/arm64/kernel/smp.c
@@ -45,6 +45,7 @@
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
@@ -203,6 +204,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
static void smp_store_cpu_info(unsigned int cpuid)
{
store_cpu_topology(cpuid);
numa_store_cpu_info(cpuid);
}

/*
@@ -633,6 +635,8 @@ static void __init of_parse_and_init_cpus(void)

pr_debug("cpu logical map 0x%llx\n", hwid);
cpu_logical_map(cpu_count) = hwid;

early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
cpu_count++;
}
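The smp.c hunks above record each CPU's node twice: early, from the devicetree, while the logical CPU map is built (early_map_cpu_to_node() with of_node_to_nid()), and again via numa_store_cpu_info() when the CPU's topology is stored at bring-up. The store side lives in the not-shown arch/arm64/mm/numa.c; a hedged sketch of what it plausibly does, with cpu_to_node_map an assumed internal array:

/*
 * Sketch, not the file contents: remember the node chosen at parse time,
 * then publish it through the per-cpu node id and node_to_cpumask_map
 * once the CPU comes up.
 */
static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };

void __init early_map_cpu_to_node(unsigned int cpu, int nid)
{
	if (nid < 0 || nid >= MAX_NUMNODES)
		nid = 0;		/* unknown: fall back to node 0 */
	cpu_to_node_map[cpu] = nid;
}

void numa_store_cpu_info(unsigned int cpu)
{
	int nid = cpu_to_node_map[cpu];

	set_cpu_numa_node(cpu, nid);
	cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}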
1 change: 1 addition & 0 deletions arch/arm64/mm/Makefile
@@ -4,6 +4,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_ARM64_PTDUMP) += dump.o
obj-$(CONFIG_NUMA) += numa.o

obj-$(CONFIG_KASAN) += kasan_init.o
KASAN_SANITIZE_kasan_init.o := n
35 changes: 30 additions & 5 deletions arch/arm64/mm/init.c
@@ -40,6 +40,7 @@
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -86,6 +87,21 @@ static phys_addr_t __init max_zone_dma_phys(void)
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}

#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

if (IS_ENABLED(CONFIG_ZONE_DMA))
max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
max_zone_pfns[ZONE_NORMAL] = max;

free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
@@ -126,6 +142,8 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
@@ -142,10 +160,15 @@ static void __init arm64_memory_present(void)
static void __init arm64_memory_present(void)
{
struct memblock_region *reg;
int nid = 0;

for_each_memblock(memory, reg)
memory_present(0, memblock_region_memory_base_pfn(reg),
memblock_region_memory_end_pfn(reg));
for_each_memblock(memory, reg) {
#ifdef CONFIG_NUMA
nid = reg->nid;
#endif
memory_present(nid, memblock_region_memory_base_pfn(reg),
memblock_region_memory_end_pfn(reg));
}
}
#endif

@@ -278,7 +301,6 @@ void __init arm64_memblock_init(void)
dma_contiguous_reserve(arm64_dma_phys_limit);

memblock_allow_resize();
memblock_dump_all();
}

void __init bootmem_init(void)
@@ -290,6 +312,9 @@ void __init bootmem_init(void)

early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

max_pfn = max_low_pfn = max;

arm64_numa_init();
/*
* Sparsemem tries to allocate bootmem in memory_present(), so must be
* done after the fixed reservations.
@@ -300,7 +325,7 @@
zone_sizes_init(min, max);

high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
max_pfn = max_low_pfn = max;
memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
(diff for the remaining file, arch/arm64/mm/numa.c, not loaded in this view)
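The devicetree parsing itself is not part of this commit either: the Kconfig hunk selects OF_NUMA, which is supplied by drivers/of/of_numa.c from the same series. As a hedged illustration of how such a parser could feed the hooks declared in asm/numa.h — the "numa-node-id" property name follows the upstream binding, everything else here is assumption:

#include <linux/of.h>
#include <linux/of_address.h>

/*
 * Illustration only: walk memory nodes, read their "numa-node-id"
 * property and hand each range to numa_add_memblk().  The real parser
 * (drivers/of/of_numa.c) also handles CPU nodes and a distance map.
 */
static int __init example_parse_memory_nodes(void)
{
	struct device_node *np = NULL;
	struct resource res;
	u32 nid;
	int i;

	while ((np = of_find_node_by_type(np, "memory")) != NULL) {
		if (of_property_read_u32(np, "numa-node-id", &nid))
			continue;	/* no node id: leave it to the fallback */

		for (i = 0; of_address_to_resource(np, i, &res) == 0; i++)
			if (numa_add_memblk(nid, res.start, res.end + 1))
				return -EINVAL;
	}

	return 0;
}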
