@@ -13,27 +13,22 @@
 #include <asm/sparsemem.h>
 
 /*
- * The linear mapping and the start of memory are both 2M aligned (per
- * the arm64 booting.txt requirements). Hence we can use section mapping
- * with 4K (section size = 2M) but not with 16K (section size = 32M) or
- * 64K (section size = 512M).
+ * The physical and virtual addresses of the start of the kernel image are
+ * equal modulo 2 MiB (per the arm64 booting.txt requirements). Hence we can
+ * use section mapping with 4K (section size = 2M) but not with 16K (section
+ * size = 32M) or 64K (section size = 512M).
  */
-
-/*
- * The idmap and swapper page tables need some space reserved in the kernel
- * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
- * map the kernel. With the 64K page configuration, swapper and idmap need to
- * map to pte level. The swapper also maps the FDT (see __create_page_tables
- * for more information). Note that the number of ID map translation levels
- * could be increased on the fly if system RAM is out of reach for the default
- * VA range, so pages required to map highest possible PA are reserved in all
- * cases.
- */
-#ifdef CONFIG_ARM64_4K_PAGES
-#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
+#if defined(PMD_SIZE) && PMD_SIZE <= MIN_KIMG_ALIGN
+#define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
+#define SWAPPER_SKIP_LEVEL	1
 #else
-#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
+#define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT
+#define SWAPPER_SKIP_LEVEL	0
 #endif
+#define SWAPPER_BLOCK_SIZE	(UL(1) << SWAPPER_BLOCK_SHIFT)
+#define SWAPPER_TABLE_SHIFT	(SWAPPER_BLOCK_SHIFT + PAGE_SHIFT - 3)
+
+#define SWAPPER_PGTABLE_LEVELS		(CONFIG_PGTABLE_LEVELS - SWAPPER_SKIP_LEVEL)
 
 #define IDMAP_VA_BITS		48
 #define IDMAP_LEVELS		ARM64_HW_PGTABLE_LEVELS(IDMAP_VA_BITS)
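A quick way to sanity-check the new definitions is to evaluate them for each page size. The following is a standalone user-space sketch, not kernel code; the shift values and the 2 MiB MIN_KIMG_ALIGN are hard-coded assumptions that mirror the usual arm64 geometry, where each table level resolves PAGE_SHIFT - 3 bits.

#include <stdio.h>

/* Mirror of the new header logic for a given page size (MIN_KIMG_ALIGN assumed 2 MiB). */
static void show(unsigned int page_shift, unsigned int pgtable_levels)
{
	unsigned int pmd_shift = page_shift + (page_shift - 3);	/* one level above the page */
	unsigned int block_shift, skip_level;

	if ((1UL << pmd_shift) <= (1UL << 21)) {	/* PMD_SIZE <= MIN_KIMG_ALIGN */
		block_shift = pmd_shift;		/* section-map the kernel */
		skip_level = 1;				/* no PTE level needed */
	} else {
		block_shift = page_shift;		/* map at page granularity */
		skip_level = 0;
	}

	printf("%2uK pages: BLOCK_SHIFT=%u TABLE_SHIFT=%u PGTABLE_LEVELS=%u\n",
	       1U << (page_shift - 10), block_shift,
	       block_shift + page_shift - 3,		/* SWAPPER_TABLE_SHIFT */
	       pgtable_levels - skip_level);		/* SWAPPER_PGTABLE_LEVELS */
}

int main(void)
{
	show(12, 4);	/*  4K, 48-bit VA: 21 / 30 / 3 */
	show(14, 4);	/* 16K, 48-bit VA: 14 / 25 / 4 */
	show(16, 3);	/* 64K, 48-bit VA: 16 / 29 / 3 */
	return 0;
}

The 4K case keeps section (PMD) mappings and drops one table level, while 16K and 64K fall back to page mappings, matching what the removed CONFIG_ARM64_4K_PAGES conditional expressed directly.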
@@ -53,24 +48,13 @@
 #define EARLY_ENTRIES(vstart, vend, shift, add) \
 	(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
 
-#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
-
-#if SWAPPER_PGTABLE_LEVELS > 3
-#define EARLY_PUDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT, add))
-#else
-#define EARLY_PUDS(vstart, vend, add) (0)
-#endif
+#define EARLY_LEVEL(lvl, vstart, vend, add)	\
+	(SWAPPER_PGTABLE_LEVELS > lvl ? EARLY_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3), add) : 0)
 
-#if SWAPPER_PGTABLE_LEVELS > 2
-#define EARLY_PMDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT, add))
-#else
-#define EARLY_PMDS(vstart, vend, add) (0)
-#endif
-
-#define EARLY_PAGES(vstart, vend, add) ( 1 			/* PGDIR page */				\
-			+ EARLY_PGDS((vstart), (vend), add) 	/* each PGDIR needs a next level page table */	\
-			+ EARLY_PUDS((vstart), (vend), add)	/* each PUD needs a next level page table */	\
-			+ EARLY_PMDS((vstart), (vend), add))	/* each PMD needs a next level page table */
+#define EARLY_PAGES(vstart, vend, add) (1 	/* PGDIR page */				\
+	+ EARLY_LEVEL(3, (vstart), (vend), add) /* each entry needs a next level page table */	\
+	+ EARLY_LEVEL(2, (vstart), (vend), add)	/* each entry needs a next level page table */	\
+	+ EARLY_LEVEL(1, (vstart), (vend), add))/* each entry needs a next level page table */
 #define INIT_DIR_SIZE (PAGE_SIZE * (EARLY_PAGES(KIMAGE_VADDR, _end, EXTRA_PAGE) + EARLY_SEGMENT_EXTRA_PAGES))
 
 /* the initial ID map may need two extra pages if it needs to be extended */
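To make the EARLY_LEVEL()/EARLY_PAGES() accounting concrete, here is a user-space sketch for a hypothetical 4K-page, 48-bit-VA configuration (SWAPPER_PGTABLE_LEVELS = 3, SWAPPER_BLOCK_SHIFT = PMD_SHIFT = 21). The image base and size are made-up values, and the entry-counting helper mirrors what SPAN_NR_ENTRIES() does in the unchanged part of the header.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for a 4K-page, 48-bit-VA build. */
#define PAGE_SHIFT		12
#define SWAPPER_BLOCK_SHIFT	21	/* PMD_SHIFT */
#define SWAPPER_PGTABLE_LEVELS	3	/* CONFIG_PGTABLE_LEVELS (4) - SWAPPER_SKIP_LEVEL (1) */

/* Entries spanned by [vstart, vend) at the given shift, plus "add" slack entries. */
static uint64_t early_entries(uint64_t vstart, uint64_t vend, unsigned int shift, unsigned int add)
{
	return (((vend - 1) >> shift) - (vstart >> shift) + 1) + add;
}

/* Tables needed at level "lvl": one per entry spanned one level up, or none if the level is folded. */
static uint64_t early_level(int lvl, uint64_t vstart, uint64_t vend, unsigned int add)
{
	if (SWAPPER_PGTABLE_LEVELS <= lvl)
		return 0;
	return early_entries(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3), add);
}

/* One PGDIR page plus the next-level tables each populated entry requires. */
static uint64_t early_pages(uint64_t vstart, uint64_t vend, unsigned int add)
{
	return 1 + early_level(3, vstart, vend, add)
		 + early_level(2, vstart, vend, add)
		 + early_level(1, vstart, vend, add);
}

int main(void)
{
	uint64_t kimage_vaddr = 0xffff800080000000ULL;		/* made-up image base */
	uint64_t kimage_end   = kimage_vaddr + (50ULL << 20);	/* ~50 MiB image */

	/*
	 * Level 3 contributes nothing (only 3 swapper levels), level 2 counts
	 * PGD entries (shift 39), level 1 counts PUD entries (shift 30), so a
	 * span that crosses no 512 GiB or 1 GiB boundary needs 3 pages.
	 */
	printf("early pages: %llu\n",
	       (unsigned long long)early_pages(kimage_vaddr, kimage_end, 0));
	return 0;
}

Functionally this matches the removed EARLY_PGDS/EARLY_PUDS/EARLY_PMDS trio: the ternary plays the role of the old #if guards, and the per-level shifts are derived from SWAPPER_BLOCK_SHIFT instead of being named explicitly.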
@@ -81,17 +65,6 @@
 #endif
 #define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
 
-/* Initial memory map size */
-#ifdef CONFIG_ARM64_4K_PAGES
-#define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
-#define SWAPPER_BLOCK_SIZE	PMD_SIZE
-#define SWAPPER_TABLE_SHIFT	PUD_SHIFT
-#else
-#define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT
-#define SWAPPER_BLOCK_SIZE	PAGE_SIZE
-#define SWAPPER_TABLE_SHIFT	PMD_SHIFT
-#endif
-
 /* The number of segments in the kernel image (text, rodata, inittext, initdata, data+bss) */
 #define KERNEL_SEGMENT_COUNT	5
 
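The block removed here is not lost: SWAPPER_BLOCK_SHIFT, SWAPPER_BLOCK_SIZE and SWAPPER_TABLE_SHIFT now live near the top of the file, and the generic SWAPPER_BLOCK_SHIFT + PAGE_SHIFT - 3 formula reproduces the per-configuration table shifts that were spelled out here. A minimal standalone check with the arm64 shift values hard-coded (not kernel code):

#include <assert.h>

/*  4K pages: block level is the PMD (shift 21); old SWAPPER_TABLE_SHIFT was PUD_SHIFT (30). */
static_assert(21 + 12 - 3 == 30, "4K: generic formula reproduces PUD_SHIFT");
/* 16K pages: block level is the page (shift 14); old SWAPPER_TABLE_SHIFT was PMD_SHIFT (25). */
static_assert(14 + 14 - 3 == 25, "16K: generic formula reproduces PMD_SHIFT");
/* 64K pages: block level is the page (shift 16); old SWAPPER_TABLE_SHIFT was PMD_SHIFT (29). */
static_assert(16 + 16 - 3 == 29, "64K: generic formula reproduces PMD_SHIFT");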