Diffstat (limited to 'arch/arm64/include/asm/kernel-pgtable.h')
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h | 103
1 file changed, 42 insertions(+), 61 deletions(-)
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 83ddb14b95..bf05a77873 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -13,28 +13,27 @@
 #include <asm/sparsemem.h>
 
 /*
- * The linear mapping and the start of memory are both 2M aligned (per
- * the arm64 booting.txt requirements). Hence we can use section mapping
- * with 4K (section size = 2M) but not with 16K (section size = 32M) or
- * 64K (section size = 512M).
+ * The physical and virtual addresses of the start of the kernel image are
+ * equal modulo 2 MiB (per the arm64 booting.txt requirements). Hence we can
+ * use section mapping with 4K (section size = 2M) but not with 16K (section
+ * size = 32M) or 64K (section size = 512M).
  */
-
-/*
- * The idmap and swapper page tables need some space reserved in the kernel
- * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
- * map the kernel. With the 64K page configuration, swapper and idmap need to
- * map to pte level. The swapper also maps the FDT (see __create_page_tables
- * for more information). Note that the number of ID map translation levels
- * could be increased on the fly if system RAM is out of reach for the default
- * VA range, so pages required to map highest possible PA are reserved in all
- * cases.
- */
-#ifdef CONFIG_ARM64_4K_PAGES
-#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
+#if defined(PMD_SIZE) && PMD_SIZE <= MIN_KIMG_ALIGN
+#define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
+#define SWAPPER_SKIP_LEVEL	1
 #else
-#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
+#define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT
+#define SWAPPER_SKIP_LEVEL	0
 #endif
+#define SWAPPER_BLOCK_SIZE	(UL(1) << SWAPPER_BLOCK_SHIFT)
+#define SWAPPER_TABLE_SHIFT	(SWAPPER_BLOCK_SHIFT + PAGE_SHIFT - 3)
+#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - SWAPPER_SKIP_LEVEL)
+#define INIT_IDMAP_PGTABLE_LEVELS	(IDMAP_LEVELS - SWAPPER_SKIP_LEVEL)
+
+#define IDMAP_VA_BITS		48
+#define IDMAP_LEVELS		ARM64_HW_PGTABLE_LEVELS(IDMAP_VA_BITS)
+#define IDMAP_ROOT_LEVEL	(4 - IDMAP_LEVELS)
 
 /*
  * A relocatable kernel may execute from an address that differs from the one at
@@ -50,57 +49,39 @@
 #define EARLY_ENTRIES(vstart, vend, shift, add) \
 	(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
 
-#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
-
-#if SWAPPER_PGTABLE_LEVELS > 3
-#define EARLY_PUDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT, add))
-#else
-#define EARLY_PUDS(vstart, vend, add) (0)
-#endif
+#define EARLY_LEVEL(lvl, lvls, vstart, vend, add)	\
+	(lvls > lvl ? EARLY_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3), add) : 0)
 
-#if SWAPPER_PGTABLE_LEVELS > 2
-#define EARLY_PMDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT, add))
-#else
-#define EARLY_PMDS(vstart, vend, add) (0)
-#endif
+#define EARLY_PAGES(lvls, vstart, vend, add) (1	/* PGDIR page */				\
+	+ EARLY_LEVEL(3, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */	\
+	+ EARLY_LEVEL(2, (lvls), (vstart), (vend), add)	/* each entry needs a next level page table */	\
+	+ EARLY_LEVEL(1, (lvls), (vstart), (vend), add))/* each entry needs a next level page table */
+#define INIT_DIR_SIZE (PAGE_SIZE * (EARLY_PAGES(SWAPPER_PGTABLE_LEVELS, KIMAGE_VADDR, _end, EXTRA_PAGE) \
+			+ EARLY_SEGMENT_EXTRA_PAGES))
 
-#define EARLY_PAGES(vstart, vend, add) ( 1		/* PGDIR page */				\
-	+ EARLY_PGDS((vstart), (vend), add) 	/* each PGDIR needs a next level page table */	\
-	+ EARLY_PUDS((vstart), (vend), add)	/* each PUD needs a next level page table */	\
-	+ EARLY_PMDS((vstart), (vend), add))	/* each PMD needs a next level page table */
-#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EXTRA_PAGE))
+#define INIT_IDMAP_DIR_PAGES	(EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, KIMAGE_VADDR, _end, 1))
+#define INIT_IDMAP_DIR_SIZE	((INIT_IDMAP_DIR_PAGES + EARLY_IDMAP_EXTRA_PAGES) * PAGE_SIZE)
 
-/* the initial ID map may need two extra pages if it needs to be extended */
-#if VA_BITS < 48
-#define INIT_IDMAP_DIR_SIZE	((INIT_IDMAP_DIR_PAGES + 2) * PAGE_SIZE)
-#else
-#define INIT_IDMAP_DIR_SIZE	(INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
-#endif
-#define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
+#define INIT_IDMAP_FDT_PAGES	(EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, 0UL, UL(MAX_FDT_SIZE), 1) - 1)
+#define INIT_IDMAP_FDT_SIZE	((INIT_IDMAP_FDT_PAGES + EARLY_IDMAP_EXTRA_FDT_PAGES) * PAGE_SIZE)
 
-/* Initial memory map size */
-#ifdef CONFIG_ARM64_4K_PAGES
-#define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
-#define SWAPPER_BLOCK_SIZE	PMD_SIZE
-#define SWAPPER_TABLE_SHIFT	PUD_SHIFT
-#else
-#define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT
-#define SWAPPER_BLOCK_SIZE	PAGE_SIZE
-#define SWAPPER_TABLE_SHIFT	PMD_SHIFT
-#endif
+/* The number of segments in the kernel image (text, rodata, inittext, initdata, data+bss) */
+#define KERNEL_SEGMENT_COUNT	5
 
+#if SWAPPER_BLOCK_SIZE > SEGMENT_ALIGN
+#define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 1)
 /*
- * Initial memory map attributes.
+ * The initial ID map consists of the kernel image, mapped as two separate
+ * segments, and may appear misaligned wrt the swapper block size. This means
+ * we need 3 additional pages. The DT could straddle a swapper block boundary,
+ * so it may need 2.
  */
-#define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED | PTE_UXN)
-#define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S | PTE_UXN)
-
-#ifdef CONFIG_ARM64_4K_PAGES
-#define SWAPPER_RW_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS | PTE_WRITE)
-#define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
+#define EARLY_IDMAP_EXTRA_PAGES		3
+#define EARLY_IDMAP_EXTRA_FDT_PAGES	2
 #else
-#define SWAPPER_RW_MMUFLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS | PTE_WRITE)
-#define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
+#define EARLY_SEGMENT_EXTRA_PAGES	0
+#define EARLY_IDMAP_EXTRA_PAGES		0
+#define EARLY_IDMAP_EXTRA_FDT_PAGES	0
 #endif
 
 #endif	/* __ASM_KERNEL_PGTABLE_H */
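A note on the new sizing macros: EARLY_PAGES() now takes the number of page-table levels as a parameter and sums, per level, the number of entries needed to span the mapped range, each of which needs one next-level table page. The standalone C sketch below (not kernel code) mirrors that arithmetic for an assumed 4K-page, 48-bit VA, relocatable configuration; span_nr_entries(), the kernel virtual address and the ~39 MiB image size are illustrative assumptions, not values taken from the patch.

/*
 * Standalone sketch mirroring EARLY_LEVEL()/EARLY_PAGES() from the patch.
 * Assumed configuration: 4K pages, 48-bit VA, SWAPPER_BLOCK_SHIFT == PMD_SHIFT,
 * so one translation level is skipped (the kernel is mapped with 2M blocks).
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT		12	/* 4K pages (assumed) */
#define PMD_SHIFT		21	/* 2M blocks with 4K pages */
#define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
#define SWAPPER_SKIP_LEVEL	1
#define PGTABLE_LEVELS		4	/* 48-bit VA with 4K pages */
#define SWAPPER_PGTABLE_LEVELS	(PGTABLE_LEVELS - SWAPPER_SKIP_LEVEL)

/* Number of entries at the given shift needed to cover [vstart, vend) */
static uint64_t span_nr_entries(uint64_t vstart, uint64_t vend, unsigned int shift)
{
	return ((vend - 1) >> shift) - (vstart >> shift) + 1;
}

/* Counterpart of EARLY_LEVEL(): table pages contributed by one level */
static uint64_t early_level(int lvl, int lvls, uint64_t vstart, uint64_t vend, int add)
{
	unsigned int shift = SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3);

	return lvls > lvl ? span_nr_entries(vstart, vend, shift) + add : 0;
}

/* Counterpart of EARLY_PAGES(): one root page plus one page per spanned entry */
static uint64_t early_pages(int lvls, uint64_t vstart, uint64_t vend, int add)
{
	return 1
	       + early_level(3, lvls, vstart, vend, add)
	       + early_level(2, lvls, vstart, vend, add)
	       + early_level(1, lvls, vstart, vend, add);
}

int main(void)
{
	/* Illustrative 2M-aligned kernel VA and an assumed ~39 MiB image */
	uint64_t kimage_vaddr = 0xffff800080000000ULL;
	uint64_t kimage_end   = kimage_vaddr + (39ULL << 20);

	/* add == 1 plays the role of EXTRA_PAGE for a relocatable kernel */
	printf("early swapper table pages: %llu\n",
	       (unsigned long long)early_pages(SWAPPER_PGTABLE_LEVELS,
					       kimage_vaddr, kimage_end, 1));
	return 0;
}

For this configuration the sketch counts one root (PGD) page plus two next-level table pages at each of the two subordinate levels, i.e. five pages; on top of that, the patch adds EARLY_SEGMENT_EXTRA_PAGES to INIT_DIR_SIZE whenever SWAPPER_BLOCK_SIZE exceeds SEGMENT_ALIGN, to cover the individually mapped kernel segments.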