| field | value | date |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000 |
| commit | 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch) | |
| tree | a94efe259b9009378be6d90eb30d2b019d95c194 /arch/arm64/kernel/vmlinux.lds.S | |
| parent | Initial commit. (diff) | |
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arm64/kernel/vmlinux.lds.S')

| mode | file | lines |
|---|---|---|
| -rw-r--r-- | arch/arm64/kernel/vmlinux.lds.S | 308 |

1 file changed, 308 insertions, 0 deletions
```diff
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..71f4b5f24
--- /dev/null
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ld script to make ARM Linux kernel
+ * taken from the i386 version by Russell King
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+#define RO_EXCEPTION_TABLE_ALIGN        8
+#define RUNTIME_DISCARD_EXIT
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/hyp_image.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+#include "image.h"
+
+OUTPUT_ARCH(aarch64)
+ENTRY(_text)
+
+jiffies = jiffies_64;
+
+
+#ifdef CONFIG_KVM
+#define HYPERVISOR_EXTABLE                              \
+        . = ALIGN(SZ_8);                                \
+        __start___kvm_ex_table = .;                     \
+        *(__kvm_ex_table)                               \
+        __stop___kvm_ex_table = .;
+
+#define HYPERVISOR_PERCPU_SECTION                       \
+        . = ALIGN(PAGE_SIZE);                           \
+        HYP_SECTION_NAME(.data..percpu) : {             \
+                *(HYP_SECTION_NAME(.data..percpu))      \
+        }
+#else /* CONFIG_KVM */
+#define HYPERVISOR_EXTABLE
+#define HYPERVISOR_PERCPU_SECTION
+#endif
+
+#define HYPERVISOR_TEXT                                 \
+        /*                                              \
+         * Align to 4 KB so that                        \
+         * a) the HYP vector table is at its minimum    \
+         *    alignment of 2048 bytes                   \
+         * b) the HYP init code will not cross a page   \
+         *    boundary if its size does not exceed      \
+         *    4 KB (see related ASSERT() below)         \
+         */                                             \
+        . = ALIGN(SZ_4K);                               \
+        __hyp_idmap_text_start = .;                     \
+        *(.hyp.idmap.text)                              \
+        __hyp_idmap_text_end = .;                       \
+        __hyp_text_start = .;                           \
+        *(.hyp.text)                                    \
+        HYPERVISOR_EXTABLE                              \
+        __hyp_text_end = .;
+
+#define IDMAP_TEXT                                      \
+        . = ALIGN(SZ_4K);                               \
+        __idmap_text_start = .;                         \
+        *(.idmap.text)                                  \
+        __idmap_text_end = .;
+
+#ifdef CONFIG_HIBERNATION
+#define HIBERNATE_TEXT                                  \
+        . = ALIGN(SZ_4K);                               \
+        __hibernate_exit_text_start = .;                \
+        *(.hibernate_exit.text)                         \
+        __hibernate_exit_text_end = .;
+#else
+#define HIBERNATE_TEXT
+#endif
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_TEXT                                      \
+        . = ALIGN(PAGE_SIZE);                           \
+        __entry_tramp_text_start = .;                   \
+        *(.entry.tramp.text)                            \
+        . = ALIGN(PAGE_SIZE);                           \
+        __entry_tramp_text_end = .;
+#else
+#define TRAMP_TEXT
+#endif
+
+/*
+ * The size of the PE/COFF section that covers the kernel image, which
+ * runs from _stext to _edata, must be a round multiple of the PE/COFF
+ * FileAlignment, which we set to its minimum value of 0x200. '_stext'
+ * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
+ * boundary should be sufficient.
+ */
+PECOFF_FILE_ALIGNMENT = 0x200;
+
+#ifdef CONFIG_EFI
+#define PECOFF_EDATA_PADDING    \
+        .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
+#else
+#define PECOFF_EDATA_PADDING
+#endif
+
+SECTIONS
+{
+        /*
+         * XXX: The linker does not define how output sections are
+         * assigned to input sections when there are multiple statements
+         * matching the same input section name. There is no documented
+         * order of matching.
+         */
+        DISCARDS
+        /DISCARD/ : {
+                *(.interp .dynamic)
+                *(.dynsym .dynstr .hash .gnu.hash)
+        }
+
+        . = KIMAGE_VADDR;
+
+        .head.text : {
+                _text = .;
+                HEAD_TEXT
+        }
+        .text : {                       /* Real text segment           */
+                _stext = .;             /* Text and read-only data     */
+                        IRQENTRY_TEXT
+                        SOFTIRQENTRY_TEXT
+                        ENTRY_TEXT
+                        TEXT_TEXT
+                        SCHED_TEXT
+                        CPUIDLE_TEXT
+                        LOCK_TEXT
+                        KPROBES_TEXT
+                        HYPERVISOR_TEXT
+                        IDMAP_TEXT
+                        HIBERNATE_TEXT
+                        TRAMP_TEXT
+                        *(.fixup)
+                        *(.gnu.warning)
+                . = ALIGN(16);
+                *(.got)                 /* Global offset table         */
+        }
+
+        /*
+         * Make sure that the .got.plt is either completely empty or it
+         * contains only the lazy dispatch entries.
+         */
+        .got.plt : { *(.got.plt) }
+        ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18,
+               "Unexpected GOT/PLT entries detected!")
+
+        . = ALIGN(SEGMENT_ALIGN);
+        _etext = .;                     /* End of text section */
+
+        /* everything from this point to __init_begin will be marked RO NX */
+        RO_DATA(PAGE_SIZE)
+
+        idmap_pg_dir = .;
+        . += IDMAP_DIR_SIZE;
+        idmap_pg_end = .;
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+        tramp_pg_dir = .;
+        . += PAGE_SIZE;
+#endif
+
+        reserved_pg_dir = .;
+        . += PAGE_SIZE;
+
+        swapper_pg_dir = .;
+        . += PAGE_SIZE;
+
+        . = ALIGN(SEGMENT_ALIGN);
+        __init_begin = .;
+        __inittext_begin = .;
+
+        INIT_TEXT_SECTION(8)
+
+        __exittext_begin = .;
+        .exit.text : {
+                EXIT_TEXT
+        }
+        __exittext_end = .;
+
+        . = ALIGN(4);
+        .altinstructions : {
+                __alt_instructions = .;
+                *(.altinstructions)
+                __alt_instructions_end = .;
+        }
+
+        . = ALIGN(SEGMENT_ALIGN);
+        __inittext_end = .;
+        __initdata_begin = .;
+
+        .init.data : {
+                INIT_DATA
+                INIT_SETUP(16)
+                INIT_CALLS
+                CON_INITCALL
+                INIT_RAM_FS
+                *(.init.rodata.* .init.bss)     /* from the EFI stub */
+        }
+        .exit.data : {
+                EXIT_DATA
+        }
+
+        PERCPU_SECTION(L1_CACHE_BYTES)
+        HYPERVISOR_PERCPU_SECTION
+
+        .rela.dyn : ALIGN(8) {
+                *(.rela .rela*)
+        }
+
+        __rela_offset   = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
+        __rela_size     = SIZEOF(.rela.dyn);
+
+#ifdef CONFIG_RELR
+        .relr.dyn : ALIGN(8) {
+                *(.relr.dyn)
+        }
+
+        __relr_offset   = ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
+        __relr_size     = SIZEOF(.relr.dyn);
+#endif
+
+        . = ALIGN(SEGMENT_ALIGN);
+        __initdata_end = .;
+        __init_end = .;
+
+        _data = .;
+        _sdata = .;
+        RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
+
+        /*
+         * Data written with the MMU off but read with the MMU on requires
+         * cache lines to be invalidated, discarding up to a Cache Writeback
+         * Granule (CWG) of data from the cache. Keep the section that
+         * requires this type of maintenance to be in its own Cache Writeback
+         * Granule (CWG) area so the cache maintenance operations don't
+         * interfere with adjacent data.
+         */
+        .mmuoff.data.write : ALIGN(SZ_2K) {
+                __mmuoff_data_start = .;
+                *(.mmuoff.data.write)
+        }
+        . = ALIGN(SZ_2K);
+        .mmuoff.data.read : {
+                *(.mmuoff.data.read)
+                __mmuoff_data_end = .;
+        }
+
+        PECOFF_EDATA_PADDING
+        __pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
+        _edata = .;
+
+        BSS_SECTION(0, 0, 0)
+
+        . = ALIGN(PAGE_SIZE);
+        init_pg_dir = .;
+        . += INIT_DIR_SIZE;
+        init_pg_end = .;
+
+        . = ALIGN(SEGMENT_ALIGN);
+        __pecoff_data_size = ABSOLUTE(. - __initdata_begin);
+        _end = .;
+
+        STABS_DEBUG
+        DWARF_DEBUG
+        ELF_DETAILS
+
+        HEAD_SYMBOLS
+
+        /*
+         * Sections that should stay zero sized, which is safer to
+         * explicitly check instead of blindly discarding.
+         */
+        .plt : {
+                *(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
+        }
+        ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+
+        .data.rel.ro : { *(.data.rel.ro) }
+        ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!")
+}
+
+#include "image-vars.h"
+
+/*
+ * The HYP init code and ID map text can't be longer than a page each,
+ * and should not cross a page boundary.
+ */
+ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+        "HYP init code too big or misaligned")
+ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+        "ID map text too big or misaligned")
+#ifdef CONFIG_HIBERNATION
+ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
+        <= SZ_4K, "Hibernate exit text too big or misaligned")
+#endif
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
+        "Entry trampoline text too big")
+#endif
+/*
+ * If padding is applied before .head.text, virt<->phys conversions will fail.
+ */
+ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned")
```
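A few of the ASSERT lines in the script encode small arithmetic facts worth spelling out. First, the `.got.plt` assertion accepts either an empty section or exactly 0x18 bytes: that is three 8-byte entries, which matches the reserved lazy-dispatch header that the comment above the assertion alludes to. A trivial, self-contained C11 check of that arithmetic (illustrative only, not kernel code):

```c
#include <assert.h>

/*
 * The linker script accepts SIZEOF(.got.plt) == 0x18 and nothing
 * else (besides zero): 0x18 bytes is exactly three 8-byte GOT
 * entries, i.e. the reserved lazy-dispatch header and no real
 * PLT-backed entries.
 */
static_assert(3 * 8 == 0x18, "three 8-byte GOT/PLT header entries");

int main(void)
{
        return 0;
}
```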
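Second, the PE/COFF padding comment relies on the usual power-of-two round-up that ld performs for `. = ALIGN(x)`; the `BYTE(0)` in `PECOFF_EDATA_PADDING` emits one zero byte first, presumably so the padding section is never empty and the alignment padding is actually materialized in the output file. A minimal sketch of that rounding in C, using the script's FileAlignment of 0x200 (the sample addresses are made up for illustration):

```c
#include <assert.h>
#include <stdint.h>

#define PECOFF_FILE_ALIGNMENT ((uint64_t)0x200)

/*
 * Round 'addr' up to the next multiple of 'align' (a power of
 * two) -- the same rounding ld applies for '. = ALIGN(align)'.
 */
static uint64_t align_up(uint64_t addr, uint64_t align)
{
        return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
        assert(align_up(0x1234, PECOFF_FILE_ALIGNMENT) == 0x1400);
        assert(align_up(0x1400, PECOFF_FILE_ALIGNMENT) == 0x1400); /* already aligned */
        return 0;
}
```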
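Finally, the page-boundary ASSERTs at the bottom of the script all use the same pattern: round the start symbol down to its 4 KB page, then require the end symbol to fall within that same page, i.e. `end - (start & ~(SZ_4K - 1)) <= SZ_4K`. A minimal C sketch of that check, with hypothetical addresses chosen only for illustration:

```c
#include <assert.h>
#include <stdint.h>

#define SZ_4K ((uint64_t)0x1000)

/*
 * Mirrors the linker-script ASSERTs: a region [start, end) passes
 * iff, after rounding 'start' down to its 4 KB page, 'end' still
 * lands within that same page -- so the region never crosses a
 * page boundary.
 */
static int fits_in_one_page(uint64_t start, uint64_t end)
{
        uint64_t page_base = start & ~(SZ_4K - 1);

        return end - page_base <= SZ_4K;
}

int main(void)
{
        assert(fits_in_one_page(0x10000800, 0x10000c00));  /* same page */
        assert(!fits_in_one_page(0x10000800, 0x10001100)); /* crosses into next page */
        return 0;
}
```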