Diffstat (limited to 'arch/arm64/kernel/pi')
-rw-r--r--  arch/arm64/kernel/pi/.gitignore        |   3
-rw-r--r--  arch/arm64/kernel/pi/Makefile          |  27
-rw-r--r--  arch/arm64/kernel/pi/idreg-override.c  | 401
-rw-r--r--  arch/arm64/kernel/pi/kaslr_early.c     |  78
-rw-r--r--  arch/arm64/kernel/pi/map_kernel.c      | 253
-rw-r--r--  arch/arm64/kernel/pi/map_range.c       | 105
-rw-r--r--  arch/arm64/kernel/pi/patch-scs.c       | 254
-rw-r--r--  arch/arm64/kernel/pi/pi.h              |  36
-rw-r--r--  arch/arm64/kernel/pi/relacheck.c       | 130
-rw-r--r--  arch/arm64/kernel/pi/relocate.c        |  64
10 files changed, 1284 insertions(+), 67 deletions(-)
diff --git a/arch/arm64/kernel/pi/.gitignore b/arch/arm64/kernel/pi/.gitignore new file mode 100644 index 0000000000..efb29b663e --- /dev/null +++ b/arch/arm64/kernel/pi/.gitignore @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +relacheck diff --git a/arch/arm64/kernel/pi/Makefile b/arch/arm64/kernel/pi/Makefile index c844a0546d..4393b41f0b 100644 --- a/arch/arm64/kernel/pi/Makefile +++ b/arch/arm64/kernel/pi/Makefile @@ -11,6 +11,9 @@ KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \ -fno-asynchronous-unwind-tables -fno-unwind-tables \ $(call cc-option,-fno-addrsig) +# this code may run with the MMU off so disable unaligned accesses +CFLAGS_map_range.o += -mstrict-align + # remove SCS flags from all objects in this directory KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS)) # disable LTO @@ -22,14 +25,26 @@ KCSAN_SANITIZE := n UBSAN_SANITIZE := n KCOV_INSTRUMENT := n +hostprogs := relacheck + +quiet_cmd_piobjcopy = $(quiet_cmd_objcopy) + cmd_piobjcopy = $(cmd_objcopy) && $(obj)/relacheck $(@) $(<) + $(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \ - --remove-section=.note.gnu.property \ - --prefix-alloc-sections=.init -$(obj)/%.pi.o: $(obj)/%.o FORCE - $(call if_changed,objcopy) + --remove-section=.note.gnu.property +$(obj)/%.pi.o: $(obj)/%.o $(obj)/relacheck FORCE + $(call if_changed,piobjcopy) + +# ensure that all the lib- code ends up as __init code and data +$(obj)/lib-%.pi.o: OBJCOPYFLAGS += --prefix-alloc-sections=.init $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE $(call if_changed_rule,cc_o_c) -obj-y := kaslr_early.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o -extra-y := $(patsubst %.pi.o,%.o,$(obj-y)) +obj-y := idreg-override.pi.o \ + map_kernel.pi.o map_range.pi.o \ + lib-fdt.pi.o lib-fdt_ro.pi.o +obj-$(CONFIG_RELOCATABLE) += relocate.pi.o +obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_early.pi.o +obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.pi.o +extra-y := $(patsubst %.pi.o,%.o,$(obj-y)) diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c new file mode 100644 index 0000000000..aad399796e --- /dev/null +++ b/arch/arm64/kernel/pi/idreg-override.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Early cpufeature override framework + * + * Copyright (C) 2020 Google LLC + * Author: Marc Zyngier <maz@kernel.org> + */ + +#include <linux/ctype.h> +#include <linux/kernel.h> +#include <linux/libfdt.h> + +#include <asm/cacheflush.h> +#include <asm/cpufeature.h> +#include <asm/setup.h> + +#include "pi.h" + +#define FTR_DESC_NAME_LEN 20 +#define FTR_DESC_FIELD_LEN 10 +#define FTR_ALIAS_NAME_LEN 30 +#define FTR_ALIAS_OPTION_LEN 116 + +static u64 __boot_status __initdata; + +typedef bool filter_t(u64 val); + +struct ftr_set_desc { + char name[FTR_DESC_NAME_LEN]; + PREL64(struct arm64_ftr_override, override); + struct { + char name[FTR_DESC_FIELD_LEN]; + u8 shift; + u8 width; + PREL64(filter_t, filter); + } fields[]; +}; + +#define FIELD(n, s, f) { .name = n, .shift = s, .width = 4, .filter = f } + +static bool __init mmfr1_vh_filter(u64 val) +{ + /* + * If we ever reach this point while running VHE, we're + * guaranteed to be on one of these funky, VHE-stuck CPUs. If + * the user was trying to force nVHE on us, proceed with + * attitude adjustment. 
+ */ + return !(__boot_status == (BOOT_CPU_FLAG_E2H | BOOT_CPU_MODE_EL2) && + val == 0); +} + +static const struct ftr_set_desc mmfr1 __prel64_initconst = { + .name = "id_aa64mmfr1", + .override = &id_aa64mmfr1_override, + .fields = { + FIELD("vh", ID_AA64MMFR1_EL1_VH_SHIFT, mmfr1_vh_filter), + {} + }, +}; + + +static bool __init mmfr2_varange_filter(u64 val) +{ + int __maybe_unused feat; + + if (val) + return false; + +#ifdef CONFIG_ARM64_LPA2 + feat = cpuid_feature_extract_signed_field(read_sysreg(id_aa64mmfr0_el1), + ID_AA64MMFR0_EL1_TGRAN_SHIFT); + if (feat >= ID_AA64MMFR0_EL1_TGRAN_LPA2) { + id_aa64mmfr0_override.val |= + (ID_AA64MMFR0_EL1_TGRAN_LPA2 - 1) << ID_AA64MMFR0_EL1_TGRAN_SHIFT; + id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_TGRAN_SHIFT; + } +#endif + return true; +} + +static const struct ftr_set_desc mmfr2 __prel64_initconst = { + .name = "id_aa64mmfr2", + .override = &id_aa64mmfr2_override, + .fields = { + FIELD("varange", ID_AA64MMFR2_EL1_VARange_SHIFT, mmfr2_varange_filter), + {} + }, +}; + +static bool __init pfr0_sve_filter(u64 val) +{ + /* + * Disabling SVE also means disabling all the features that + * are associated with it. The easiest way to do it is just to + * override id_aa64zfr0_el1 to be 0. + */ + if (!val) { + id_aa64zfr0_override.val = 0; + id_aa64zfr0_override.mask = GENMASK(63, 0); + } + + return true; +} + +static const struct ftr_set_desc pfr0 __prel64_initconst = { + .name = "id_aa64pfr0", + .override = &id_aa64pfr0_override, + .fields = { + FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter), + {} + }, +}; + +static bool __init pfr1_sme_filter(u64 val) +{ + /* + * Similarly to SVE, disabling SME also means disabling all + * the features that are associated with it. Just set + * id_aa64smfr0_el1 to 0 and don't look back. + */ + if (!val) { + id_aa64smfr0_override.val = 0; + id_aa64smfr0_override.mask = GENMASK(63, 0); + } + + return true; +} + +static const struct ftr_set_desc pfr1 __prel64_initconst = { + .name = "id_aa64pfr1", + .override = &id_aa64pfr1_override, + .fields = { + FIELD("bt", ID_AA64PFR1_EL1_BT_SHIFT, NULL ), + FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL), + FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter), + {} + }, +}; + +static const struct ftr_set_desc isar1 __prel64_initconst = { + .name = "id_aa64isar1", + .override = &id_aa64isar1_override, + .fields = { + FIELD("gpi", ID_AA64ISAR1_EL1_GPI_SHIFT, NULL), + FIELD("gpa", ID_AA64ISAR1_EL1_GPA_SHIFT, NULL), + FIELD("api", ID_AA64ISAR1_EL1_API_SHIFT, NULL), + FIELD("apa", ID_AA64ISAR1_EL1_APA_SHIFT, NULL), + {} + }, +}; + +static const struct ftr_set_desc isar2 __prel64_initconst = { + .name = "id_aa64isar2", + .override = &id_aa64isar2_override, + .fields = { + FIELD("gpa3", ID_AA64ISAR2_EL1_GPA3_SHIFT, NULL), + FIELD("apa3", ID_AA64ISAR2_EL1_APA3_SHIFT, NULL), + FIELD("mops", ID_AA64ISAR2_EL1_MOPS_SHIFT, NULL), + {} + }, +}; + +static const struct ftr_set_desc smfr0 __prel64_initconst = { + .name = "id_aa64smfr0", + .override = &id_aa64smfr0_override, + .fields = { + FIELD("smever", ID_AA64SMFR0_EL1_SMEver_SHIFT, NULL), + /* FA64 is a one bit field... 
:-/ */ + { "fa64", ID_AA64SMFR0_EL1_FA64_SHIFT, 1, }, + {} + }, +}; + +static bool __init hvhe_filter(u64 val) +{ + u64 mmfr1 = read_sysreg(id_aa64mmfr1_el1); + + return (val == 1 && + lower_32_bits(__boot_status) == BOOT_CPU_MODE_EL2 && + cpuid_feature_extract_unsigned_field(mmfr1, + ID_AA64MMFR1_EL1_VH_SHIFT)); +} + +static const struct ftr_set_desc sw_features __prel64_initconst = { + .name = "arm64_sw", + .override = &arm64_sw_feature_override, + .fields = { + FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL), + FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter), + FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL), + {} + }, +}; + +static const +PREL64(const struct ftr_set_desc, reg) regs[] __prel64_initconst = { + { &mmfr1 }, + { &mmfr2 }, + { &pfr0 }, + { &pfr1 }, + { &isar1 }, + { &isar2 }, + { &smfr0 }, + { &sw_features }, +}; + +static const struct { + char alias[FTR_ALIAS_NAME_LEN]; + char feature[FTR_ALIAS_OPTION_LEN]; +} aliases[] __initconst = { + { "kvm_arm.mode=nvhe", "id_aa64mmfr1.vh=0" }, + { "kvm_arm.mode=protected", "id_aa64mmfr1.vh=0" }, + { "arm64.nosve", "id_aa64pfr0.sve=0" }, + { "arm64.nosme", "id_aa64pfr1.sme=0" }, + { "arm64.nobti", "id_aa64pfr1.bt=0" }, + { "arm64.nopauth", + "id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 " + "id_aa64isar1.api=0 id_aa64isar1.apa=0 " + "id_aa64isar2.gpa3=0 id_aa64isar2.apa3=0" }, + { "arm64.nomops", "id_aa64isar2.mops=0" }, + { "arm64.nomte", "id_aa64pfr1.mte=0" }, + { "nokaslr", "arm64_sw.nokaslr=1" }, + { "rodata=off", "arm64_sw.rodataoff=1" }, + { "arm64.nolva", "id_aa64mmfr2.varange=0" }, +}; + +static int __init parse_hexdigit(const char *p, u64 *v) +{ + // skip "0x" if it comes next + if (p[0] == '0' && tolower(p[1]) == 'x') + p += 2; + + // check whether the RHS is a single hex digit + if (!isxdigit(p[0]) || (p[1] && !isspace(p[1]))) + return -EINVAL; + + *v = tolower(*p) - (isdigit(*p) ? '0' : 'a' - 10); + return 0; +} + +static int __init find_field(const char *cmdline, char *opt, int len, + const struct ftr_set_desc *reg, int f, u64 *v) +{ + int flen = strlen(reg->fields[f].name); + + // append '<fieldname>=' to obtain '<name>.<fieldname>=' + memcpy(opt + len, reg->fields[f].name, flen); + len += flen; + opt[len++] = '='; + + if (memcmp(cmdline, opt, len)) + return -1; + + return parse_hexdigit(cmdline + len, v); +} + +static void __init match_options(const char *cmdline) +{ + char opt[FTR_DESC_NAME_LEN + FTR_DESC_FIELD_LEN + 2]; + int i; + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + const struct ftr_set_desc *reg = prel64_pointer(regs[i].reg); + struct arm64_ftr_override *override; + int len = strlen(reg->name); + int f; + + override = prel64_pointer(reg->override); + + // set opt[] to '<name>.' + memcpy(opt, reg->name, len); + opt[len++] = '.'; + + for (f = 0; reg->fields[f].name[0] != '\0'; f++) { + u64 shift = reg->fields[f].shift; + u64 width = reg->fields[f].width ?: 4; + u64 mask = GENMASK_ULL(shift + width - 1, shift); + bool (*filter)(u64 val); + u64 v; + + if (find_field(cmdline, opt, len, reg, f, &v)) + continue; + + /* + * If an override gets filtered out, advertise + * it by setting the value to the all-ones while + * clearing the mask... Yes, this is fragile. 
+ */ + filter = prel64_pointer(reg->fields[f].filter); + if (filter && !filter(v)) { + override->val |= mask; + override->mask &= ~mask; + continue; + } + + override->val &= ~mask; + override->val |= (v << shift) & mask; + override->mask |= mask; + + return; + } + } +} + +static __init void __parse_cmdline(const char *cmdline, bool parse_aliases) +{ + do { + char buf[256]; + size_t len; + int i; + + cmdline = skip_spaces(cmdline); + + /* terminate on "--" appearing on the command line by itself */ + if (cmdline[0] == '-' && cmdline[1] == '-' && isspace(cmdline[2])) + return; + + for (len = 0; cmdline[len] && !isspace(cmdline[len]); len++) { + if (len >= sizeof(buf) - 1) + break; + if (cmdline[len] == '-') + buf[len] = '_'; + else + buf[len] = cmdline[len]; + } + if (!len) + return; + + buf[len] = 0; + + cmdline += len; + + match_options(buf); + + for (i = 0; parse_aliases && i < ARRAY_SIZE(aliases); i++) + if (!memcmp(buf, aliases[i].alias, len + 1)) + __parse_cmdline(aliases[i].feature, false); + } while (1); +} + +static __init const u8 *get_bootargs_cmdline(const void *fdt, int node) +{ + static char const bootargs[] __initconst = "bootargs"; + const u8 *prop; + + if (node < 0) + return NULL; + + prop = fdt_getprop(fdt, node, bootargs, NULL); + if (!prop) + return NULL; + + return strlen(prop) ? prop : NULL; +} + +static __init void parse_cmdline(const void *fdt, int chosen) +{ + static char const cmdline[] __initconst = CONFIG_CMDLINE; + const u8 *prop = get_bootargs_cmdline(fdt, chosen); + + if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop) + __parse_cmdline(cmdline, true); + + if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop) + __parse_cmdline(prop, true); +} + +void __init init_feature_override(u64 boot_status, const void *fdt, + int chosen) +{ + struct arm64_ftr_override *override; + const struct ftr_set_desc *reg; + int i; + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + reg = prel64_pointer(regs[i].reg); + override = prel64_pointer(reg->override); + + override->val = 0; + override->mask = 0; + } + + __boot_status = boot_status; + + parse_cmdline(fdt, chosen); + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + reg = prel64_pointer(regs[i].reg); + override = prel64_pointer(reg->override); + dcache_clean_inval_poc((unsigned long)override, + (unsigned long)(override + 1)); + } +} + +char * __init skip_spaces(const char *str) +{ + while (isspace(*str)) + ++str; + return (char *)str; +} diff --git a/arch/arm64/kernel/pi/kaslr_early.c b/arch/arm64/kernel/pi/kaslr_early.c index 17bff6e399..0257b43819 100644 --- a/arch/arm64/kernel/pi/kaslr_early.c +++ b/arch/arm64/kernel/pi/kaslr_early.c @@ -14,69 +14,23 @@ #include <asm/archrandom.h> #include <asm/memory.h> +#include <asm/pgtable.h> -/* taken from lib/string.c */ -static char *__strstr(const char *s1, const char *s2) -{ - size_t l1, l2; - - l2 = strlen(s2); - if (!l2) - return (char *)s1; - l1 = strlen(s1); - while (l1 >= l2) { - l1--; - if (!memcmp(s1, s2, l2)) - return (char *)s1; - s1++; - } - return NULL; -} -static bool cmdline_contains_nokaslr(const u8 *cmdline) -{ - const u8 *str; - - str = __strstr(cmdline, "nokaslr"); - return str == cmdline || (str > cmdline && *(str - 1) == ' '); -} - -static bool is_kaslr_disabled_cmdline(void *fdt) -{ - if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) { - int node; - const u8 *prop; - - node = fdt_path_offset(fdt, "/chosen"); - if (node < 0) - goto out; - - prop = fdt_getprop(fdt, node, "bootargs", NULL); - if (!prop) - goto out; - - if (cmdline_contains_nokaslr(prop)) - return true; +#include "pi.h" - if 
(IS_ENABLED(CONFIG_CMDLINE_EXTEND)) - goto out; +extern u16 memstart_offset_seed; - return false; - } -out: - return cmdline_contains_nokaslr(CONFIG_CMDLINE); -} - -static u64 get_kaslr_seed(void *fdt) +static u64 __init get_kaslr_seed(void *fdt, int node) { - int node, len; + static char const seed_str[] __initconst = "kaslr-seed"; fdt64_t *prop; u64 ret; + int len; - node = fdt_path_offset(fdt, "/chosen"); if (node < 0) return 0; - prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len); + prop = fdt_getprop_w(fdt, node, seed_str, &len); if (!prop || len != sizeof(u64)) return 0; @@ -85,26 +39,28 @@ static u64 get_kaslr_seed(void *fdt) return ret; } -asmlinkage u64 kaslr_early_init(void *fdt) +u64 __init kaslr_early_init(void *fdt, int chosen) { - u64 seed; + u64 seed, range; - if (is_kaslr_disabled_cmdline(fdt)) + if (kaslr_disabled_cmdline()) return 0; - seed = get_kaslr_seed(fdt); + seed = get_kaslr_seed(fdt, chosen); if (!seed) { if (!__early_cpu_has_rndr() || !__arm64_rndr((unsigned long *)&seed)) return 0; } + memstart_offset_seed = seed & U16_MAX; + /* * OK, so we are proceeding with KASLR enabled. Calculate a suitable * kernel image offset from the seed. Let's place the kernel in the - * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of - * the lower and upper quarters to avoid colliding with other - * allocations. + * 'middle' half of the VMALLOC area, and stay clear of the lower and + * upper quarters to avoid colliding with other allocations. */ - return BIT(VA_BITS_MIN - 3) + (seed & GENMASK(VA_BITS_MIN - 3, 0)); + range = (VMALLOC_END - KIMAGE_VADDR) / 2; + return range / 2 + (((__uint128_t)range * seed) >> 64); } diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c new file mode 100644 index 0000000000..5fa08e13e1 --- /dev/null +++ b/arch/arm64/kernel/pi/map_kernel.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2023 Google LLC +// Author: Ard Biesheuvel <ardb@google.com> + +#include <linux/init.h> +#include <linux/libfdt.h> +#include <linux/linkage.h> +#include <linux/types.h> +#include <linux/sizes.h> +#include <linux/string.h> + +#include <asm/memory.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> +#include <asm/tlbflush.h> + +#include "pi.h" + +extern const u8 __eh_frame_start[], __eh_frame_end[]; + +extern void idmap_cpu_replace_ttbr1(void *pgdir); + +static void __init map_segment(pgd_t *pg_dir, u64 *pgd, u64 va_offset, + void *start, void *end, pgprot_t prot, + bool may_use_cont, int root_level) +{ + map_range(pgd, ((u64)start + va_offset) & ~PAGE_OFFSET, + ((u64)end + va_offset) & ~PAGE_OFFSET, (u64)start, + prot, root_level, (pte_t *)pg_dir, may_use_cont, 0); +} + +static void __init unmap_segment(pgd_t *pg_dir, u64 va_offset, void *start, + void *end, int root_level) +{ + map_segment(pg_dir, NULL, va_offset, start, end, __pgprot(0), + false, root_level); +} + +static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level) +{ + bool enable_scs = IS_ENABLED(CONFIG_UNWIND_PATCH_PAC_INTO_SCS); + bool twopass = IS_ENABLED(CONFIG_RELOCATABLE); + u64 pgdp = (u64)init_pg_dir + PAGE_SIZE; + pgprot_t text_prot = PAGE_KERNEL_ROX; + pgprot_t data_prot = PAGE_KERNEL; + pgprot_t prot; + + /* + * External debuggers may need to write directly to the text mapping to + * install SW breakpoints. Allow this (only) when explicitly requested + * with rodata=off. 
+ */ + if (arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF)) + text_prot = PAGE_KERNEL_EXEC; + + /* + * We only enable the shadow call stack dynamically if we are running + * on a system that does not implement PAC or BTI. PAC and SCS provide + * roughly the same level of protection, and BTI relies on the PACIASP + * instructions serving as landing pads, preventing us from patching + * those instructions into something else. + */ + if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) && cpu_has_pac()) + enable_scs = false; + + if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && cpu_has_bti()) { + enable_scs = false; + + /* + * If we have a CPU that supports BTI and a kernel built for + * BTI then mark the kernel executable text as guarded pages + * now so we don't have to rewrite the page tables later. + */ + text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP); + } + + /* Map all code read-write on the first pass if needed */ + twopass |= enable_scs; + prot = twopass ? data_prot : text_prot; + + map_segment(init_pg_dir, &pgdp, va_offset, _stext, _etext, prot, + !twopass, root_level); + map_segment(init_pg_dir, &pgdp, va_offset, __start_rodata, + __inittext_begin, data_prot, false, root_level); + map_segment(init_pg_dir, &pgdp, va_offset, __inittext_begin, + __inittext_end, prot, false, root_level); + map_segment(init_pg_dir, &pgdp, va_offset, __initdata_begin, + __initdata_end, data_prot, false, root_level); + map_segment(init_pg_dir, &pgdp, va_offset, _data, _end, data_prot, + true, root_level); + dsb(ishst); + + idmap_cpu_replace_ttbr1(init_pg_dir); + + if (twopass) { + if (IS_ENABLED(CONFIG_RELOCATABLE)) + relocate_kernel(kaslr_offset); + + if (enable_scs) { + scs_patch(__eh_frame_start + va_offset, + __eh_frame_end - __eh_frame_start); + asm("ic ialluis"); + + dynamic_scs_is_enabled = true; + } + + /* + * Unmap the text region before remapping it, to avoid + * potential TLB conflicts when creating the contiguous + * descriptors. + */ + unmap_segment(init_pg_dir, va_offset, _stext, _etext, + root_level); + dsb(ishst); + isb(); + __tlbi(vmalle1); + isb(); + + /* + * Remap these segments with different permissions + * No new page table allocations should be needed + */ + map_segment(init_pg_dir, NULL, va_offset, _stext, _etext, + text_prot, true, root_level); + map_segment(init_pg_dir, NULL, va_offset, __inittext_begin, + __inittext_end, text_prot, false, root_level); + } + + /* Copy the root page table to its final location */ + memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PAGE_SIZE); + dsb(ishst); + idmap_cpu_replace_ttbr1(swapper_pg_dir); +} + +static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr) +{ + u64 sctlr = read_sysreg(sctlr_el1); + u64 tcr = read_sysreg(tcr_el1) | TCR_DS; + + asm(" msr sctlr_el1, %0 ;" + " isb ;" + " msr ttbr0_el1, %1 ;" + " msr tcr_el1, %2 ;" + " isb ;" + " tlbi vmalle1 ;" + " dsb nsh ;" + " isb ;" + " msr sctlr_el1, %3 ;" + " isb ;" + :: "r"(sctlr & ~SCTLR_ELx_M), "r"(ttbr), "r"(tcr), "r"(sctlr)); +} + +static void __init remap_idmap_for_lpa2(void) +{ + /* clear the bits that change meaning once LPA2 is turned on */ + pteval_t mask = PTE_SHARED; + + /* + * We have to clear bits [9:8] in all block or page descriptors in the + * initial ID map, as otherwise they will be (mis)interpreted as + * physical address bits once we flick the LPA2 switch (TCR.DS). 
Since + * we cannot manipulate live descriptors in that way without creating + * potential TLB conflicts, let's create another temporary ID map in a + * LPA2 compatible fashion, and update the initial ID map while running + * from that. + */ + create_init_idmap(init_pg_dir, mask); + dsb(ishst); + set_ttbr0_for_lpa2((u64)init_pg_dir); + + /* + * Recreate the initial ID map with the same granularity as before. + * Don't bother with the FDT, we no longer need it after this. + */ + memset(init_idmap_pg_dir, 0, + (u64)init_idmap_pg_dir - (u64)init_idmap_pg_end); + + create_init_idmap(init_idmap_pg_dir, mask); + dsb(ishst); + + /* switch back to the updated initial ID map */ + set_ttbr0_for_lpa2((u64)init_idmap_pg_dir); + + /* wipe the temporary ID map from memory */ + memset(init_pg_dir, 0, (u64)init_pg_end - (u64)init_pg_dir); +} + +static void __init map_fdt(u64 fdt) +{ + static u8 ptes[INIT_IDMAP_FDT_SIZE] __initdata __aligned(PAGE_SIZE); + u64 efdt = fdt + MAX_FDT_SIZE; + u64 ptep = (u64)ptes; + + /* + * Map up to MAX_FDT_SIZE bytes, but avoid overlap with + * the kernel image. + */ + map_range(&ptep, fdt, (u64)_text > fdt ? min((u64)_text, efdt) : efdt, + fdt, PAGE_KERNEL, IDMAP_ROOT_LEVEL, + (pte_t *)init_idmap_pg_dir, false, 0); + dsb(ishst); +} + +asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt) +{ + static char const chosen_str[] __initconst = "/chosen"; + u64 va_base, pa_base = (u64)&_text; + u64 kaslr_offset = pa_base % MIN_KIMG_ALIGN; + int root_level = 4 - CONFIG_PGTABLE_LEVELS; + int va_bits = VA_BITS; + int chosen; + + map_fdt((u64)fdt); + + /* Clear BSS and the initial page tables */ + memset(__bss_start, 0, (u64)init_pg_end - (u64)__bss_start); + + /* Parse the command line for CPU feature overrides */ + chosen = fdt_path_offset(fdt, chosen_str); + init_feature_override(boot_status, fdt, chosen); + + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && !cpu_has_lva()) { + va_bits = VA_BITS_MIN; + } else if (IS_ENABLED(CONFIG_ARM64_LPA2) && !cpu_has_lpa2()) { + va_bits = VA_BITS_MIN; + root_level++; + } + + if (va_bits > VA_BITS_MIN) + sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(va_bits)); + + /* + * The virtual KASLR displacement modulo 2MiB is decided by the + * physical placement of the image, as otherwise, we might not be able + * to create the early kernel mapping using 2 MiB block descriptors. So + * take the low bits of the KASLR offset from the physical address, and + * fill in the high bits from the seed. 
+ */ + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + u64 kaslr_seed = kaslr_early_init(fdt, chosen); + + if (kaslr_seed && kaslr_requires_kpti()) + arm64_use_ng_mappings = true; + + kaslr_offset |= kaslr_seed & ~(MIN_KIMG_ALIGN - 1); + } + + if (IS_ENABLED(CONFIG_ARM64_LPA2) && va_bits > VA_BITS_MIN) + remap_idmap_for_lpa2(); + + va_base = KIMAGE_VADDR + kaslr_offset; + map_kernel(kaslr_offset, va_base - pa_base, root_level); +} diff --git a/arch/arm64/kernel/pi/map_range.c b/arch/arm64/kernel/pi/map_range.c new file mode 100644 index 0000000000..5410b2cac5 --- /dev/null +++ b/arch/arm64/kernel/pi/map_range.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2023 Google LLC +// Author: Ard Biesheuvel <ardb@google.com> + +#include <linux/types.h> +#include <linux/sizes.h> + +#include <asm/memory.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> + +#include "pi.h" + +/** + * map_range - Map a contiguous range of physical pages into virtual memory + * + * @pte: Address of physical pointer to array of pages to + * allocate page tables from + * @start: Virtual address of the start of the range + * @end: Virtual address of the end of the range (exclusive) + * @pa: Physical address of the start of the range + * @prot: Access permissions of the range + * @level: Translation level for the mapping + * @tbl: The level @level page table to create the mappings in + * @may_use_cont: Whether the use of the contiguous attribute is allowed + * @va_offset: Offset between a physical page and its current mapping + * in the VA space + */ +void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot, + int level, pte_t *tbl, bool may_use_cont, u64 va_offset) +{ + u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX; + u64 protval = pgprot_val(prot) & ~PTE_TYPE_MASK; + int lshift = (3 - level) * (PAGE_SHIFT - 3); + u64 lmask = (PAGE_SIZE << lshift) - 1; + + start &= PAGE_MASK; + pa &= PAGE_MASK; + + /* Advance tbl to the entry that covers start */ + tbl += (start >> (lshift + PAGE_SHIFT)) % PTRS_PER_PTE; + + /* + * Set the right block/page bits for this level unless we are + * clearing the mapping + */ + if (protval) + protval |= (level < 3) ? PMD_TYPE_SECT : PTE_TYPE_PAGE; + + while (start < end) { + u64 next = min((start | lmask) + 1, PAGE_ALIGN(end)); + + if (level < 3 && (start | next | pa) & lmask) { + /* + * This chunk needs a finer grained mapping. Create a + * table mapping if necessary and recurse. 
+ */ + if (pte_none(*tbl)) { + *tbl = __pte(__phys_to_pte_val(*pte) | + PMD_TYPE_TABLE | PMD_TABLE_UXN); + *pte += PTRS_PER_PTE * sizeof(pte_t); + } + map_range(pte, start, next, pa, prot, level + 1, + (pte_t *)(__pte_to_phys(*tbl) + va_offset), + may_use_cont, va_offset); + } else { + /* + * Start a contiguous range if start and pa are + * suitably aligned + */ + if (((start | pa) & cmask) == 0 && may_use_cont) + protval |= PTE_CONT; + + /* + * Clear the contiguous attribute if the remaining + * range does not cover a contiguous block + */ + if ((end & ~cmask) <= start) + protval &= ~PTE_CONT; + + /* Put down a block or page mapping */ + *tbl = __pte(__phys_to_pte_val(pa) | protval); + } + pa += next - start; + start = next; + tbl++; + } +} + +asmlinkage u64 __init create_init_idmap(pgd_t *pg_dir, pteval_t clrmask) +{ + u64 ptep = (u64)pg_dir + PAGE_SIZE; + pgprot_t text_prot = PAGE_KERNEL_ROX; + pgprot_t data_prot = PAGE_KERNEL; + + pgprot_val(text_prot) &= ~clrmask; + pgprot_val(data_prot) &= ~clrmask; + + map_range(&ptep, (u64)_stext, (u64)__initdata_begin, (u64)_stext, + text_prot, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0); + map_range(&ptep, (u64)__initdata_begin, (u64)_end, (u64)__initdata_begin, + data_prot, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0); + + return ptep; +} diff --git a/arch/arm64/kernel/pi/patch-scs.c b/arch/arm64/kernel/pi/patch-scs.c new file mode 100644 index 0000000000..49d8b40e61 --- /dev/null +++ b/arch/arm64/kernel/pi/patch-scs.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 - Google LLC + * Author: Ard Biesheuvel <ardb@google.com> + */ + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/linkage.h> +#include <linux/types.h> + +#include <asm/scs.h> + +#include "pi.h" + +bool dynamic_scs_is_enabled; + +// +// This minimal DWARF CFI parser is partially based on the code in +// arch/arc/kernel/unwind.c, and on the document below: +// https://refspecs.linuxbase.org/LSB_4.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html +// + +#define DW_CFA_nop 0x00 +#define DW_CFA_set_loc 0x01 +#define DW_CFA_advance_loc1 0x02 +#define DW_CFA_advance_loc2 0x03 +#define DW_CFA_advance_loc4 0x04 +#define DW_CFA_offset_extended 0x05 +#define DW_CFA_restore_extended 0x06 +#define DW_CFA_undefined 0x07 +#define DW_CFA_same_value 0x08 +#define DW_CFA_register 0x09 +#define DW_CFA_remember_state 0x0a +#define DW_CFA_restore_state 0x0b +#define DW_CFA_def_cfa 0x0c +#define DW_CFA_def_cfa_register 0x0d +#define DW_CFA_def_cfa_offset 0x0e +#define DW_CFA_def_cfa_expression 0x0f +#define DW_CFA_expression 0x10 +#define DW_CFA_offset_extended_sf 0x11 +#define DW_CFA_def_cfa_sf 0x12 +#define DW_CFA_def_cfa_offset_sf 0x13 +#define DW_CFA_val_offset 0x14 +#define DW_CFA_val_offset_sf 0x15 +#define DW_CFA_val_expression 0x16 +#define DW_CFA_lo_user 0x1c +#define DW_CFA_negate_ra_state 0x2d +#define DW_CFA_GNU_args_size 0x2e +#define DW_CFA_GNU_negative_offset_extended 0x2f +#define DW_CFA_hi_user 0x3f + +enum { + PACIASP = 0xd503233f, + AUTIASP = 0xd50323bf, + SCS_PUSH = 0xf800865e, + SCS_POP = 0xf85f8e5e, +}; + +static void __always_inline scs_patch_loc(u64 loc) +{ + u32 insn = le32_to_cpup((void *)loc); + + switch (insn) { + case PACIASP: + *(u32 *)loc = cpu_to_le32(SCS_PUSH); + break; + case AUTIASP: + *(u32 *)loc = cpu_to_le32(SCS_POP); + break; + default: + /* + * While the DW_CFA_negate_ra_state directive is guaranteed to + * appear right after a PACIASP/AUTIASP instruction, it may + * also appear after a 
DW_CFA_restore_state directive that + * restores a state that is only partially accurate, and is + * followed by DW_CFA_negate_ra_state directive to toggle the + * PAC bit again. So we permit other instructions here, and ignore + * them. + */ + return; + } + if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_CLEAN_CACHE)) + asm("dc civac, %0" :: "r"(loc)); + else + asm(ALTERNATIVE("dc cvau, %0", "nop", ARM64_HAS_CACHE_IDC) + :: "r"(loc)); +} + +/* + * Skip one uleb128/sleb128 encoded quantity from the opcode stream. All bytes + * except the last one have bit #7 set. + */ +static int __always_inline skip_xleb128(const u8 **opcode, int size) +{ + u8 c; + + do { + c = *(*opcode)++; + size--; + } while (c & BIT(7)); + + return size; +} + +struct eh_frame { + /* + * The size of this frame if 0 < size < U32_MAX, 0 terminates the list. + */ + u32 size; + + /* + * The first frame is a Common Information Entry (CIE) frame, followed + * by one or more Frame Description Entry (FDE) frames. In the former + * case, this field is 0, otherwise it is the negated offset relative + * to the associated CIE frame. + */ + u32 cie_id_or_pointer; + + union { + struct { // CIE + u8 version; + u8 augmentation_string[]; + }; + + struct { // FDE + s32 initial_loc; + s32 range; + u8 opcodes[]; + }; + }; +}; + +static int scs_handle_fde_frame(const struct eh_frame *frame, + bool fde_has_augmentation_data, + int code_alignment_factor, + bool dry_run) +{ + int size = frame->size - offsetof(struct eh_frame, opcodes) + 4; + u64 loc = (u64)offset_to_ptr(&frame->initial_loc); + const u8 *opcode = frame->opcodes; + + if (fde_has_augmentation_data) { + int l; + + // assume single byte uleb128_t + if (WARN_ON(*opcode & BIT(7))) + return -ENOEXEC; + + l = *opcode++; + opcode += l; + size -= l + 1; + } + + /* + * Starting from 'loc', apply the CFA opcodes that advance the location + * pointer, and identify the locations of the PAC instructions. + */ + while (size-- > 0) { + switch (*opcode++) { + case DW_CFA_nop: + case DW_CFA_remember_state: + case DW_CFA_restore_state: + break; + + case DW_CFA_advance_loc1: + loc += *opcode++ * code_alignment_factor; + size--; + break; + + case DW_CFA_advance_loc2: + loc += *opcode++ * code_alignment_factor; + loc += (*opcode++ << 8) * code_alignment_factor; + size -= 2; + break; + + case DW_CFA_def_cfa: + case DW_CFA_offset_extended: + size = skip_xleb128(&opcode, size); + fallthrough; + case DW_CFA_def_cfa_offset: + case DW_CFA_def_cfa_offset_sf: + case DW_CFA_def_cfa_register: + case DW_CFA_same_value: + case DW_CFA_restore_extended: + case 0x80 ... 0xbf: + size = skip_xleb128(&opcode, size); + break; + + case DW_CFA_negate_ra_state: + if (!dry_run) + scs_patch_loc(loc - 4); + break; + + case 0x40 ... 0x7f: + // advance loc + loc += (opcode[-1] & 0x3f) * code_alignment_factor; + break; + + case 0xc0 ... 
0xff: + break; + + default: + return -ENOEXEC; + } + } + return 0; +} + +int scs_patch(const u8 eh_frame[], int size) +{ + const u8 *p = eh_frame; + + while (size > 4) { + const struct eh_frame *frame = (const void *)p; + bool fde_has_augmentation_data = true; + int code_alignment_factor = 1; + int ret; + + if (frame->size == 0 || + frame->size == U32_MAX || + frame->size > size) + break; + + if (frame->cie_id_or_pointer == 0) { + const u8 *p = frame->augmentation_string; + + /* a 'z' in the augmentation string must come first */ + fde_has_augmentation_data = *p == 'z'; + + /* + * The code alignment factor is a uleb128 encoded field + * but given that the only sensible values are 1 or 4, + * there is no point in decoding the whole thing. + */ + p += strlen(p) + 1; + if (!WARN_ON(*p & BIT(7))) + code_alignment_factor = *p; + } else { + ret = scs_handle_fde_frame(frame, + fde_has_augmentation_data, + code_alignment_factor, + true); + if (ret) + return ret; + scs_handle_fde_frame(frame, fde_has_augmentation_data, + code_alignment_factor, false); + } + + p += sizeof(frame->size) + frame->size; + size -= sizeof(frame->size) + frame->size; + } + return 0; +} diff --git a/arch/arm64/kernel/pi/pi.h b/arch/arm64/kernel/pi/pi.h new file mode 100644 index 0000000000..c91e5e965c --- /dev/null +++ b/arch/arm64/kernel/pi/pi.h @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2023 Google LLC +// Author: Ard Biesheuvel <ardb@google.com> + +#include <linux/types.h> + +#define __prel64_initconst __section(".init.rodata.prel64") + +#define PREL64(type, name) union { type *name; prel64_t name ## _prel; } + +#define prel64_pointer(__d) (typeof(__d))prel64_to_pointer(&__d##_prel) + +typedef volatile signed long prel64_t; + +static inline void *prel64_to_pointer(const prel64_t *offset) +{ + if (!*offset) + return NULL; + return (void *)offset + *offset; +} + +extern bool dynamic_scs_is_enabled; + +extern pgd_t init_idmap_pg_dir[], init_idmap_pg_end[]; + +void init_feature_override(u64 boot_status, const void *fdt, int chosen); +u64 kaslr_early_init(void *fdt, int chosen); +void relocate_kernel(u64 offset); +int scs_patch(const u8 eh_frame[], int size); + +void map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot, + int level, pte_t *tbl, bool may_use_cont, u64 va_offset); + +asmlinkage void early_map_kernel(u64 boot_status, void *fdt); + +asmlinkage u64 create_init_idmap(pgd_t *pgd, pteval_t clrmask); diff --git a/arch/arm64/kernel/pi/relacheck.c b/arch/arm64/kernel/pi/relacheck.c new file mode 100644 index 0000000000..b0cd4d0d27 --- /dev/null +++ b/arch/arm64/kernel/pi/relacheck.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 - Google LLC + * Author: Ard Biesheuvel <ardb@google.com> + */ + +#include <elf.h> +#include <fcntl.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define HOST_ORDER ELFDATA2LSB +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define HOST_ORDER ELFDATA2MSB +#endif + +static Elf64_Ehdr *ehdr; +static Elf64_Shdr *shdr; +static const char *strtab; +static bool swap; + +static uint64_t swab_elfxword(uint64_t val) +{ + return swap ? __builtin_bswap64(val) : val; +} + +static uint32_t swab_elfword(uint32_t val) +{ + return swap ? __builtin_bswap32(val) : val; +} + +static uint16_t swab_elfhword(uint16_t val) +{ + return swap ? 
__builtin_bswap16(val) : val; +} + +int main(int argc, char *argv[]) +{ + struct stat stat; + int fd, ret; + + if (argc < 3) { + fprintf(stderr, "file arguments missing\n"); + exit(EXIT_FAILURE); + } + + fd = open(argv[1], O_RDWR); + if (fd < 0) { + fprintf(stderr, "failed to open %s\n", argv[1]); + exit(EXIT_FAILURE); + } + + ret = fstat(fd, &stat); + if (ret < 0) { + fprintf(stderr, "failed to stat() %s\n", argv[1]); + exit(EXIT_FAILURE); + } + + ehdr = mmap(0, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (ehdr == MAP_FAILED) { + fprintf(stderr, "failed to mmap() %s\n", argv[1]); + exit(EXIT_FAILURE); + } + + swap = ehdr->e_ident[EI_DATA] != HOST_ORDER; + shdr = (void *)ehdr + swab_elfxword(ehdr->e_shoff); + strtab = (void *)ehdr + + swab_elfxword(shdr[swab_elfhword(ehdr->e_shstrndx)].sh_offset); + + for (int i = 0; i < swab_elfhword(ehdr->e_shnum); i++) { + unsigned long info, flags; + bool prel64 = false; + Elf64_Rela *rela; + int numrela; + + if (swab_elfword(shdr[i].sh_type) != SHT_RELA) + continue; + + /* only consider RELA sections operating on data */ + info = swab_elfword(shdr[i].sh_info); + flags = swab_elfxword(shdr[info].sh_flags); + if ((flags & (SHF_ALLOC | SHF_EXECINSTR)) != SHF_ALLOC) + continue; + + /* + * We generally don't permit ABS64 relocations in the code that + * runs before relocation processing occurs. If statically + * initialized absolute symbol references are unavoidable, they + * may be emitted into a *.rodata.prel64 section and they will + * be converted to place-relative 64-bit references. This + * requires special handling in the referring code. + */ + if (strstr(strtab + swab_elfword(shdr[info].sh_name), + ".rodata.prel64")) { + prel64 = true; + } + + rela = (void *)ehdr + swab_elfxword(shdr[i].sh_offset); + numrela = swab_elfxword(shdr[i].sh_size) / sizeof(*rela); + + for (int j = 0; j < numrela; j++) { + uint64_t info = swab_elfxword(rela[j].r_info); + + if (ELF64_R_TYPE(info) != R_AARCH64_ABS64) + continue; + + if (prel64) { + /* convert ABS64 into PREL64 */ + info ^= R_AARCH64_ABS64 ^ R_AARCH64_PREL64; + rela[j].r_info = swab_elfxword(info); + } else { + fprintf(stderr, + "Unexpected absolute relocations detected in %s\n", + argv[2]); + close(fd); + unlink(argv[1]); + exit(EXIT_FAILURE); + } + } + } + close(fd); + return 0; +} diff --git a/arch/arm64/kernel/pi/relocate.c b/arch/arm64/kernel/pi/relocate.c new file mode 100644 index 0000000000..2407d26963 --- /dev/null +++ b/arch/arm64/kernel/pi/relocate.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2023 Google LLC +// Authors: Ard Biesheuvel <ardb@google.com> +// Peter Collingbourne <pcc@google.com> + +#include <linux/elf.h> +#include <linux/init.h> +#include <linux/types.h> + +#include "pi.h" + +extern const Elf64_Rela rela_start[], rela_end[]; +extern const u64 relr_start[], relr_end[]; + +void __init relocate_kernel(u64 offset) +{ + u64 *place = NULL; + + for (const Elf64_Rela *rela = rela_start; rela < rela_end; rela++) { + if (ELF64_R_TYPE(rela->r_info) != R_AARCH64_RELATIVE) + continue; + *(u64 *)(rela->r_offset + offset) = rela->r_addend + offset; + } + + if (!IS_ENABLED(CONFIG_RELR) || !offset) + return; + + /* + * Apply RELR relocations. + * + * RELR is a compressed format for storing relative relocations. The + * encoded sequence of entries looks like: + * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ] + * + * i.e. start with an address, followed by any number of bitmaps. The + * address entry encodes 1 relocation. 
The subsequent bitmap entries + * encode up to 63 relocations each, at subsequent offsets following + * the last address entry. + * + * The bitmap entries must have 1 in the least significant bit. The + * assumption here is that an address cannot have 1 in lsb. Odd + * addresses are not supported. Any odd addresses are stored in the + * RELA section, which is handled above. + * + * With the exception of the least significant bit, each bit in the + * bitmap corresponds with a machine word that follows the base address + * word, and the bit value indicates whether or not a relocation needs + * to be applied to it. The second least significant bit represents the + * machine word immediately following the initial address, and each bit + * that follows represents the next word, in linear order. As such, a + * single bitmap can encode up to 63 relocations in a 64-bit object. + */ + for (const u64 *relr = relr_start; relr < relr_end; relr++) { + if ((*relr & 1) == 0) { + place = (u64 *)(*relr + offset); + *place++ += offset; + } else { + for (u64 *p = place, r = *relr >> 1; r; p++, r >>= 1) + if (r & 1) + *p += offset; + place += 63; + } + } +} |
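
The RELR walk at the end of relocate.c is easier to follow with a worked example. The sketch below is not part of the patch; it is a hypothetical stand-alone C decoder that applies the same encoding rules described in the comment above (an even entry names the next word to relocate, an odd entry is a bitmap whose bits 1..63 cover the 63 words that follow the last address entry's word), printing the addresses it would patch instead of patching them.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-alone decoder for the RELR encoding described in the
 * relocate.c comment: an even entry is an address whose word is relocated
 * directly, an odd entry is a bitmap in which bit N (N >= 1) marks the
 * Nth word after the last address entry's word.
 */
static void decode_relr(const uint64_t *relr, size_t count)
{
	uint64_t where = 0;

	for (size_t i = 0; i < count; i++) {
		if ((relr[i] & 1) == 0) {
			/* address entry: this word is relocated directly */
			where = relr[i];
			printf("relocate word at 0x%llx\n",
			       (unsigned long long)where);
			where += sizeof(uint64_t);
		} else {
			/* bitmap entry: after the shift, bit 0 covers 'where' */
			uint64_t bits = relr[i] >> 1;

			for (int n = 0; bits; n++, bits >>= 1)
				if (bits & 1)
					printf("relocate word at 0x%llx\n",
					       (unsigned long long)(where + n * sizeof(uint64_t)));
			where += 63 * sizeof(uint64_t);
		}
	}
}

int main(void)
{
	/* one address entry, then a bitmap marking the 1st and 3rd words after it */
	const uint64_t demo[] = { 0x1000, (1ULL << 1) | (1ULL << 3) | 1 };

	decode_relr(demo, sizeof(demo) / sizeof(demo[0]));
	/* prints 0x1000, 0x1008 and 0x1018 */
	return 0;
}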
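
Similarly, the place-relative (PREL64) pointer scheme defined in pi.h and enforced by relacheck can be illustrated outside the kernel. The sketch below is a user-space analogue, not the kernel code: the 64-bit field stores "target address minus the field's own address" (what an R_AARCH64_PREL64 relocation resolves to at link time), so the pointer can be recovered without any runtime relocation, as prel64_to_pointer() does above.

#include <stdint.h>
#include <stdio.h>

/* analogue of the kernel's prel64_t: a signed, self-relative 64-bit offset */
typedef int64_t prel64_t;

/* recover the pointer by adding the stored offset to its own location,
 * mirroring the kernel's prel64_to_pointer() (which uses void * arithmetic) */
static void *prel64_to_pointer(const prel64_t *offset)
{
	if (!*offset)
		return NULL;
	return (char *)offset + *offset;
}

static int some_target = 42;	/* hypothetical object being referenced */

int main(void)
{
	prel64_t slot;

	/* what a PREL64 relocation would have written into the field */
	slot = (intptr_t)&some_target - (intptr_t)&slot;

	int *p = prel64_to_pointer(&slot);
	printf("%d\n", *p);	/* prints 42 */
	return 0;
}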