author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /arch/arm64/kernel/process.c
parent     Initial commit.
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arm64/kernel/process.c')
-rw-r--r--	arch/arm64/kernel/process.c	757
1 file changed, 757 insertions, 0 deletions
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
new file mode 100644
index 000000000..044a7d7f1
--- /dev/null
+++ b/arch/arm64/kernel/process.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on arch/arm/kernel/process.c
+ *
+ * Original Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1996-2000 Russell King - Converted to ARM.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+#include <linux/compat.h>
+#include <linux/efi.h>
+#include <linux/elf.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/kernel.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/nospec.h>
+#include <linux/stddef.h>
+#include <linux/sysctl.h>
+#include <linux/unistd.h>
+#include <linux/user.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/elfcore.h>
+#include <linux/pm.h>
+#include <linux/tick.h>
+#include <linux/utsname.h>
+#include <linux/uaccess.h>
+#include <linux/random.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/personality.h>
+#include <linux/notifier.h>
+#include <trace/events/power.h>
+#include <linux/percpu.h>
+#include <linux/thread_info.h>
+#include <linux/prctl.h>
+#include <linux/stacktrace.h>
+
+#include <asm/alternative.h>
+#include <asm/compat.h>
+#include <asm/cpufeature.h>
+#include <asm/cacheflush.h>
+#include <asm/exec.h>
+#include <asm/fpsimd.h>
+#include <asm/mmu_context.h>
+#include <asm/mte.h>
+#include <asm/processor.h>
+#include <asm/pointer_auth.h>
+#include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __ro_after_init;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+/*
+ * Function pointers to optional machine specific functions
+ */
+void (*pm_power_off)(void);
+EXPORT_SYMBOL_GPL(pm_power_off);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+	cpu_die();
+}
+#endif
+
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in smp_shutdown_nonboot_cpus() achieves this.
+ */
+void machine_shutdown(void)
+{
+	smp_shutdown_nonboot_cpus(reboot_cpu);
+}
+
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
+void machine_halt(void)
+{
+	local_irq_disable();
+	smp_send_stop();
+	while (1);
+}
+
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
+void machine_power_off(void)
+{
+	local_irq_disable();
+	smp_send_stop();
+	do_kernel_power_off();
+}
+
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
+void machine_restart(char *cmd)
+{
+	/* Disable interrupts first */
+	local_irq_disable();
+	smp_send_stop();
+
+	/*
+	 * UpdateCapsule() depends on the system being reset via
+	 * ResetSystem().
+	 */
+	if (efi_enabled(EFI_RUNTIME_SERVICES))
+		efi_reboot(reboot_mode, NULL);
+
+	/* Now call the architecture specific reboot code. */
+	do_kernel_restart(cmd);
+
+	/*
+	 * Whoops - the architecture was unable to reboot.
+	 */
+	printk("Reboot failed -- System halted\n");
+	while (1);
+}
+
+#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
+static const char *const btypes[] = {
+	bstr(NONE, "--"),
+	bstr(  JC, "jc"),
+	bstr(   C, "-c"),
+	bstr(  J , "j-")
+};
+#undef bstr
+
+static void print_pstate(struct pt_regs *regs)
+{
+	u64 pstate = regs->pstate;
+
+	if (compat_user_mode(regs)) {
+		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
+		       pstate,
+		       pstate & PSR_AA32_N_BIT ? 'N' : 'n',
+		       pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
+		       pstate & PSR_AA32_C_BIT ? 'C' : 'c',
+		       pstate & PSR_AA32_V_BIT ? 'V' : 'v',
+		       pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
+		       pstate & PSR_AA32_T_BIT ? "T32" : "A32",
+		       pstate & PSR_AA32_E_BIT ? "BE" : "LE",
+		       pstate & PSR_AA32_A_BIT ? 'A' : 'a',
+		       pstate & PSR_AA32_I_BIT ? 'I' : 'i',
+		       pstate & PSR_AA32_F_BIT ? 'F' : 'f',
+		       pstate & PSR_AA32_DIT_BIT ? '+' : '-',
+		       pstate & PSR_AA32_SSBS_BIT ? '+' : '-');
+	} else {
+		const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
+					       PSR_BTYPE_SHIFT];
+
+		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
+		       pstate,
+		       pstate & PSR_N_BIT ? 'N' : 'n',
+		       pstate & PSR_Z_BIT ? 'Z' : 'z',
+		       pstate & PSR_C_BIT ? 'C' : 'c',
+		       pstate & PSR_V_BIT ? 'V' : 'v',
+		       pstate & PSR_D_BIT ? 'D' : 'd',
+		       pstate & PSR_A_BIT ? 'A' : 'a',
+		       pstate & PSR_I_BIT ? 'I' : 'i',
+		       pstate & PSR_F_BIT ? 'F' : 'f',
+		       pstate & PSR_PAN_BIT ? '+' : '-',
+		       pstate & PSR_UAO_BIT ? '+' : '-',
+		       pstate & PSR_TCO_BIT ? '+' : '-',
+		       pstate & PSR_DIT_BIT ? '+' : '-',
+		       pstate & PSR_SSBS_BIT ? '+' : '-',
+		       btype_str);
+	}
+}
+
+void __show_regs(struct pt_regs *regs)
+{
+	int i, top_reg;
+	u64 lr, sp;
+
+	if (compat_user_mode(regs)) {
+		lr = regs->compat_lr;
+		sp = regs->compat_sp;
+		top_reg = 12;
+	} else {
+		lr = regs->regs[30];
+		sp = regs->sp;
+		top_reg = 29;
+	}
+
+	show_regs_print_info(KERN_DEFAULT);
+	print_pstate(regs);
+
+	if (!user_mode(regs)) {
+		printk("pc : %pS\n", (void *)regs->pc);
+		printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
+	} else {
+		printk("pc : %016llx\n", regs->pc);
+		printk("lr : %016llx\n", lr);
+	}
+
+	printk("sp : %016llx\n", sp);
+
+	if (system_uses_irq_prio_masking())
+		printk("pmr_save: %08llx\n", regs->pmr_save);
+
+	i = top_reg;
+
+	while (i >= 0) {
+		printk("x%-2d: %016llx", i, regs->regs[i]);
+
+		while (i-- % 3)
+			pr_cont(" x%-2d: %016llx", i, regs->regs[i]);
+
+		pr_cont("\n");
+	}
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	__show_regs(regs);
+	dump_backtrace(regs, NULL, KERN_DEFAULT);
+}
+
+static void tls_thread_flush(void)
+{
+	write_sysreg(0, tpidr_el0);
+	if (system_supports_tpidr2())
+		write_sysreg_s(0, SYS_TPIDR2_EL0);
+
+	if (is_compat_task()) {
+		current->thread.uw.tp_value = 0;
+
+		/*
+		 * We need to ensure ordering between the shadow state and the
+		 * hardware state, so that we don't corrupt the hardware state
+		 * with a stale shadow state during context switch.
+		 */
+		barrier();
+		write_sysreg(0, tpidrro_el0);
+	}
+}
+
+static void flush_tagged_addr_state(void)
+{
+	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
+		clear_thread_flag(TIF_TAGGED_ADDR);
+}
+
+void flush_thread(void)
+{
+	fpsimd_flush_thread();
+	tls_thread_flush();
+	flush_ptrace_hw_breakpoint(current);
+	flush_tagged_addr_state();
+}
+
+void arch_release_task_struct(struct task_struct *tsk)
+{
+	fpsimd_release_task(tsk);
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	if (current->mm)
+		fpsimd_preserve_current_state();
+	*dst = *src;
+
+	/* We rely on the above assignment to initialize dst's thread_flags: */
+	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+
+	/*
+	 * Detach src's sve_state (if any) from dst so that it does not
+	 * get erroneously used or freed prematurely. dst's copies
+	 * will be allocated on demand later on if dst uses SVE.
+	 * For consistency, also clear TIF_SVE here: this could be done
+	 * later in copy_process(), but to avoid tripping up future
+	 * maintainers it is best not to leave TIF flags and buffers in
+	 * an inconsistent state, even temporarily.
+	 */
+	dst->thread.sve_state = NULL;
+	clear_tsk_thread_flag(dst, TIF_SVE);
+
+	/*
+	 * In the unlikely event that we create a new thread with ZA
+	 * enabled we should retain the ZA state so duplicate it here.
+	 * This may be shortly freed if we exec() or if CLONE_SETTLS
+	 * but it's simpler to do it here. To avoid confusing the rest
+	 * of the code ensure that we have a sve_state allocated
+	 * whenever za_state is allocated.
+	 */
+	if (thread_za_enabled(&src->thread)) {
+		dst->thread.sve_state = kzalloc(sve_state_size(src),
+						GFP_KERNEL);
+		if (!dst->thread.sve_state)
+			return -ENOMEM;
+		dst->thread.za_state = kmemdup(src->thread.za_state,
+					       za_state_size(src),
+					       GFP_KERNEL);
+		if (!dst->thread.za_state) {
+			kfree(dst->thread.sve_state);
+			dst->thread.sve_state = NULL;
+			return -ENOMEM;
+		}
+	} else {
+		dst->thread.za_state = NULL;
+		clear_tsk_thread_flag(dst, TIF_SME);
+	}
+
+	/* clear any pending asynchronous tag fault raised by the parent */
+	clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
+
+	return 0;
+}
+
+asmlinkage void ret_from_fork(void) asm("ret_from_fork");
+
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+{
+	unsigned long clone_flags = args->flags;
+	unsigned long stack_start = args->stack;
+	unsigned long tls = args->tls;
+	struct pt_regs *childregs = task_pt_regs(p);
+
+	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
+
+	/*
+	 * In case p was allocated the same task_struct pointer as some
+	 * other recently-exited task, make sure p is disassociated from
+	 * any cpu that may have run that now-exited task recently.
+	 * Otherwise we could erroneously skip reloading the FPSIMD
+	 * registers for p.
+	 */
+	fpsimd_flush_task_state(p);
+
+	ptrauth_thread_init_kernel(p);
+
+	if (likely(!args->fn)) {
+		*childregs = *current_pt_regs();
+		childregs->regs[0] = 0;
+
+		/*
+		 * Read the current TLS pointer from tpidr_el0 as it may be
+		 * out-of-sync with the saved value.
+		 */
+		*task_user_tls(p) = read_sysreg(tpidr_el0);
+		if (system_supports_tpidr2())
+			p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
+
+		if (stack_start) {
+			if (is_compat_thread(task_thread_info(p)))
+				childregs->compat_sp = stack_start;
+			else
+				childregs->sp = stack_start;
+		}
+
+		/*
+		 * If a TLS pointer was passed to clone, use it for the new
+		 * thread. We also reset TPIDR2 if it's in use.
+		 */
+		if (clone_flags & CLONE_SETTLS) {
+			p->thread.uw.tp_value = tls;
+			p->thread.tpidr2_el0 = 0;
+		}
+	} else {
+		/*
+		 * A kthread has no context to ERET to, so ensure any buggy
+		 * ERET is treated as an illegal exception return.
+		 *
+		 * When a user task is created from a kthread, childregs will
+		 * be initialized by start_thread() or start_compat_thread().
+		 */
+		memset(childregs, 0, sizeof(struct pt_regs));
+		childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
+
+		p->thread.cpu_context.x19 = (unsigned long)args->fn;
+		p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
+	}
+	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
+	p->thread.cpu_context.sp = (unsigned long)childregs;
+	/*
+	 * For the benefit of the unwinder, set up childregs->stackframe
+	 * as the final frame for the new task.
+	 */
+	p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
+
+	ptrace_hw_copy_thread(p);
+
+	return 0;
+}
+
+void tls_preserve_current_state(void)
+{
+	*task_user_tls(current) = read_sysreg(tpidr_el0);
+	if (system_supports_tpidr2() && !is_compat_task())
+		current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
+}
+
+static void tls_thread_switch(struct task_struct *next)
+{
+	tls_preserve_current_state();
+
+	if (is_compat_thread(task_thread_info(next)))
+		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
+	else if (!arm64_kernel_unmapped_at_el0())
+		write_sysreg(0, tpidrro_el0);
+
+	write_sysreg(*task_user_tls(next), tpidr_el0);
+	if (system_supports_tpidr2())
+		write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
+}
+
+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+	/*
+	 * Nothing to do for kernel threads, but 'regs' may be junk
+	 * (e.g. idle task) so check the flags and bail early.
+	 */
+	if (unlikely(next->flags & PF_KTHREAD))
+		return;
+
+	/*
+	 * If all CPUs implement the SSBS extension, then we just need to
+	 * context-switch the PSTATE field.
+	 */
+	if (cpus_have_const_cap(ARM64_SSBS))
+		return;
+
+	spectre_v4_enable_task_mitigation(next);
+}
+
+/*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+ *
+ * This is *only* for exception entry from EL0, and is not valid until we
+ * __switch_to() a user task.
+ */
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+static void entry_task_switch(struct task_struct *next)
+{
+	__this_cpu_write(__entry_task, next);
+}
+
+/*
+ * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
+ * Ensure access is disabled when switching to a 32bit task, ensure
+ * access is enabled when switching to a 64bit task.
+ */
+static void erratum_1418040_thread_switch(struct task_struct *next)
+{
+	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
+	    !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
+		return;
+
+	if (is_compat_thread(task_thread_info(next)))
+		sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
+	else
+		sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
+}
+
+static void erratum_1418040_new_exec(void)
+{
+	preempt_disable();
+	erratum_1418040_thread_switch(current);
+	preempt_enable();
+}
+
+/*
+ * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
+ * this function must be called with preemption disabled and the update to
+ * sctlr_user must be made in the same preemption disabled block so that
+ * __switch_to() does not see the variable update before the SCTLR_EL1 one.
+ */
+void update_sctlr_el1(u64 sctlr)
+{
+	/*
+	 * EnIA must not be cleared while in the kernel as this is necessary for
+	 * in-kernel PAC. It will be cleared on kernel exit if needed.
+	 */
+	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);
+
+	/* ISB required for the kernel uaccess routines when setting TCF0. */
+	isb();
+}
+
+/*
+ * Thread switching.
+ */
+__notrace_funcgraph __sched
+struct task_struct *__switch_to(struct task_struct *prev,
+				struct task_struct *next)
+{
+	struct task_struct *last;
+
+	fpsimd_thread_switch(next);
+	tls_thread_switch(next);
+	hw_breakpoint_thread_switch(next);
+	contextidr_thread_switch(next);
+	entry_task_switch(next);
+	ssbs_thread_switch(next);
+	erratum_1418040_thread_switch(next);
+	ptrauth_thread_switch_user(next);
+
+	/*
+	 * Complete any pending TLB or cache maintenance on this CPU in case
+	 * the thread migrates to a different CPU.
+	 * This full barrier is also required by the membarrier system
+	 * call.
+	 */
+	dsb(ish);
+
+	/*
+	 * MTE thread switching must happen after the DSB above to ensure that
+	 * any asynchronous tag check faults have been logged in the TFSR*_EL1
+	 * registers.
+	 */
+	mte_thread_switch(next);
+	/* avoid expensive SCTLR_EL1 accesses if no change */
+	if (prev->thread.sctlr_user != next->thread.sctlr_user)
+		update_sctlr_el1(next->thread.sctlr_user);
+
+	/* the actual thread switch */
+	last = cpu_switch_to(prev, next);
+
+	return last;
+}
+
+struct wchan_info {
+	unsigned long pc;
+	int count;
+};
+
+static bool get_wchan_cb(void *arg, unsigned long pc)
+{
+	struct wchan_info *wchan_info = arg;
+
+	if (!in_sched_functions(pc)) {
+		wchan_info->pc = pc;
+		return false;
+	}
+	return wchan_info->count++ < 16;
+}
+
+unsigned long __get_wchan(struct task_struct *p)
+{
+	struct wchan_info wchan_info = {
+		.pc = 0,
+		.count = 0,
+	};
+
+	if (!try_get_task_stack(p))
+		return 0;
+
+	arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL);
+
+	put_task_stack(p);
+
+	return wchan_info.pc;
+}
+
+unsigned long arch_align_stack(unsigned long sp)
+{
+	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+		sp -= prandom_u32_max(PAGE_SIZE);
+	return sp & ~0xf;
+}
+
+#ifdef CONFIG_COMPAT
+int compat_elf_check_arch(const struct elf32_hdr *hdr)
+{
+	if (!system_supports_32bit_el0())
+		return false;
+
+	if ((hdr)->e_machine != EM_ARM)
+		return false;
+
+	if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
+		return false;
+
+	/*
+	 * Prevent execve() of a 32-bit program from a deadline task
+	 * if the restricted affinity mask would be inadmissible on an
+	 * asymmetric system.
+	 */
+	return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
+	       !dl_task_check_affinity(current, system_32bit_el0_cpumask());
+}
+#endif
+
+/*
+ * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
+ */
+void arch_setup_new_exec(void)
+{
+	unsigned long mmflags = 0;
+
+	if (is_compat_task()) {
+		mmflags = MMCF_AARCH32;
+
+		/*
+		 * Restrict the CPU affinity mask for a 32-bit task so that
+		 * it contains only 32-bit-capable CPUs.
+		 *
+		 * From the perspective of the task, this looks similar to
+		 * what would happen if the 64-bit-only CPUs were hot-unplugged
+		 * at the point of execve(), although we try a bit harder to
+		 * honour the cpuset hierarchy.
+		 */
+		if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+			force_compatible_cpus_allowed_ptr(current);
+	} else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
+		relax_compatible_cpus_allowed_ptr(current);
+	}
+
+	current->mm->context.flags = mmflags;
+	ptrauth_thread_init_user();
+	mte_thread_init_user();
+	erratum_1418040_new_exec();
+
+	if (task_spec_ssb_noexec(current)) {
+		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
+					 PR_SPEC_ENABLE);
+	}
+}
+
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+/*
+ * Control the relaxed ABI allowing tagged user addresses into the kernel.
+ */
+static unsigned int tagged_addr_disabled;
+
+long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
+{
+	unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
+	struct thread_info *ti = task_thread_info(task);
+
+	if (is_compat_thread(ti))
+		return -EINVAL;
+
+	if (system_supports_mte())
+		valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC \
+			| PR_MTE_TAG_MASK;
+
+	if (arg & ~valid_mask)
+		return -EINVAL;
+
+	/*
+	 * Do not allow the enabling of the tagged address ABI if globally
+	 * disabled via sysctl abi.tagged_addr_disabled.
+	 */
+	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
+		return -EINVAL;
+
+	if (set_mte_ctrl(task, arg) != 0)
+		return -EINVAL;
+
+	update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
+
+	return 0;
+}
+
+long get_tagged_addr_ctrl(struct task_struct *task)
+{
+	long ret = 0;
+	struct thread_info *ti = task_thread_info(task);
+
+	if (is_compat_thread(ti))
+		return -EINVAL;
+
+	if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
+		ret = PR_TAGGED_ADDR_ENABLE;
+
+	ret |= get_mte_ctrl(task);
+
+	return ret;
+}
+
+/*
+ * Global sysctl to disable the tagged user addresses support. This control
+ * only prevents the tagged address ABI enabling via prctl() and does not
+ * disable it for tasks that already opted in to the relaxed ABI.
+ */
+
+static struct ctl_table tagged_addr_sysctl_table[] = {
+	{
+		.procname	= "tagged_addr_disabled",
+		.mode		= 0644,
+		.data		= &tagged_addr_disabled,
+		.maxlen		= sizeof(int),
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
+	{ }
+};
+
+static int __init tagged_addr_init(void)
+{
+	if (!register_sysctl("abi", tagged_addr_sysctl_table))
+		return -EINVAL;
+	return 0;
+}
+
+core_initcall(tagged_addr_init);
+#endif	/* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+#ifdef CONFIG_BINFMT_ELF
+int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
+			 bool has_interp, bool is_interp)
+{
+	/*
+	 * For dynamically linked executables the interpreter is
+	 * responsible for setting PROT_BTI on everything except
+	 * itself.
+	 */
+	if (is_interp != has_interp)
+		return prot;
+
+	if (!(state->flags & ARM64_ELF_BTI))
+		return prot;
+
+	if (prot & PROT_EXEC)
+		prot |= PROT_BTI;
+
+	return prot;
+}
+#endif
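
Not part of the commit above, but as a reader's aid: a minimal userspace sketch of the tagged-address ABI that set_tagged_addr_ctrl()/get_tagged_addr_ctrl() implement. It assumes an arm64 kernel built with CONFIG_ARM64_TAGGED_ADDR_ABI, headers that expose PR_SET_TAGGED_ADDR_CTRL, PR_GET_TAGGED_ADDR_CTRL and PR_TAGGED_ADDR_ENABLE, and that the abi.tagged_addr_disabled sysctl registered above is left at 0 (otherwise the first prctl() fails with EINVAL).

```c
/*
 * Illustrative sketch only -- not part of arch/arm64/kernel/process.c.
 * Opt the calling task in to the relaxed tagged-address ABI, then pass a
 * pointer with a non-zero top byte to a syscall.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/prctl.h>

int main(void)
{
	/* Handled by set_tagged_addr_ctrl() above; rejected with EINVAL
	 * if the abi.tagged_addr_disabled sysctl has been set. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) {
		perror("PR_SET_TAGGED_ADDR_CTRL");
		return 1;
	}

	/* Read the control word back via get_tagged_addr_ctrl(). */
	printf("tagged addr ctrl: %#lx\n",
	       (unsigned long)prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));

	/* Put an arbitrary tag in the top byte of a heap pointer; with the
	 * ABI enabled the kernel accepts it as a syscall argument. */
	char *buf = strdup("hello from a tagged pointer\n");
	char *tagged = (char *)((uintptr_t)buf | (0x2aUL << 56));
	if (write(STDOUT_FILENO, tagged, strlen(buf)) < 0)
		perror("write");

	free(buf);
	return 0;
}
```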