Diffstat (limited to 'arch/arm64/Kconfig')
-rw-r--r-- | arch/arm64/Kconfig | 2307 |
1 file changed, 2307 insertions, 0 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
new file mode 100644
index 000000000..c15f71501
--- /dev/null
+++ b/arch/arm64/Kconfig
@@ -0,0 +1,2307 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ARM64
+	def_bool y
+	select ACPI_CCA_REQUIRED if ACPI
+	select ACPI_GENERIC_GSI if ACPI
+	select ACPI_GTDT if ACPI
+	select ACPI_IORT if ACPI
+	select ACPI_REDUCED_HARDWARE_ONLY if ACPI
+	select ACPI_MCFG if (ACPI && PCI)
+	select ACPI_SPCR_TABLE if ACPI
+	select ACPI_PPTT if ACPI
+	select ARCH_HAS_DEBUG_WX
+	select ARCH_BINFMT_ELF_EXTRA_PHDRS
+	select ARCH_BINFMT_ELF_STATE
+	select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+	select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
+	select ARCH_ENABLE_MEMORY_HOTPLUG
+	select ARCH_ENABLE_MEMORY_HOTREMOVE
+	select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
+	select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
+	select ARCH_HAS_CACHE_LINE_SIZE
+	select ARCH_HAS_CURRENT_STACK_POINTER
+	select ARCH_HAS_DEBUG_VIRTUAL
+	select ARCH_HAS_DEBUG_VM_PGTABLE
+	select ARCH_HAS_DMA_PREP_COHERENT
+	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+	select ARCH_HAS_FAST_MULTIPLIER
+	select ARCH_HAS_FORTIFY_SOURCE
+	select ARCH_HAS_GCOV_PROFILE_ALL
+	select ARCH_HAS_GIGANTIC_PAGE
+	select ARCH_HAS_KCOV
+	select ARCH_HAS_KEEPINITRD
+	select ARCH_HAS_MEMBARRIER_SYNC_CORE
+	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+	select ARCH_HAS_PTE_DEVMAP
+	select ARCH_HAS_PTE_SPECIAL
+	select ARCH_HAS_SETUP_DMA_OPS
+	select ARCH_HAS_SET_DIRECT_MAP
+	select ARCH_HAS_SET_MEMORY
+	select ARCH_STACKWALK
+	select ARCH_HAS_STRICT_KERNEL_RWX
+	select ARCH_HAS_STRICT_MODULE_RWX
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+	select ARCH_HAS_SYNC_DMA_FOR_CPU
+	select ARCH_HAS_SYSCALL_WRAPPER
+	select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
+	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_HAS_ZONE_DMA_SET if EXPERT
+	select ARCH_HAVE_ELF_PROT
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+	select ARCH_HAVE_TRACE_MMIO_ACCESS
+	select ARCH_INLINE_READ_LOCK if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_KEEP_MEMBLOCK
+	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_USE_GNU_PROPERTY
+	select ARCH_USE_MEMTEST
+	select ARCH_USE_QUEUED_RWLOCKS
+	select ARCH_USE_QUEUED_SPINLOCKS
+	select ARCH_USE_SYM_ANNOTATIONS
+	select ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	select ARCH_SUPPORTS_HUGETLBFS
+	select ARCH_SUPPORTS_MEMORY_FAILURE
+	select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK
+	select ARCH_SUPPORTS_LTO_CLANG if CPU_LITTLE_ENDIAN
+	select ARCH_SUPPORTS_LTO_CLANG_THIN
+	select ARCH_SUPPORTS_CFI_CLANG
+	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
+	select ARCH_SUPPORTS_NUMA_BALANCING
+	select ARCH_SUPPORTS_PAGE_TABLE_CHECK
+	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
+	select ARCH_WANT_DEFAULT_BPF_JIT
+	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+	select ARCH_WANT_FRAME_POINTERS
+	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
+	select ARCH_WANT_LD_ORPHAN_WARN
+	select ARCH_WANTS_NO_INSTR
+	select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
+	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARM_AMBA
+	select ARM_ARCH_TIMER
+	select ARM_GIC
+	select AUDIT_ARCH_COMPAT_GENERIC
+	select ARM_GIC_V2M if PCI
+	select ARM_GIC_V3
+	select ARM_GIC_V3_ITS if PCI
+	select ARM_PSCI_FW
+	select BUILDTIME_TABLE_SORT
+	select CLONE_BACKWARDS
+	select COMMON_CLK
+	select CPU_PM if (SUSPEND || CPU_IDLE)
+	select CRC32
+	select DCACHE_WORD_ACCESS
+	select DMA_DIRECT_REMAP
+	select EDAC_SUPPORT
+	select FRAME_POINTER
+	select GENERIC_ALLOCATOR
+	select GENERIC_ARCH_TOPOLOGY
+	select GENERIC_CLOCKEVENTS_BROADCAST
+	select GENERIC_CPU_AUTOPROBE
+	select GENERIC_CPU_VULNERABILITIES
+	select GENERIC_EARLY_IOREMAP
+	select GENERIC_IDLE_POLL_SETUP
+	select GENERIC_IOREMAP
+	select GENERIC_IRQ_IPI
+	select GENERIC_IRQ_PROBE
+	select GENERIC_IRQ_SHOW
+	select GENERIC_IRQ_SHOW_LEVEL
+	select GENERIC_LIB_DEVMEM_IS_ALLOWED
+	select GENERIC_PCI_IOMAP
+	select GENERIC_PTDUMP
+	select GENERIC_SCHED_CLOCK
+	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_TIME_VSYSCALL
+	select GENERIC_GETTIMEOFDAY
+	select GENERIC_VDSO_TIME_NS
+	select HARDIRQS_SW_RESEND
+	select HAVE_MOVE_PMD
+	select HAVE_MOVE_PUD
+	select HAVE_PCI
+	select HAVE_ACPI_APEI if (ACPI && EFI)
+	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_BITREVERSE
+	select HAVE_ARCH_COMPILER_H
+	select HAVE_ARCH_HUGE_VMALLOC
+	select HAVE_ARCH_HUGE_VMAP
+	select HAVE_ARCH_JUMP_LABEL
+	select HAVE_ARCH_JUMP_LABEL_RELATIVE
+	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+	select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
+	select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
+	select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
+	# Some instrumentation may be unsound, hence EXPERT
+	select HAVE_ARCH_KCSAN if EXPERT
+	select HAVE_ARCH_KFENCE
+	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_MMAP_RND_BITS
+	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+	select HAVE_ARCH_PREL32_RELOCATIONS
+	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_STACKLEAK
+	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
+	select HAVE_ARCH_TRACEHOOK
+	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select HAVE_ARCH_VMAP_STACK
+	select HAVE_ARM_SMCCC
+	select HAVE_ASM_MODVERSIONS
+	select HAVE_EBPF_JIT
+	select HAVE_C_RECORDMCOUNT
+	select HAVE_CMPXCHG_DOUBLE
+	select HAVE_CMPXCHG_LOCAL
+	select HAVE_CONTEXT_TRACKING_USER
+	select HAVE_DEBUG_KMEMLEAK
+	select HAVE_DMA_CONTIGUOUS
+	select HAVE_DYNAMIC_FTRACE
+	select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
+		if DYNAMIC_FTRACE_WITH_REGS
+	select HAVE_EFFICIENT_UNALIGNED_ACCESS
+	select HAVE_FAST_GUP
+	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_ERROR_INJECTION
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_GCC_PLUGINS
+	select HAVE_HW_BREAKPOINT if PERF_EVENTS
+	select HAVE_IOREMAP_PROT
+	select HAVE_IRQ_TIME_ACCOUNTING
+	select HAVE_KVM
+	select HAVE_NMI
+	select HAVE_PERF_EVENTS
+	select HAVE_PERF_REGS
+	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_DYNAMIC_KEY
+	select HAVE_REGS_AND_STACK_ACCESS_API
+	select HAVE_POSIX_CPU_TIMERS_TASK_WORK
+	select HAVE_FUNCTION_ARG_ACCESS_API
+	select MMU_GATHER_RCU_TABLE_FREE
+	select HAVE_RSEQ
+	select HAVE_STACKPROTECTOR
+	select HAVE_SYSCALL_TRACEPOINTS
+	select HAVE_KPROBES
+	select HAVE_KRETPROBES
+	select HAVE_GENERIC_VDSO
+	select IRQ_DOMAIN
+	select IRQ_FORCED_THREADING
+	select KASAN_VMALLOC if KASAN
+	select LOCK_MM_AND_FIND_VMA
+	select MODULES_USE_ELF_RELA
+	select NEED_DMA_MAP_STATE
+	select NEED_SG_DMA_LENGTH
+	select OF
+	select OF_EARLY_FLATTREE
+	select PCI_DOMAINS_GENERIC if PCI
+	select PCI_ECAM if (ACPI && PCI)
+	select PCI_SYSCALL if PCI
+	select POWER_RESET
+	select POWER_SUPPLY
+	select SPARSE_IRQ
+	select SWIOTLB
+	select SYSCTL_EXCEPTION_TRACE
+	select THREAD_INFO_IN_TASK
+	select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
+	select TRACE_IRQFLAGS_SUPPORT
+	select TRACE_IRQFLAGS_NMI_SUPPORT
+	select HAVE_SOFTIRQ_ON_OWN_STACK
+	help
+	  ARM 64-bit (AArch64) Linux support.
+
+config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+	def_bool CC_IS_CLANG
+	# https://github.com/ClangBuiltLinux/linux/issues/1507
+	depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS
+
+config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+	def_bool CC_IS_GCC
+	depends on $(cc-option,-fpatchable-function-entry=2)
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS
+
+config 64BIT
+	def_bool y
+
+config MMU
+	def_bool y
+
+config ARM64_PAGE_SHIFT
+	int
+	default 16 if ARM64_64K_PAGES
+	default 14 if ARM64_16K_PAGES
+	default 12
+
+config ARM64_CONT_PTE_SHIFT
+	int
+	default 5 if ARM64_64K_PAGES
+	default 7 if ARM64_16K_PAGES
+	default 4
+
+config ARM64_CONT_PMD_SHIFT
+	int
+	default 5 if ARM64_64K_PAGES
+	default 5 if ARM64_16K_PAGES
+	default 4
+
+config ARCH_MMAP_RND_BITS_MIN
+	default 14 if ARM64_64K_PAGES
+	default 16 if ARM64_16K_PAGES
+	default 18
+
+# max bits determined by the following formula:
+#  VA_BITS - PAGE_SHIFT - 3
+config ARCH_MMAP_RND_BITS_MAX
+	default 19 if ARM64_VA_BITS=36
+	default 24 if ARM64_VA_BITS=39
+	default 27 if ARM64_VA_BITS=42
+	default 30 if ARM64_VA_BITS=47
+	default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES
+	default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES
+	default 33 if ARM64_VA_BITS=48
+	default 14 if ARM64_64K_PAGES
+	default 16 if ARM64_16K_PAGES
+	default 18
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+	default 7 if ARM64_64K_PAGES
+	default 9 if ARM64_16K_PAGES
+	default 11
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+	default 16
+
+config NO_IOPORT_MAP
+	def_bool y if !PCI
+
+config STACKTRACE_SUPPORT
+	def_bool y
+
+config ILLEGAL_POINTER_VALUE
+	hex
+	default 0xdead000000000000
+
+config LOCKDEP_SUPPORT
+	def_bool y
+
+config GENERIC_BUG
+	def_bool y
+	depends on BUG
+
+config GENERIC_BUG_RELATIVE_POINTERS
+	def_bool y
+	depends on GENERIC_BUG
+
+config GENERIC_HWEIGHT
+	def_bool y
+
+config GENERIC_CSUM
+	def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+	def_bool y
+
+config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
+	def_bool y
+
+config SMP
+	def_bool y
+
+config KERNEL_MODE_NEON
+	def_bool y
+
+config FIX_EARLYCON_MEM
+	def_bool y
+
+config PGTABLE_LEVELS
+	int
+	default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36
+	default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
+	default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
+	default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
+	default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
+	default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
+
+config ARCH_SUPPORTS_UPROBES
+	def_bool y
+
+config ARCH_PROC_KCORE_TEXT
+	def_bool y
+
+config BROKEN_GAS_INST
+	def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\tnop\n.endr\n)
+
+config KASAN_SHADOW_OFFSET
+	hex
+	depends on KASAN_GENERIC || KASAN_SW_TAGS
+	default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
+	default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
+	default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
+	default 0xdfffffc000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
+	default 0xdffffff800000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
+	default 0xefff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
+	default 0xefffc00000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
+	default 0xeffffe0000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
+	default 0xefffffc000000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
+	default 0xeffffff800000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
+	default 0xffffffffffffffff
+
+source "arch/arm64/Kconfig.platforms"
+
+menu "Kernel Features"
+
+menu "ARM errata workarounds via the alternatives framework"
+
+config AMPERE_ERRATUM_AC03_CPU_38
+	bool "AmpereOne: AC03_CPU_38: Certain bits in the Virtualization Translation Control Register and Translation Control Registers do not follow RES0 semantics"
+	default y
+	help
+	  This option adds an alternative code sequence to work around Ampere
+	  erratum AC03_CPU_38 on AmpereOne.
+
+	  The affected design reports FEAT_HAFDBS as not implemented in
+	  ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0
+	  as required by the architecture. The unadvertised HAFDBS
+	  implementation suffers from an additional erratum where hardware
+	  A/D updates can occur after a PTE has been marked invalid.
+
+	  The workaround forces KVM to explicitly set VTCR_EL2.HA to 0,
+	  which avoids enabling unadvertised hardware Access Flag management
+	  at stage-2.
+
+	  If unsure, say Y.
+
+config ARM64_WORKAROUND_CLEAN_CACHE
+	bool
+
+config ARM64_ERRATUM_826319
+	bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted"
+	default y
+	select ARM64_WORKAROUND_CLEAN_CACHE
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 826319 on Cortex-A53 parts up to r0p2 with an AMBA 4 ACE or
+	  AXI master interface and an L2 cache.
+
+	  If a Cortex-A53 uses an AMBA AXI4 ACE interface to other processors
+	  and is unable to accept a certain write via this interface, it will
+	  not progress on read data presented on the read data channel and the
+	  system can deadlock.
+
+	  The workaround promotes data cache clean instructions to
+	  data cache clean-and-invalidate.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_827319
+	bool "Cortex-A53: 827319: Data cache clean instructions might cause overlapping transactions to the interconnect"
+	default y
+	select ARM64_WORKAROUND_CLEAN_CACHE
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 827319 on Cortex-A53 parts up to r0p2 with an AMBA 5 CHI
+	  master interface and an L2 cache.
+
+	  Under certain conditions this erratum can cause a clean line eviction
+	  to occur at the same time as another transaction to the same address
+	  on the AMBA 5 CHI interface, which can cause data corruption if the
+	  interconnect reorders the two transactions.
+
+	  The workaround promotes data cache clean instructions to
+	  data cache clean-and-invalidate.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_824069
+	bool "Cortex-A53: 824069: Cache line might not be marked as clean after a CleanShared snoop"
+	default y
+	select ARM64_WORKAROUND_CLEAN_CACHE
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 824069 on Cortex-A53 parts up to r0p2 when it is connected
+	  to a coherent interconnect.
+
+	  If a Cortex-A53 processor is executing a store or prefetch for
+	  write instruction at the same time as a processor in another
+	  cluster is executing a cache maintenance operation to the same
+	  address, then this erratum might cause a clean cache line to be
+	  incorrectly marked as dirty.
+
+	  The workaround promotes data cache clean instructions to
+	  data cache clean-and-invalidate.
+	  Please note that this option does not necessarily enable the
+	  workaround, as it depends on the alternative framework, which will
+	  only patch the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_819472
+	bool "Cortex-A53: 819472: Store exclusive instructions might cause data corruption"
+	default y
+	select ARM64_WORKAROUND_CLEAN_CACHE
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 819472 on Cortex-A53 parts up to r0p1 with an L2 cache
+	  present when it is connected to a coherent interconnect.
+
+	  If the processor is executing a load and store exclusive sequence at
+	  the same time as a processor in another cluster is executing a cache
+	  maintenance operation to the same address, then this erratum might
+	  cause data corruption.
+
+	  The workaround promotes data cache clean instructions to
+	  data cache clean-and-invalidate.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_832075
+	bool "Cortex-A57: 832075: possible deadlock on mixing exclusive memory accesses with device loads"
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 832075 on Cortex-A57 parts up to r1p2.
+
+	  Affected Cortex-A57 parts might deadlock when exclusive load/store
+	  instructions to Write-Back memory are mixed with Device loads.
+
+	  The workaround is to promote device loads to use Load-Acquire
+	  semantics.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_834220
+	bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+	depends on KVM
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 834220 on Cortex-A57 parts up to r1p2.
+
+	  Affected Cortex-A57 parts might report a Stage 2 translation
+	  fault as the result of a Stage 1 fault for load crossing a
+	  page boundary when there is a permission or device memory
+	  alignment fault at Stage 1 and a translation fault at Stage 2.
+
+	  The workaround is to verify that the Stage 1 translation
+	  doesn't generate a fault before handling the Stage 2 fault.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_1742098
+	bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
+	depends on COMPAT
+	default y
+	help
+	  This option removes the AES hwcap for aarch32 user-space to
+	  workaround erratum 1742098 on Cortex-A57 and Cortex-A72.
+
+	  Affected parts may corrupt the AES state if an interrupt is
+	  taken between a pair of AES instructions. These instructions
+	  are only present if the cryptography extensions are present.
+	  All software should have a fallback implementation for CPUs
+	  that don't implement the cryptography extensions.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_845719
+	bool "Cortex-A53: 845719: a load might read incorrect data"
+	depends on COMPAT
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 845719 on Cortex-A53 parts up to r0p4.
+
+	  When running a compat (AArch32) userspace on an affected Cortex-A53
+	  part, a load at EL0 from a virtual address that matches the bottom 32
+	  bits of the virtual address used by a recent load at (AArch64) EL1
+	  might return incorrect data.
+
+	  The workaround is to write the contextidr_el1 register on exception
+	  return to a 32-bit task.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_843419
+	bool "Cortex-A53: 843419: A load or store might access an incorrect address"
+	default y
+	select ARM64_MODULE_PLTS if MODULES
+	help
+	  This option links the kernel with '--fix-cortex-a53-843419' and
+	  enables PLT support to replace certain ADRP instructions, which can
+	  cause subsequent memory accesses to use an incorrect address on
+	  Cortex-A53 parts up to r0p4.
+
+	  If unsure, say Y.
+
+config ARM64_LD_HAS_FIX_ERRATUM_843419
+	def_bool $(ld-option,--fix-cortex-a53-843419)
+
+config ARM64_ERRATUM_1024718
+	bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
+	default y
+	help
+	  This option adds a workaround for ARM Cortex-A55 Erratum 1024718.
+
+	  Affected Cortex-A55 cores (all revisions) could cause incorrect
+	  update of the hardware dirty bit when the DBM/AP bits are updated
+	  without a break-before-make. The workaround is to disable the usage
+	  of hardware DBM locally on the affected cores. CPUs not affected by
+	  this erratum will continue to use the feature.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_1418040
+	bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
+	default y
+	depends on COMPAT
+	help
+	  This option adds a workaround for ARM Cortex-A76/Neoverse-N1
+	  errata 1188873 and 1418040.
+
+	  Affected Cortex-A76/Neoverse-N1 cores (r0p0 to r3p1) could
+	  cause register corruption when accessing the timer registers
+	  from AArch32 userspace.
+
+	  If unsure, say Y.
+
+config ARM64_WORKAROUND_SPECULATIVE_AT
+	bool
+
+config ARM64_ERRATUM_1165522
+	bool "Cortex-A76: 1165522: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+	default y
+	select ARM64_WORKAROUND_SPECULATIVE_AT
+	help
+	  This option adds a workaround for ARM Cortex-A76 erratum 1165522.
+
+	  Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could end-up with
+	  corrupted TLBs by speculating an AT instruction during a guest
+	  context switch.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_1319367
+	bool "Cortex-A57/A72: 1319537: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+	default y
+	select ARM64_WORKAROUND_SPECULATIVE_AT
+	help
+	  This option adds workarounds for ARM Cortex-A57 erratum 1319537
+	  and A72 erratum 1319367.
+
+	  Cortex-A57 and A72 cores could end-up with corrupted TLBs by
+	  speculating an AT instruction during a guest context switch.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_1530923
+	bool "Cortex-A55: 1530923: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+	default y
+	select ARM64_WORKAROUND_SPECULATIVE_AT
+	help
+	  This option adds a workaround for ARM Cortex-A55 erratum 1530923.
+
+	  Affected Cortex-A55 cores (r0p0, r0p1, r1p0, r2p0) could end-up with
+	  corrupted TLBs by speculating an AT instruction during a guest
+	  context switch.
+
+	  If unsure, say Y.
+
+config ARM64_WORKAROUND_REPEAT_TLBI
+	bool
+
+config ARM64_ERRATUM_2441007
+	bool "Cortex-A55: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
+	default y
+	select ARM64_WORKAROUND_REPEAT_TLBI
+	help
+	  This option adds a workaround for ARM Cortex-A55 erratum #2441007.
+
+	  Under very rare circumstances, affected Cortex-A55 CPUs
+	  may not handle a race between a break-before-make sequence on one
+	  CPU, and another CPU accessing the same page. This could allow a
+	  store to a page that has been unmapped.
+
+	  Work around this by adding the affected CPUs to the list that needs
+	  TLB sequences to be done twice.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_1286807
+	bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
+	default y
+	select ARM64_WORKAROUND_REPEAT_TLBI
+	help
+	  This option adds a workaround for ARM Cortex-A76 erratum 1286807.
+
+	  On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual
+	  address for a cacheable mapping of a location is being
+	  accessed by a core while another core is remapping the virtual
+	  address to a new physical page using the recommended
+	  break-before-make sequence, then under very rare circumstances
+	  TLBI+DSB completes before a read using the translation being
+	  invalidated has been observed by other observers. The
+	  workaround repeats the TLBI+DSB operation.
+
+config ARM64_ERRATUM_1463225
+	bool "Cortex-A76: Software Step might prevent interrupt recognition"
+	default y
+	help
+	  This option adds a workaround for Arm Cortex-A76 erratum 1463225.
+
+	  On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
+	  of a system call instruction (SVC) can prevent recognition of
+	  subsequent interrupts when software stepping is disabled in the
+	  exception handler of the system call and either kernel debugging
+	  is enabled or VHE is in use.
+
+	  Work around the erratum by triggering a dummy step exception
+	  when handling a system call from a task that is being stepped
+	  in a VHE configuration of the kernel.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_1542419
+	bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
+	default y
+	help
+	  This option adds a workaround for ARM Neoverse-N1 erratum
+	  1542419.
+
+	  Affected Neoverse-N1 cores could execute a stale instruction when
+	  modified by another CPU. The workaround depends on a firmware
+	  counterpart.
+
+	  Workaround the issue by hiding the DIC feature from EL0. This
+	  forces user-space to perform cache maintenance.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_1508412
+	bool "Cortex-A77: 1508412: workaround deadlock on sequence of NC/Device load and store exclusive or PAR read"
+	default y
+	help
+	  This option adds a workaround for Arm Cortex-A77 erratum 1508412.
+
+	  Affected Cortex-A77 cores (r0p0, r1p0) could deadlock on a sequence
+	  of a store-exclusive or read of PAR_EL1 and a load with device or
+	  non-cacheable memory attributes. The workaround depends on a firmware
+	  counterpart.
+
+	  KVM guests must also have the workaround implemented or they can
+	  deadlock the system.
+
+	  Work around the issue by inserting DMB SY barriers around PAR_EL1
+	  register reads and warning KVM users. The DMB barrier is sufficient
+	  to prevent a speculative PAR_EL1 read.
+
+	  If unsure, say Y.
+
+config ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
+	bool
+
+config ARM64_ERRATUM_2051678
+	bool "Cortex-A510: 2051678: disable Hardware Update of the page table dirty bit"
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 2051678.
+	  Affected Cortex-A510 might not respect the ordering rules for
+	  hardware update of the page table's dirty bit. The workaround
+	  is to not enable the feature on affected CPUs.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2077057
+	bool "Cortex-A510: 2077057: workaround software-step corrupting SPSR_EL2"
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 2077057.
+	  Affected Cortex-A510 may corrupt SPSR_EL2 when a step exception is
+	  expected, but a Pointer Authentication trap is taken instead. The
+	  erratum causes SPSR_EL1 to be copied to SPSR_EL2, which could allow
+	  EL1 to cause a return to EL2 with a guest controlled ELR_EL2.
+
+	  This can only happen when EL2 is stepping EL1.
+
+	  When these conditions occur, the SPSR_EL2 value is unchanged from the
+	  previous guest entry, and can be restored from the in-memory copy.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2658417
+	bool "Cortex-A510: 2658417: remove BF16 support due to incorrect result"
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 2658417.
+	  Affected Cortex-A510 (r0p0 to r1p1) may produce the wrong result for
+	  BFMMLA or VMMLA instructions in rare circumstances when a pair of
+	  A510 CPUs are using shared neon hardware.
+	  As the sharing is not
+	  discoverable by the kernel, hide the BF16 HWCAP to indicate that
+	  user-space should not be using these instructions.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2119858
+	bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
+	default y
+	depends on CORESIGHT_TRBE
+	select ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
+	help
+	  This option adds the workaround for ARM Cortex-A710/X2 erratum 2119858.
+
+	  Affected Cortex-A710/X2 cores could overwrite up to 3 cache lines of trace
+	  data at the base of the buffer (pointed to by TRBASER_EL1) in FILL mode in
+	  the event of a WRAP event.
+
+	  Work around the issue by always making sure we move the TRBPTR_EL1 by
+	  256 bytes before enabling the buffer and filling the first 256 bytes of
+	  the buffer with ETM ignore packets upon disabling.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2139208
+	bool "Neoverse-N2: 2139208: workaround TRBE overwriting trace data in FILL mode"
+	default y
+	depends on CORESIGHT_TRBE
+	select ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
+	help
+	  This option adds the workaround for ARM Neoverse-N2 erratum 2139208.
+
+	  Affected Neoverse-N2 cores could overwrite up to 3 cache lines of trace
+	  data at the base of the buffer (pointed to by TRBASER_EL1) in FILL mode in
+	  the event of a WRAP event.
+
+	  Work around the issue by always making sure we move the TRBPTR_EL1 by
+	  256 bytes before enabling the buffer and filling the first 256 bytes of
+	  the buffer with ETM ignore packets upon disabling.
+
+	  If unsure, say Y.
+
+config ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+	bool
+
+config ARM64_ERRATUM_2054223
+	bool "Cortex-A710: 2054223: workaround TSB instruction failing to flush trace"
+	default y
+	select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+	help
+	  Enable workaround for ARM Cortex-A710 erratum 2054223
+
+	  Affected cores may fail to flush the trace data on a TSB instruction, when
+	  the PE is in trace prohibited state. This will cause losing a few bytes
+	  of the trace cached.
+
+	  Workaround is to issue two TSB consecutively on affected cores.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2067961
+	bool "Neoverse-N2: 2067961: workaround TSB instruction failing to flush trace"
+	default y
+	select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+	help
+	  Enable workaround for ARM Neoverse-N2 erratum 2067961
+
+	  Affected cores may fail to flush the trace data on a TSB instruction, when
+	  the PE is in trace prohibited state. This will cause losing a few bytes
+	  of the trace cached.
+
+	  Workaround is to issue two TSB consecutively on affected cores.
+
+	  If unsure, say Y.
+
+config ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
+	bool
+
+config ARM64_ERRATUM_2253138
+	bool "Neoverse-N2: 2253138: workaround TRBE writing to address out-of-range"
+	depends on CORESIGHT_TRBE
+	default y
+	select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
+	help
+	  This option adds the workaround for ARM Neoverse-N2 erratum 2253138.
+
+	  Affected Neoverse-N2 cores might write to an out-of-range address, not reserved
+	  for TRBE. Under some conditions, the TRBE might generate a write to the next
+	  virtually addressed page following the last page of the TRBE address space
+	  (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base.
+
+	  Work around this in the driver by always making sure that there is a
+	  page beyond the TRBLIMITR_EL1.LIMIT, within the space allowed for the TRBE.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2224489
+	bool "Cortex-A710/X2: 2224489: workaround TRBE writing to address out-of-range"
+	depends on CORESIGHT_TRBE
+	default y
+	select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
+	help
+	  This option adds the workaround for ARM Cortex-A710/X2 erratum 2224489.
+
+	  Affected Cortex-A710/X2 cores might write to an out-of-range address, not reserved
+	  for TRBE. Under some conditions, the TRBE might generate a write to the next
+	  virtually addressed page following the last page of the TRBE address space
+	  (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base.
+
+	  Work around this in the driver by always making sure that there is a
+	  page beyond the TRBLIMITR_EL1.LIMIT, within the space allowed for the TRBE.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2441009
+	bool "Cortex-A510: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
+	default y
+	select ARM64_WORKAROUND_REPEAT_TLBI
+	help
+	  This option adds a workaround for ARM Cortex-A510 erratum #2441009.
+
+	  Under very rare circumstances, affected Cortex-A510 CPUs
+	  may not handle a race between a break-before-make sequence on one
+	  CPU, and another CPU accessing the same page. This could allow a
+	  store to a page that has been unmapped.
+
+	  Work around this by adding the affected CPUs to the list that needs
+	  TLB sequences to be done twice.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2064142
+	bool "Cortex-A510: 2064142: workaround TRBE register writes while disabled"
+	depends on CORESIGHT_TRBE
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 2064142.
+
+	  Affected Cortex-A510 core might fail to write into system registers after the
+	  TRBE has been disabled. Under some conditions after the TRBE has been disabled
+	  writes into TRBE registers TRBLIMITR_EL1, TRBPTR_EL1, TRBBASER_EL1, TRBSR_EL1,
+	  and TRBTRG_EL1 will be ignored and will not take effect.
+
+	  Work around this in the driver by executing TSB CSYNC and DSB after collection
+	  is stopped and before performing a system register write to one of the affected
+	  registers.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2038923
+	bool "Cortex-A510: 2038923: workaround TRBE corruption with enable"
+	depends on CORESIGHT_TRBE
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 2038923.
+
+	  Affected Cortex-A510 core might cause an inconsistent view on whether trace is
+	  prohibited within the CPU. As a result, the trace buffer or trace buffer state
+	  might be corrupted. This happens after TRBE buffer has been enabled by setting
+	  TRBLIMITR_EL1.E, followed by just a single context synchronization event before
+	  execution changes from a context, in which trace is prohibited to one where it
+	  isn't, or vice versa. In these mentioned conditions, the view of whether trace
+	  is prohibited is inconsistent between parts of the CPU, and the trace buffer or
+	  the trace buffer state might be corrupted.
+
+	  Work around this in the driver by preventing an inconsistent view of whether the
+	  trace is prohibited or not based on TRBLIMITR_EL1.E by immediately following a
+	  change to TRBLIMITR_EL1.E with at least one ISB instruction before an ERET, or
+	  two ISB instructions if no ERET is to take place.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_1902691
+	bool "Cortex-A510: 1902691: workaround TRBE trace corruption"
+	depends on CORESIGHT_TRBE
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 1902691.
+
+	  Affected Cortex-A510 core might cause trace data corruption, when being written
+	  into the memory. Effectively TRBE is broken and hence cannot be used to capture
+	  trace data.
+
+	  Work around this problem in the driver by just preventing TRBE initialization on
+	  affected cpus. The firmware must have disabled the access to TRBE for the kernel
+	  on such implementations. This will cover the kernel for any firmware that doesn't
+	  do this already.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2457168
+	bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
+	depends on ARM64_AMU_EXTN
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 2457168.
+
+	  The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate
+	  as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments
+	  incorrectly giving a significantly higher output value.
+
+	  Work around this problem by returning 0 when reading the affected counter in
+	  key locations that results in disabling all users of this counter. This effect
+	  is the same as firmware disabling affected counters.
+
+	  If unsure, say Y.
+
+config ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+	bool
+
+config ARM64_ERRATUM_2966298
+	bool "Cortex-A520: 2966298: workaround for speculatively executed unprivileged load"
+	select ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A520 erratum 2966298.
+
+	  On an affected Cortex-A520 core, a speculatively executed unprivileged
+	  load might leak data from a privileged level via a cache side channel.
+
+	  Work around this problem by executing a TLBI before returning to EL0.
+
+	  If unsure, say Y.
+
+config CAVIUM_ERRATUM_22375
+	bool "Cavium erratum 22375, 24313"
+	default y
+	help
+	  Enable workaround for errata 22375 and 24313.
+
+	  This implements two gicv3-its errata workarounds for ThunderX. Both
+	  with a small impact affecting only ITS table allocation.
+
+	    erratum 22375: only alloc 8MB table size
+	    erratum 24313: ignore memory access type
+
+	  The fixes are in ITS initialization and basically ignore memory access
+	  type and table size provided by the TYPER and BASER registers.
+
+	  If unsure, say Y.
+
+config CAVIUM_ERRATUM_23144
+	bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
+	depends on NUMA
+	default y
+	help
+	  ITS SYNC command hang for cross node io and collections/cpu mapping.
+
+	  If unsure, say Y.
+
+config CAVIUM_ERRATUM_23154
+	bool "Cavium errata 23154 and 38545: GICv3 lacks HW synchronisation"
+	default y
+	help
+	  The ThunderX GICv3 implementation requires a modified version for
+	  reading the IAR status to ensure data synchronization
+	  (access to icc_iar1_el1 is not sync'ed before and after).
+
+	  It also suffers from erratum 38545 (also present on Marvell's
+	  OcteonTX and OcteonTX2), resulting in deactivated interrupts being
+	  spuriously presented to the CPU interface.
+
+	  If unsure, say Y.
+
+config CAVIUM_ERRATUM_27456
+	bool "Cavium erratum 27456: Broadcast TLBI instructions may cause icache corruption"
+	default y
+	help
+	  On ThunderX T88 pass 1.x through 2.1 parts, broadcast TLBI
+	  instructions may cause the icache to become corrupted if it
+	  contains data for a non-current ASID. The fix is to
+	  invalidate the icache when changing the mm context.
+
+	  If unsure, say Y.
+
+config CAVIUM_ERRATUM_30115
+	bool "Cavium erratum 30115: Guest may disable interrupts in host"
+	default y
+	help
+	  On ThunderX T88 pass 1.x through 2.2, T81 pass 1.0 through
+	  1.2, and T83 Pass 1.0, KVM guest execution may disable
+	  interrupts in host. Trapping both GICv3 group-0 and group-1
+	  accesses sidesteps the issue.
+
+	  If unsure, say Y.
+
+config CAVIUM_TX2_ERRATUM_219
+	bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
+	default y
+	help
+	  On Cavium ThunderX2, a load, store or prefetch instruction between a
+	  TTBR update and the corresponding context synchronizing operation can
+	  cause a spurious Data Abort to be delivered to any hardware thread in
+	  the CPU core.
+
+	  Work around the issue by avoiding the problematic code sequence and
+	  trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
+	  trap handler performs the corresponding register access, skips the
+	  instruction and ensures context synchronization by virtue of the
+	  exception return.
+
+	  If unsure, say Y.
+
+config FUJITSU_ERRATUM_010001
+	bool "Fujitsu-A64FX erratum E#010001: Undefined fault may occur wrongly"
+	default y
+	help
+	  This option adds a workaround for Fujitsu-A64FX erratum E#010001.
+	  On some variants of the Fujitsu-A64FX cores ver(1.0, 1.1), memory
+	  accesses may cause undefined fault (Data abort, DFSC=0b111111).
+	  This fault occurs under a specific hardware condition when a
+	  load/store instruction performs an address translation using:
+	  case-1  TTBR0_EL1 with TCR_EL1.NFD0 == 1.
+	  case-2  TTBR0_EL2 with TCR_EL2.NFD0 == 1.
+	  case-3  TTBR1_EL1 with TCR_EL1.NFD1 == 1.
+	  case-4  TTBR1_EL2 with TCR_EL2.NFD1 == 1.
+
+	  The workaround is to ensure these bits are clear in TCR_ELx.
+	  The workaround only affects the Fujitsu-A64FX.
+
+	  If unsure, say Y.
+
+config HISILICON_ERRATUM_161600802
+	bool "Hip07 161600802: Erroneous redistributor VLPI base"
+	default y
+	help
+	  The HiSilicon Hip07 SoC uses the wrong redistributor base
+	  when issued ITS commands such as VMOVP and VMAPP, and requires
+	  a 128kB offset to be applied to the target address in these commands.
+
+	  If unsure, say Y.
+
+config QCOM_FALKOR_ERRATUM_1003
+	bool "Falkor E1003: Incorrect translation due to ASID change"
+	default y
+	help
+	  On Falkor v1, an incorrect ASID may be cached in the TLB when ASID
+	  and BADDR are changed together in TTBRx_EL1. Since we keep the ASID
+	  in TTBR1_EL1, this situation only occurs in the entry trampoline and
+	  then only for entries in the walk cache, since the leaf translation
+	  is unchanged. Work around the erratum by invalidating the walk cache
+	  entries for the trampoline before entering the kernel proper.
+
+config QCOM_FALKOR_ERRATUM_1009
+	bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
+	default y
+	select ARM64_WORKAROUND_REPEAT_TLBI
+	help
+	  On Falkor v1, the CPU may prematurely complete a DSB following a
+	  TLBI xxIS invalidate maintenance operation. Repeat the TLBI operation
+	  one more time to fix the issue.
+
+	  If unsure, say Y.
+
+config QCOM_QDF2400_ERRATUM_0065
+	bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
+	default y
+	help
+	  On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
+	  ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
+	  been indicated as 16Bytes (0xf), not 8Bytes (0x7).
+
+	  If unsure, say Y.
+
+config QCOM_FALKOR_ERRATUM_E1041
+	bool "Falkor E1041: Speculative instruction fetches might cause errant memory access"
+	default y
+	help
+	  Falkor CPU may speculatively fetch instructions from an improper
+	  memory location when MMU translation is changed from SCTLR_ELn[M]=1
+	  to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem.
+
+	  If unsure, say Y.
+
+config NVIDIA_CARMEL_CNP_ERRATUM
+	bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+	default y
+	help
+	  If CNP is enabled on Carmel cores, non-sharable TLBIs on a core will not
+	  invalidate shared TLB entries installed by a different core, as it would
+	  on standard ARM cores.
+
+	  If unsure, say Y.
+
+config SOCIONEXT_SYNQUACER_PREITS
+	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
+	default y
+	help
+	  Socionext Synquacer SoCs implement a separate h/w block to generate
+	  MSI doorbell writes with non-zero values for the device ID.
+
+	  If unsure, say Y.
+
+endmenu # "ARM errata workarounds via the alternatives framework"
+
+choice
+	prompt "Page size"
+	default ARM64_4K_PAGES
+	help
+	  Page size (translation granule) configuration.
+
+config ARM64_4K_PAGES
+	bool "4KB"
+	help
+	  This feature enables 4KB pages support.
+
+config ARM64_16K_PAGES
+	bool "16KB"
+	help
+	  The system will use 16KB pages support. AArch32 emulation
+	  requires applications compiled with 16K (or a multiple of 16K)
+	  aligned segments.
+
+config ARM64_64K_PAGES
+	bool "64KB"
+	help
+	  This feature enables 64KB pages support (4KB by default)
+	  allowing only two levels of page tables and faster TLB
+	  look-up. AArch32 emulation requires applications compiled
+	  with 64K aligned segments.
+
+endchoice
+
+choice
+	prompt "Virtual address space size"
+	default ARM64_VA_BITS_39 if ARM64_4K_PAGES
+	default ARM64_VA_BITS_47 if ARM64_16K_PAGES
+	default ARM64_VA_BITS_42 if ARM64_64K_PAGES
+	help
+	  Allows choosing one of multiple possible virtual address
+	  space sizes. The level of translation table is determined by
+	  a combination of page size and virtual address space size.
+
+config ARM64_VA_BITS_36
+	bool "36-bit" if EXPERT
+	depends on ARM64_16K_PAGES
+
+config ARM64_VA_BITS_39
+	bool "39-bit"
+	depends on ARM64_4K_PAGES
+
+config ARM64_VA_BITS_42
+	bool "42-bit"
+	depends on ARM64_64K_PAGES
+
+config ARM64_VA_BITS_47
+	bool "47-bit"
+	depends on ARM64_16K_PAGES
+
+config ARM64_VA_BITS_48
+	bool "48-bit"
+
+config ARM64_VA_BITS_52
+	bool "52-bit"
+	depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
+	help
+	  Enable 52-bit virtual addressing for userspace when explicitly
+	  requested via a hint to mmap(). The kernel will also use 52-bit
+	  virtual addresses for its own mappings (provided HW support for
+	  this feature is available, otherwise it reverts to 48-bit).
+
+	  NOTE: Enabling 52-bit virtual addressing in conjunction with
+	  ARMv8.3 Pointer Authentication will result in the PAC being
+	  reduced from 7 bits to 3 bits, which may have a significant
+	  impact on its susceptibility to brute-force attacks.
+
+	  If unsure, select 48-bit virtual addressing instead.
+
+endchoice
+
+config ARM64_FORCE_52BIT
+	bool "Force 52-bit virtual addresses for userspace"
+	depends on ARM64_VA_BITS_52 && EXPERT
+	help
+	  For systems with 52-bit userspace VAs enabled, the kernel will attempt
+	  to maintain compatibility with older software by providing 48-bit VAs
+	  unless a hint is supplied to mmap.
+
+	  This configuration option disables the 48-bit compatibility logic, and
+	  forces all userspace addresses to be 52-bit on HW that supports it. One
+	  should only enable this configuration option for stress testing userspace
+	  memory management code. If unsure say N here.
+
+config ARM64_VA_BITS
+	int
+	default 36 if ARM64_VA_BITS_36
+	default 39 if ARM64_VA_BITS_39
+	default 42 if ARM64_VA_BITS_42
+	default 47 if ARM64_VA_BITS_47
+	default 48 if ARM64_VA_BITS_48
+	default 52 if ARM64_VA_BITS_52
+
+choice
+	prompt "Physical address space size"
+	default ARM64_PA_BITS_48
+	help
+	  Choose the maximum physical address range that the kernel will
+	  support.
+
+config ARM64_PA_BITS_48
+	bool "48-bit"
+
+config ARM64_PA_BITS_52
+	bool "52-bit (ARMv8.2)"
+	depends on ARM64_64K_PAGES
+	depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
+	help
+	  Enable support for a 52-bit physical address space, introduced as
+	  part of the ARMv8.2-LPA extension.
+
+	  With this enabled, the kernel will also continue to work on CPUs that
+	  do not support ARMv8.2-LPA, but with some added memory overhead (and
+	  minor performance overhead).
+
+endchoice
+
+config ARM64_PA_BITS
+	int
+	default 48 if ARM64_PA_BITS_48
+	default 52 if ARM64_PA_BITS_52
+
+choice
+	prompt "Endianness"
+	default CPU_LITTLE_ENDIAN
+	help
+	  Select the endianness of data accesses performed by the CPU. Userspace
+	  applications will need to be compiled and linked for the endianness
+	  that is selected here.
+
+config CPU_BIG_ENDIAN
+	bool "Build big-endian kernel"
+	depends on !LD_IS_LLD || LLD_VERSION >= 130000
+	# https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
+	depends on AS_IS_GNU || AS_VERSION >= 150000
+	help
+	  Say Y if you plan on running a kernel with a big-endian userspace.
+
+config CPU_LITTLE_ENDIAN
+	bool "Build little-endian kernel"
+	help
+	  Say Y if you plan on running a kernel with a little-endian userspace.
+	  This is usually the case for distributions targeting arm64.
+
+endchoice
+
+config SCHED_MC
+	bool "Multi-core scheduler support"
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
+config SCHED_CLUSTER
+	bool "Cluster scheduler support"
+	help
+	  Cluster scheduler support improves the CPU scheduler's decision
+	  making when dealing with machines that have clusters of CPUs.
+	  Cluster usually means a couple of CPUs which are placed closely
+	  by sharing mid-level caches, last-level cache tags or internal
+	  busses.
+
+config SCHED_SMT
+	bool "SMT scheduler support"
+	help
+	  Improves the CPU scheduler's decision making when dealing with
+	  MultiThreading at a cost of slightly increased overhead in some
+	  places. If unsure say N here.
+
+config NR_CPUS
+	int "Maximum number of CPUs (2-4096)"
+	range 2 4096
+	default "256"
+
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs"
+	select GENERIC_IRQ_MIGRATION
+	help
+	  Say Y here to experiment with turning CPUs off and on. CPUs
+	  can be controlled through /sys/devices/system/cpu.
+
+# Common NUMA Features
+config NUMA
+	bool "NUMA Memory Allocation and Scheduler Support"
+	select GENERIC_ARCH_NUMA
+	select ACPI_NUMA if ACPI
+	select OF_NUMA
+	select HAVE_SETUP_PER_CPU_AREA
+	select NEED_PER_CPU_EMBED_FIRST_CHUNK
+	select NEED_PER_CPU_PAGE_FIRST_CHUNK
+	select USE_PERCPU_NUMA_NODE_ID
+	help
+	  Enable NUMA (Non-Uniform Memory Access) support.
+
+	  The kernel will try to allocate memory used by a CPU on the
+	  local memory of the CPU and add some more
+	  NUMA awareness to the kernel.
+
+config NODES_SHIFT
+	int "Maximum NUMA Nodes (as a power of 2)"
+	range 1 10
+	default "4"
+	depends on NUMA
+	help
+	  Specify the maximum number of NUMA Nodes available on the target
+	  system. Increases memory reserved to accommodate various tables.
+
+source "kernel/Kconfig.hz"
+
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+	select SPARSEMEM_VMEMMAP_ENABLE
+	select SPARSEMEM_VMEMMAP
+
+config HW_PERF_EVENTS
+	def_bool y
+	depends on ARM_PMU
+
+# Supported by clang >= 7.0 or GCC >= 12.0.0
+config CC_HAVE_SHADOW_CALL_STACK
+	def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
+
+config PARAVIRT
+	bool "Enable paravirtualization code"
+	help
+	  This changes the kernel so it can modify itself when it is run
+	  under a hypervisor, potentially improving performance significantly
+	  over full virtualization.
+
+config PARAVIRT_TIME_ACCOUNTING
+	bool "Paravirtual steal time accounting"
+	select PARAVIRT
+	help
+	  Select this option to enable fine granularity task steal time
+	  accounting. Time spent executing other tasks in parallel with
+	  the current vCPU is discounted from the vCPU power. To account for
+	  that, there can be a small performance impact.
+
+	  If in doubt, say N here.
+
+config KEXEC
+	depends on PM_SLEEP_SMP
+	select KEXEC_CORE
+	bool "kexec system call"
+	help
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel. It is like a reboot
+	  but it is independent of the system firmware. And like a reboot
+	  you can start any kernel with it, not just Linux.
+
+config KEXEC_FILE
+	bool "kexec file based system call"
+	select KEXEC_CORE
+	select HAVE_IMA_KEXEC if IMA
+	help
+	  This is a new version of the kexec system call. This system call is
+	  file based and takes file descriptors as system call arguments
+	  for the kernel and initramfs, as opposed to the list of segments
+	  accepted by the previous system call.
+
+config KEXEC_SIG
+	bool "Verify kernel signature during kexec_file_load() syscall"
+	depends on KEXEC_FILE
+	help
+	  Select this option to verify a signature of the loaded kernel
+	  image. If configured, any attempt to load an image without a
+	  valid signature will fail.
+
+	  In addition to that option, you need to enable signature
+	  verification for the corresponding kernel image type being
+	  loaded in order for this to work.
+
+config KEXEC_IMAGE_VERIFY_SIG
+	bool "Enable Image signature verification support"
+	default y
+	depends on KEXEC_SIG
+	depends on EFI && SIGNED_PE_FILE_VERIFICATION
+	help
+	  Enable Image signature verification support.
+
+comment "Support for PE file signature verification disabled"
+	depends on KEXEC_SIG
+	depends on !EFI || !SIGNED_PE_FILE_VERIFICATION
+
+config CRASH_DUMP
+	bool "Build kdump crash kernel"
+	help
+	  Generate crash dump after being started by kexec. This should
+	  be normally only set in special crash dump kernels which are
+	  loaded in the main kernel with kexec-tools into a specially
+	  reserved region and then later executed after a crash by
+	  kdump/kexec.
+
+	  For more details see Documentation/admin-guide/kdump/kdump.rst
+
+config TRANS_TABLE
+	def_bool y
+	depends on HIBERNATION || KEXEC_CORE
+
+config XEN_DOM0
+	def_bool y
+	depends on XEN
+
+config XEN
+	bool "Xen guest support on ARM64"
+	depends on ARM64 && OF
+	select SWIOTLB_XEN
+	select PARAVIRT
+	help
+	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
+
+config ARCH_FORCE_MAX_ORDER
+	int
+	default "14" if ARM64_64K_PAGES
+	default "12" if ARM64_16K_PAGES
+	default "11"
+	help
+	  The kernel memory allocator divides physically contiguous memory
+	  blocks into "zones", where each zone is a power of two number of
+	  pages. This option selects the largest power of two that the kernel
+	  keeps in the memory allocator. If you need to allocate very large
+	  blocks of physically contiguous memory, then you may need to
+	  increase this value.
+
+	  This config option is actually maximum order plus one. For example,
+	  a value of 11 means that the largest free memory block is 2^10 pages.
+
+	  We make sure that we can allocate up to a HugePage size for each configuration.
+	  Hence we have:
+		MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2
+
+	  However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
+	  4M allocations matching the default size used by generic code.
+
+config UNMAP_KERNEL_AT_EL0
+	bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  be used to bypass MMU permission checks and leak kernel data to
+	  userspace. This can be defended against by unmapping the kernel
+	  when running in userspace, mapping it back in on exception entry
+	  via a trampoline page in the vector table.
+
+	  If unsure, say Y.
+
+config MITIGATE_SPECTRE_BRANCH_HISTORY
+	bool "Mitigate Spectre style attacks against branch history" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  make use of branch history to influence future speculation.
+	  When taking an exception from user-space, a sequence of branches
+	  or a firmware call overwrites the branch history.
+
+config RODATA_FULL_DEFAULT_ENABLED
+	bool "Apply r/o permissions of VM areas also to their linear aliases"
+	default y
+	help
+	  Apply read-only attributes of VM areas to the linear alias of
+	  the backing pages as well. This prevents code or read-only data
+	  from being modified (inadvertently or intentionally) via another
+	  mapping of the same memory page. This additional enhancement can
+	  be turned off at runtime by passing rodata=[off|on] (and turned on
+	  with rodata=full if this option is set to 'n')
+
+	  This requires the linear region to be mapped down to pages,
+	  which may adversely affect performance in some cases.
+
+config ARM64_SW_TTBR0_PAN
+	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+	help
+	  Enabling this option prevents the kernel from accessing
+	  user-space memory directly by pointing TTBR0_EL1 to a reserved
+	  zeroed area and reserved ASID. The user access routines
+	  restore the valid TTBR0_EL1 temporarily.
+
+config ARM64_TAGGED_ADDR_ABI
+	bool "Enable the tagged user addresses syscall ABI"
+	default y
+	help
+	  When this option is enabled, user applications can opt in to a
+	  relaxed ABI via prctl() allowing tagged addresses to be passed
+	  to system calls as pointer arguments. For details, see
+	  Documentation/arm64/tagged-address-abi.rst.
+
+menuconfig COMPAT
+	bool "Kernel support for 32-bit EL0"
+	depends on ARM64_4K_PAGES || EXPERT
+	select HAVE_UID16
+	select OLD_SIGSUSPEND3
+	select COMPAT_OLD_SIGACTION
+	help
+	  This option enables support for a 32-bit EL0 running under a 64-bit
+	  kernel at EL1. AArch32-specific components such as system calls,
+	  the user helper functions, VFP support and the ptrace interface are
+	  handled appropriately by the kernel.
+
+	  If you use a page size other than 4KB (i.e., 16KB or 64KB), please be aware
+	  that you will only be able to execute AArch32 binaries that were compiled
+	  with page size aligned segments.
+
+	  If you want to execute 32-bit userspace applications, say Y.
+
+if COMPAT
+
+config KUSER_HELPERS
+	bool "Enable kuser helpers page for 32-bit applications"
+	default y
+	help
+	  Warning: disabling this option may break 32-bit user programs.
+
+	  Provide kuser helpers to compat tasks. The kernel provides
+	  helper code to userspace in read only form at a fixed location
+	  to allow userspace to be independent of the CPU type fitted to
+	  the system. This permits binaries to be run on ARMv4 through
+	  to ARMv8 without modification.
+
+	  See Documentation/arm/kernel_user_helpers.rst for details.
+
+	  However, the fixed address nature of these helpers can be used
+	  by ROP (return oriented programming) authors when creating
+	  exploits.
+
+	  If all of the binaries and libraries which run on your platform
+	  are built specifically for your platform, and make no use of
+	  these helpers, then you can turn this option off to hinder
+	  such exploits. However, in that case, if a binary or library
+	  relying on those helpers is run, it will not function correctly.
+
+	  Say N here only if you are absolutely certain that you do not
+	  need these helpers; otherwise, the safe option is to say Y.
+
+config COMPAT_VDSO
+	bool "Enable vDSO for 32-bit applications"
+	depends on !CPU_BIG_ENDIAN
+	depends on (CC_IS_CLANG && LD_IS_LLD) || "$(CROSS_COMPILE_COMPAT)" != ""
+	select GENERIC_COMPAT_VDSO
+	default y
+	help
+	  Place in the process address space of 32-bit applications an
+	  ELF shared object providing fast implementations of gettimeofday
+	  and clock_gettime.
+
+	  You must have a 32-bit build of glibc 2.22 or later for programs
+	  to seamlessly take advantage of this.
+
+config THUMB2_COMPAT_VDSO
+	bool "Compile the 32-bit vDSO for Thumb-2 mode" if EXPERT
+	depends on COMPAT_VDSO
+	default y
+	help
+	  Compile the compat vDSO with '-mthumb -fomit-frame-pointer' if y,
+	  otherwise with '-marm'.
+
+config COMPAT_ALIGNMENT_FIXUPS
+	bool "Fix up misaligned multi-word loads and stores in user space"
+
+menuconfig ARMV8_DEPRECATED
+	bool "Emulate deprecated/obsolete ARMv8 instructions"
+	depends on SYSCTL
+	help
+	  Legacy software support may require certain instructions
+	  that have been deprecated or obsoleted in the architecture.
+
+	  Enable this config to enable selective emulation of these
+	  features.
+
+	  If unsure, say Y
+
+if ARMV8_DEPRECATED
+
+config SWP_EMULATION
+	bool "Emulate SWP/SWPB instructions"
+	help
+	  ARMv8 obsoletes the use of A32 SWP/SWPB instructions such that
+	  they are always undefined. Say Y here to enable software
+	  emulation of these instructions for userspace using LDXR/STXR.
+	  This feature can be controlled at runtime with the abi.swp
+	  sysctl which is disabled by default.
+
+	  In some older versions of glibc [<=2.8] SWP is used during futex
+	  trylock() operations with the assumption that the code will not
+	  be preempted. This invalid assumption may be more likely to fail
+	  with SWP emulation enabled, leading to deadlock of the user
+	  application.
+
+	  NOTE: when accessing uncached shared regions, LDXR/STXR rely
+	  on an external transaction monitoring block called a global
+	  monitor to maintain update atomicity. If your system does not
+	  implement a global monitor, this option can cause programs that
+	  perform SWP operations to uncached memory to deadlock.
+ + If unsure, say Y + +config CP15_BARRIER_EMULATION + bool "Emulate CP15 Barrier instructions" + help + The CP15 barrier instructions - CP15ISB, CP15DSB, and + CP15DMB - are deprecated in ARMv8 (and ARMv7). It is + strongly recommended to use the ISB, DSB, and DMB + instructions instead. + + Say Y here to enable software emulation of these + instructions for AArch32 userspace code. When this option is + enabled, CP15 barrier usage is traced which can help + identify software that needs updating. This feature can be + controlled at runtime with the abi.cp15_barrier sysctl. + + If unsure, say Y + +config SETEND_EMULATION + bool "Emulate SETEND instruction" + help + The SETEND instruction alters the data-endianness of the + AArch32 EL0, and is deprecated in ARMv8. + + Say Y here to enable software emulation of the instruction + for AArch32 userspace code. This feature can be controlled + at runtime with the abi.setend sysctl. + + Note: All the cpus on the system must have mixed endian support at EL0 + for this feature to be enabled. If a new CPU - which doesn't support mixed + endian - is hotplugged in after this feature has been enabled, there could + be unexpected results in the applications. + + If unsure, say Y +endif # ARMV8_DEPRECATED + +endif # COMPAT + +menu "ARMv8.1 architectural features" + +config ARM64_HW_AFDBM + bool "Support for hardware updates of the Access and Dirty page flags" + default y + help + The ARMv8.1 architecture extensions introduce support for + hardware updates of the access and dirty information in page + table entries. When enabled in TCR_EL1 (HA and HD bits) on + capable processors, accesses to pages with PTE_AF cleared will + set this bit instead of raising an access flag fault. + Similarly, writes to read-only pages with the DBM bit set will + clear the read-only bit (AP[2]) instead of raising a + permission fault. + + Kernels built with this configuration option enabled continue + to work on pre-ARMv8.1 hardware and the performance impact is + minimal. If unsure, say Y. + +config ARM64_PAN + bool "Enable support for Privileged Access Never (PAN)" + default y + help + Privileged Access Never (PAN; part of the ARMv8.1 Extensions) + prevents the kernel or hypervisor from accessing user-space (EL0) + memory directly. + + Choosing this option will cause any unprotected (not using + copy_to_user et al) memory access to fail with a permission fault. + + The feature is detected at runtime, and will remain as a 'nop' + instruction if the cpu does not implement the feature. + +config AS_HAS_LDAPR + def_bool $(as-instr,.arch_extension rcpc) + +config AS_HAS_LSE_ATOMICS + def_bool $(as-instr,.arch_extension lse) + +config ARM64_LSE_ATOMICS + bool + default ARM64_USE_LSE_ATOMICS + depends on AS_HAS_LSE_ATOMICS + +config ARM64_USE_LSE_ATOMICS + bool "Atomic instructions" + depends on JUMP_LABEL + default y + help + As part of the Large System Extensions, ARMv8.1 introduces new + atomic instructions that are designed specifically to scale in + very large systems. + + Say Y here to make use of these instructions for the in-kernel + atomic routines. This incurs a small overhead on CPUs that do + not support these instructions and requires the kernel to be + built with binutils >= 2.25 in order for the new instructions + to be used. 
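The benefit described in the ARM64_USE_LSE_ATOMICS help text above is easiest to see from userspace, where the same instructions become available once the compiler targets ARMv8.1. The following minimal sketch is not part of this Kconfig and is only an illustration under assumed build flags: compiled with GCC or Clang and -march=armv8.1-a+lse, the __atomic_fetch_add() below is typically emitted as a single LDADD instruction, whereas without LSE it becomes an LDXR/STXR retry loop (exact code generation depends on the toolchain).

/*
 * Minimal userspace sketch (not from the kernel tree): a shared
 * counter bumped with a C11-style atomic builtin. With
 * -march=armv8.1-a+lse this typically compiles to a single LDADD,
 * the kind of LSE atomic the help text above refers to; without LSE
 * it falls back to an LDXR/STXR loop.
 */
#include <stdio.h>

static unsigned long counter;

static void bump(void)
{
	/* Relaxed ordering is enough for a simple statistics counter. */
	__atomic_fetch_add(&counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		bump();
	printf("counter = %lu\n", counter);
	return 0;
}

The in-kernel atomics make the equivalent choice at boot, falling back to the exclusive-load/store forms on CPUs without LSE, which is the small overhead the help text mentions.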
+ +endmenu # "ARMv8.1 architectural features" + +menu "ARMv8.2 architectural features" + +config AS_HAS_ARMV8_2 + def_bool $(cc-option,-Wa$(comma)-march=armv8.2-a) + +config AS_HAS_SHA3 + def_bool $(as-instr,.arch armv8.2-a+sha3) + +config ARM64_PMEM + bool "Enable support for persistent memory" + select ARCH_HAS_PMEM_API + select ARCH_HAS_UACCESS_FLUSHCACHE + help + Say Y to enable support for the persistent memory API based on the + ARMv8.2 DCPoP feature. + + The feature is detected at runtime, and the kernel will use DC CVAC + operations if DC CVAP is not supported (following the behaviour of + DC CVAP itself if the system does not define a point of persistence). + +config ARM64_RAS_EXTN + bool "Enable support for RAS CPU Extensions" + default y + help + CPUs that support the Reliability, Availability and Serviceability + (RAS) Extensions, part of ARMv8.2 are able to track faults and + errors, classify them and report them to software. + + On CPUs with these extensions system software can use additional + barriers to determine if faults are pending and read the + classification from a new set of registers. + + Selecting this feature will allow the kernel to use these barriers + and access the new registers if the system supports the extension. + Platform RAS features may additionally depend on firmware support. + +config ARM64_CNP + bool "Enable support for Common Not Private (CNP) translations" + default y + depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN + help + Common Not Private (CNP) allows translation table entries to + be shared between different PEs in the same inner shareable + domain, so the hardware can use this fact to optimise the + caching of such entries in the TLB. + + Selecting this option allows the CNP feature to be detected + at runtime, and does not affect PEs that do not implement + this feature. + +endmenu # "ARMv8.2 architectural features" + +menu "ARMv8.3 architectural features" + +config ARM64_PTR_AUTH + bool "Enable support for pointer authentication" + default y + help + Pointer authentication (part of the ARMv8.3 Extensions) provides + instructions for signing and authenticating pointers against secret + keys, which can be used to mitigate Return Oriented Programming (ROP) + and other attacks. + + This option enables these instructions at EL0 (i.e. for userspace). + Choosing this option will cause the kernel to initialise secret keys + for each process at exec() time, with these keys being + context-switched along with the process. + + The feature is detected at runtime. If the feature is not present in + hardware it will not be advertised to userspace/KVM guest nor will it + be enabled. + + If the feature is present on the boot CPU but not on a late CPU, then + the late CPU will be parked. Also, if the boot CPU does not have + address auth and the late CPU has then the late CPU will still boot + but with the feature disabled. On such a system, this option should + not be selected. + +config ARM64_PTR_AUTH_KERNEL + bool "Use pointer authentication for kernel" + default y + depends on ARM64_PTR_AUTH + depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC + # Modern compilers insert a .note.gnu.property section note for PAC + # which is only understood by binutils starting with version 2.33.1. 
+ depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
+ depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
+ depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+ help
+ If the compiler supports the -mbranch-protection or
+ -msign-return-address flag (e.g. GCC 7 or later), then this option
+ will cause the kernel itself to be compiled with return address
+ protection. In this case, and if the target hardware is known to
+ support pointer authentication, then CONFIG_STACKPROTECTOR can be
+ disabled with minimal loss of protection.
+
+ This feature works with FUNCTION_GRAPH_TRACER option only if
+ DYNAMIC_FTRACE_WITH_REGS is enabled.
+
+config CC_HAS_BRANCH_PROT_PAC_RET
+ # GCC 9 or later, clang 8 or later
+ def_bool $(cc-option,-mbranch-protection=pac-ret+leaf)
+
+config CC_HAS_SIGN_RETURN_ADDRESS
+ # GCC 7, 8
+ def_bool $(cc-option,-msign-return-address=all)
+
+config AS_HAS_PAC
+ def_bool $(cc-option,-Wa$(comma)-march=armv8.3-a)
+
+config AS_HAS_CFI_NEGATE_RA_STATE
+ def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
+
+endmenu # "ARMv8.3 architectural features"
+
+menu "ARMv8.4 architectural features"
+
+config ARM64_AMU_EXTN
+ bool "Enable support for the Activity Monitors Unit CPU extension"
+ default y
+ help
+ The activity monitors extension is an optional extension introduced
+ by the ARMv8.4 CPU architecture. This enables support for version 1
+ of the activity monitors architecture, AMUv1.
+
+ To enable the use of this extension on CPUs that implement it, say Y.
+
+ Note that for architectural reasons, firmware _must_ implement AMU
+ support when running on CPUs that present the activity monitors
+ extension. The required support is present in:
+ * Version 1.5 and later of the ARM Trusted Firmware
+
+ For kernels that have this configuration enabled but boot with broken
+ firmware, you may need to say N here until the firmware is fixed.
+ Otherwise you may experience firmware panics or lockups when
+ accessing the counter registers. Even if you are not observing these
+ symptoms, the values returned by the register reads might not
+ correctly reflect reality. Most commonly, the value read will be 0,
+ indicating that the counter is not enabled.
+
+config AS_HAS_ARMV8_4
+ def_bool $(cc-option,-Wa$(comma)-march=armv8.4-a)
+
+config ARM64_TLB_RANGE
+ bool "Enable support for tlbi range feature"
+ default y
+ depends on AS_HAS_ARMV8_4
+ help
+ ARMv8.4-TLBI provides TLBI invalidation instructions that apply to a
+ range of input addresses.
+
+ The feature introduces new assembly instructions, which are
+ supported by binutils >= 2.30.
+
+endmenu # "ARMv8.4 architectural features"
+
+menu "ARMv8.5 architectural features"
+
+config AS_HAS_ARMV8_5
+ def_bool $(cc-option,-Wa$(comma)-march=armv8.5-a)
+
+config ARM64_BTI
+ bool "Branch Target Identification support"
+ default y
+ help
+ Branch Target Identification (part of the ARMv8.5 Extensions)
+ provides a mechanism to limit the set of locations to which computed
+ branch instructions such as BR or BLR can jump.
+
+ To make use of BTI on CPUs that support it, say Y.
+
+ BTI is intended to provide complementary protection to other control
+ flow integrity protection mechanisms, such as the Pointer
+ authentication mechanism provided as part of the ARMv8.3 Extensions.
+ For this reason, it does not make sense to enable this option without
+ also enabling support for pointer authentication. Thus, when
+ enabling this option you should also select ARM64_PTR_AUTH=y.
+ + Userspace binaries must also be specifically compiled to make use of + this mechanism. If you say N here or the hardware does not support + BTI, such binaries can still run, but you get no additional + enforcement of branch destinations. + +config ARM64_BTI_KERNEL + bool "Use Branch Target Identification for kernel" + default y + depends on ARM64_BTI + depends on ARM64_PTR_AUTH_KERNEL + depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697 + depends on !CC_IS_GCC || GCC_VERSION >= 100100 + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671 + depends on !CC_IS_GCC + # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9 + depends on !CC_IS_CLANG || CLANG_VERSION >= 120000 + depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) + help + Build the kernel with Branch Target Identification annotations + and enable enforcement of this for kernel code. When this option + is enabled and the system supports BTI all kernel code including + modular code must have BTI enabled. + +config CC_HAS_BRANCH_PROT_PAC_RET_BTI + # GCC 9 or later, clang 8 or later + def_bool $(cc-option,-mbranch-protection=pac-ret+leaf+bti) + +config ARM64_E0PD + bool "Enable support for E0PD" + default y + help + E0PD (part of the ARMv8.5 extensions) allows us to ensure + that EL0 accesses made via TTBR1 always fault in constant time, + providing similar benefits to KASLR as those provided by KPTI, but + with lower overhead and without disrupting legitimate access to + kernel memory such as SPE. + + This option enables E0PD for TTBR1 where available. + +config ARM64_AS_HAS_MTE + # Initial support for MTE went in binutils 2.32.0, checked with + # ".arch armv8.5-a+memtag" below. However, this was incomplete + # as a late addition to the final architecture spec (LDGM/STGM) + # is only supported in the newer 2.32.x and 2.33 binutils + # versions, hence the extra "stgm" instruction check below. + def_bool $(as-instr,.arch armv8.5-a+memtag\nstgm xzr$(comma)[x0]) + +config ARM64_MTE + bool "Memory Tagging Extension support" + default y + depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI + depends on AS_HAS_ARMV8_5 + depends on AS_HAS_LSE_ATOMICS + # Required for tag checking in the uaccess routines + depends on ARM64_PAN + select ARCH_HAS_SUBPAGE_FAULTS + select ARCH_USES_HIGH_VMA_FLAGS + help + Memory Tagging (part of the ARMv8.5 Extensions) provides + architectural support for run-time, always-on detection of + various classes of memory error to aid with software debugging + to eliminate vulnerabilities arising from memory-unsafe + languages. + + This option enables the support for the Memory Tagging + Extension at EL0 (i.e. for userspace). + + Selecting this option allows the feature to be detected at + runtime. Any secondary CPU not implementing this feature will + not be allowed a late bring-up. + + Userspace binaries that want to use this feature must + explicitly opt in. The mechanism for the userspace is + described in: + + Documentation/arm64/memory-tagging-extension.rst. + +endmenu # "ARMv8.5 architectural features" + +menu "ARMv8.7 architectural features" + +config ARM64_EPAN + bool "Enable support for Enhanced Privileged Access Never (EPAN)" + default y + depends on ARM64_PAN + help + Enhanced Privileged Access Never (EPAN) allows Privileged + Access Never to be used with Execute-only mappings. + + The feature is detected at runtime, and will remain disabled + if the cpu does not implement the feature. 
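The userspace opt-in mentioned in the ARM64_TAGGED_ADDR_ABI and ARM64_MTE entries above goes through prctl(). The sketch below is illustrative only, assuming the PR_* constants from <linux/prctl.h>; the authoritative interface description remains Documentation/arm64/tagged-address-abi.rst and Documentation/arm64/memory-tagging-extension.rst.

/*
 * Illustrative userspace sketch (not from the kernel tree): opt in to
 * the tagged address ABI and, where the headers provide the MTE
 * controls, request synchronous tag-check faults. On kernels without
 * the corresponding support the prctl() simply fails and the program
 * continues untagged.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	unsigned long ctrl = PR_TAGGED_ADDR_ENABLE;

#ifdef PR_MTE_TCF_SYNC
	/* Ask for synchronous MTE tag-check faults; allow tags 1-15. */
	ctrl |= PR_MTE_TCF_SYNC | (0xfffeUL << PR_MTE_TAG_SHIFT);
#endif

	if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0))
		perror("PR_SET_TAGGED_ADDR_CTRL");
	else
		printf("tagged address ABI enabled\n");

	return 0;
}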
+endmenu # "ARMv8.7 architectural features" + +config ARM64_SVE + bool "ARM Scalable Vector Extension support" + default y + help + The Scalable Vector Extension (SVE) is an extension to the AArch64 + execution state which complements and extends the SIMD functionality + of the base architecture to support much larger vectors and to enable + additional vectorisation opportunities. + + To enable use of this extension on CPUs that implement it, say Y. + + On CPUs that support the SVE2 extensions, this option will enable + those too. + + Note that for architectural reasons, firmware _must_ implement SVE + support when running on SVE capable hardware. The required support + is present in: + + * version 1.5 and later of the ARM Trusted Firmware + * the AArch64 boot wrapper since commit 5e1261e08abf + ("bootwrapper: SVE: Enable SVE for EL2 and below"). + + For other firmware implementations, consult the firmware documentation + or vendor. + + If you need the kernel to boot on SVE-capable hardware with broken + firmware, you may need to say N here until you get your firmware + fixed. Otherwise, you may experience firmware panics or lockups when + booting the kernel. If unsure and you are not observing these + symptoms, you should assume that it is safe to say Y. + +config ARM64_SME + bool "ARM Scalable Matrix Extension support" + default y + depends on ARM64_SVE + help + The Scalable Matrix Extension (SME) is an extension to the AArch64 + execution state which utilises a substantial subset of the SVE + instruction set, together with the addition of new architectural + register state capable of holding two dimensional matrix tiles to + enable various matrix operations. + +config ARM64_MODULE_PLTS + bool "Use PLTs to allow module memory to spill over into vmalloc area" + depends on MODULES + select HAVE_MOD_ARCH_SPECIFIC + help + Allocate PLTs when loading modules so that jumps and calls whose + targets are too far away for their relative offsets to be encoded + in the instructions themselves can be bounced via veneers in the + module's PLT. This allows modules to be allocated in the generic + vmalloc area after the dedicated module memory area has been + exhausted. + + When running with address space randomization (KASLR), the module + region itself may be too far away for ordinary relative jumps and + calls, and so in that case, module PLTs are required and cannot be + disabled. + + Specific errata workaround(s) might also force module PLTs to be + enabled (ARM64_ERRATUM_843419). + +config ARM64_PSEUDO_NMI + bool "Support for NMI-like interrupts" + select ARM_GIC_V3 + help + Adds support for mimicking Non-Maskable Interrupts through the use of + GIC interrupt priority. This support requires version 3 or later of + ARM GIC. + + This high priority configuration for interrupts needs to be + explicitly enabled by setting the kernel parameter + "irqchip.gicv3_pseudo_nmi" to 1. + + If unsure, say N + +if ARM64_PSEUDO_NMI +config ARM64_DEBUG_PRIORITY_MASKING + bool "Debug interrupt priority masking" + help + This adds runtime checks to functions enabling/disabling + interrupts when using priority masking. The additional checks verify + the validity of ICC_PMR_EL1 when calling concerned functions. 
+
+ If unsure, say N
+endif # ARM64_PSEUDO_NMI
+
+config RELOCATABLE
+ bool "Build a relocatable kernel image" if EXPERT
+ select ARCH_HAS_RELR
+ default y
+ help
+ This builds the kernel as a Position Independent Executable (PIE),
+ which retains all relocation metadata required to relocate the
+ kernel binary at runtime to a different virtual address than the
+ address it was linked at.
+ Since AArch64 uses the RELA relocation format, this requires a
+ relocation pass at runtime even if the kernel is loaded at the
+ same address it was linked at.
+
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+ select ARM64_MODULE_PLTS if MODULES
+ select RELOCATABLE
+ help
+ Randomizes the virtual address at which the kernel image is
+ loaded, as a security feature that deters exploit attempts
+ relying on knowledge of the location of kernel internals.
+
+ It is the bootloader's job to provide entropy, by passing a
+ random u64 value in /chosen/kaslr-seed at kernel entry.
+
+ When booting via the UEFI stub, it will invoke the firmware's
+ EFI_RNG_PROTOCOL implementation (if available) to supply entropy
+ to the kernel proper. In addition, it will randomise the physical
+ location of the kernel Image as well.
+
+ If unsure, say N.
+
+config RANDOMIZE_MODULE_REGION_FULL
+ bool "Randomize the module region over a 2 GB range"
+ depends on RANDOMIZE_BASE
+ default y
+ help
+ Randomizes the location of the module region inside a 2 GB window
+ covering the core kernel. This way, it is less likely for modules
+ to leak information about the location of core kernel data structures
+ but it does imply that function calls between modules and the core
+ kernel will need to be resolved via veneers in the module PLT.
+
+ When this option is not set, the module region will be randomized over
+ a limited range that contains the [_stext, _etext] interval of the
+ core kernel, so branch relocations are almost always in range unless
+ ARM64_MODULE_PLTS is enabled and the region is exhausted. In this
+ particular case of region exhaustion, modules might be able to fall
+ back to a larger 2GB area.
+
+config CC_HAVE_STACKPROTECTOR_SYSREG
+ def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)
+
+config STACKPROTECTOR_PER_TASK
+ def_bool y
+ depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_SYSREG
+
+# The GPIO number here must be sorted by descending number. In case of
+# a multiplatform kernel, we just want the highest value required by the
+# selected platforms.
+config ARCH_NR_GPIO
+ int
+ default 2048 if ARCH_APPLE
+ default 0
+ help
+ Maximum number of GPIOs in the system.
+
+ If unsure, leave the default value.
+
+endmenu # "Kernel Features"
+
+menu "Boot options"
+
+config ARM64_ACPI_PARKING_PROTOCOL
+ bool "Enable support for the ARM64 ACPI parking protocol"
+ depends on ACPI
+ help
+ Enable support for the ARM64 ACPI parking protocol. If disabled
+ the kernel will not allow booting through the ARM64 ACPI parking
+ protocol even if the corresponding data is present in the ACPI
+ MADT table.
+
+config CMDLINE
+ string "Default kernel command string"
+ default ""
+ help
+ Provide a set of default command-line options at build time by
+ entering them here. As a minimum, you should specify the
+ root device (e.g. root=/dev/nfs).
+
+choice
+ prompt "Kernel command line type" if CMDLINE != ""
+ default CMDLINE_FROM_BOOTLOADER
+ help
+ Choose how the kernel will handle the provided default kernel
+ command line string.
+ +config CMDLINE_FROM_BOOTLOADER + bool "Use bootloader kernel arguments if available" + help + Uses the command-line options passed by the boot loader. If + the boot loader doesn't provide any, the default kernel command + string provided in CMDLINE will be used. + +config CMDLINE_FORCE + bool "Always use the default kernel command string" + help + Always use the default kernel command string, even if the boot + loader passes other arguments to the kernel. + This is useful if you cannot or don't want to change the + command-line options your boot loader passes to the kernel. + +endchoice + +config EFI_STUB + bool + +config EFI + bool "UEFI runtime support" + depends on OF && !CPU_BIG_ENDIAN + depends on KERNEL_MODE_NEON + select ARCH_SUPPORTS_ACPI + select LIBFDT + select UCS2_STRING + select EFI_PARAMS_FROM_FDT + select EFI_RUNTIME_WRAPPERS + select EFI_STUB + select EFI_GENERIC_STUB + imply IMA_SECURE_AND_OR_TRUSTED_BOOT + default y + help + This option provides support for runtime services provided + by UEFI firmware (such as non-volatile variables, realtime + clock, and platform reset). A UEFI stub is also provided to + allow the kernel to be booted as an EFI application. This + is only useful on systems that have UEFI firmware. + +config DMI + bool "Enable support for SMBIOS (DMI) tables" + depends on EFI + default y + help + This enables SMBIOS/DMI feature for systems. + + This option is only useful on systems that have UEFI firmware. + However, even with this option, the resultant kernel should + continue to boot on existing non-UEFI platforms. + +endmenu # "Boot options" + +menu "Power management options" + +source "kernel/power/Kconfig" + +config ARCH_HIBERNATION_POSSIBLE + def_bool y + depends on CPU_PM + +config ARCH_HIBERNATION_HEADER + def_bool y + depends on HIBERNATION + +config ARCH_SUSPEND_POSSIBLE + def_bool y + +endmenu # "Power management options" + +menu "CPU Power Management" + +source "drivers/cpuidle/Kconfig" + +source "drivers/cpufreq/Kconfig" + +endmenu # "CPU Power Management" + +source "drivers/acpi/Kconfig" + +source "arch/arm64/kvm/Kconfig" + |
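Most of the features configured in this file (LSE atomics, SVE, pointer authentication, BTI, MTE) are detected at runtime and advertised to userspace as hwcaps. As a closing illustration, the hedged sketch below queries those bits through the auxiliary vector; the HWCAP_*/HWCAP2_* names are the arm64 definitions from <asm/hwcap.h>, and which of them are visible depends on the kernel headers installed.

/*
 * Illustrative sketch (not part of this Kconfig): report the arm64
 * hwcaps that correspond to several of the runtime-detected features
 * configured above. Bits unknown to the installed headers are simply
 * skipped.
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

#ifdef HWCAP_ATOMICS
	printf("LSE atomics: %s\n", (hwcap & HWCAP_ATOMICS) ? "yes" : "no");
#endif
#ifdef HWCAP_SVE
	printf("SVE:         %s\n", (hwcap & HWCAP_SVE) ? "yes" : "no");
#endif
#ifdef HWCAP_PACA
	printf("PAuth:       %s\n", (hwcap & HWCAP_PACA) ? "yes" : "no");
#endif
#ifdef HWCAP2_BTI
	printf("BTI:         %s\n", (hwcap2 & HWCAP2_BTI) ? "yes" : "no");
#endif
#ifdef HWCAP2_MTE
	printf("MTE:         %s\n", (hwcap2 & HWCAP2_MTE) ? "yes" : "no");
#endif
	return 0;
}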