author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /arch/Kconfig
parent    Initial commit. (diff)
Adding upstream version 6.1.76. [upstream/6.1.76]
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/Kconfig')
-rw-r--r--  arch/Kconfig  |  1437
1 file changed, 1437 insertions, 0 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
new file mode 100644
index 000000000..14273a620
--- /dev/null
+++ b/arch/Kconfig
@@ -0,0 +1,1437 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# General architecture dependent options
+#
+
+#
+# Note: arch/$(SRCARCH)/Kconfig needs to be included first so that it can
+# override the default values in this file.
+#
+source "arch/$(SRCARCH)/Kconfig"
+
+menu "General architecture-dependent options"
+
+config CRASH_CORE
+ bool
+
+config KEXEC_CORE
+ select CRASH_CORE
+ bool
+
+config KEXEC_ELF
+ bool
+
+config HAVE_IMA_KEXEC
+ bool
+
+config ARCH_HAS_SUBPAGE_FAULTS
+ bool
+ help
+ Select if the architecture can check permissions at sub-page
+ granularity (e.g. arm64 MTE). The probe_user_*() functions
+ must be implemented.
+
+config HOTPLUG_SMT
+ bool
+
+config SMT_NUM_THREADS_DYNAMIC
+ bool
+
+config GENERIC_ENTRY
+ bool
+
+config KPROBES
+ bool "Kprobes"
+ depends on MODULES
+ depends on HAVE_KPROBES
+ select KALLSYMS
+ select TASKS_RCU if PREEMPTION
+ help
+ Kprobes allows you to trap at almost any kernel address and
+ execute a callback function. register_kprobe() establishes
+ a probepoint and specifies the callback. Kprobes is useful
+ for kernel debugging, non-intrusive instrumentation and testing.
+ If in doubt, say "N".
+
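+# A minimal sketch of the API described above (the probed symbol and the
+# handler name are hypothetical examples, not part of this file):
+#
+#   static int handler_pre(struct kprobe *p, struct pt_regs *regs)
+#   {
+#           pr_info("kprobe hit at %pS\n", p->addr);
+#           return 0;       /* let execution continue normally */
+#   }
+#
+#   static struct kprobe kp = {
+#           .symbol_name = "do_sys_open",   /* example probe point */
+#           .pre_handler = handler_pre,
+#   };
+#
+#   /* module init: */ register_kprobe(&kp);
+#   /* module exit: */ unregister_kprobe(&kp);
+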
+config JUMP_LABEL
+ bool "Optimize very unlikely/likely branches"
+ depends on HAVE_ARCH_JUMP_LABEL
+ select OBJTOOL if HAVE_JUMP_LABEL_HACK
+ help
+ This option enables a transparent branch optimization that
+ makes certain almost-always-true or almost-always-false branch
+ conditions even cheaper to execute within the kernel.
+
+ Certain performance-sensitive kernel code, such as trace points,
+ scheduler functionality, networking code and KVM have such
+ branches and include support for this optimization technique.
+
+ If it is detected that the compiler has support for "asm goto",
+ the kernel will compile such branches with just a nop
+ instruction. When the condition flag is toggled to true, the
+ nop will be converted to a jump instruction to execute the
+ conditional block of instructions.
+
+ This technique lowers overhead and stress on the branch prediction
+ of the processor and generally makes the kernel faster. The update
+ of the condition is slower, but such updates are always very rare.
+
+ ( On 32-bit x86, the necessary options added to the compiler
+ flags may increase the size of the kernel slightly. )
+
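+# A minimal sketch of the static-key mechanism this backs (the key and
+# function names are hypothetical):
+#
+#   #include <linux/jump_label.h>
+#
+#   static DEFINE_STATIC_KEY_FALSE(my_feature);
+#
+#   void hot_path(void)
+#   {
+#           /* compiles to a NOP until the key is enabled */
+#           if (static_branch_unlikely(&my_feature))
+#                   do_rare_work();
+#   }
+#
+#   /* at runtime, patches the NOP into a jump: */
+#   static_branch_enable(&my_feature);
+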
+config STATIC_KEYS_SELFTEST
+ bool "Static key selftest"
+ depends on JUMP_LABEL
+ help
+ Boot time self-test of the branch patching code.
+
+config STATIC_CALL_SELFTEST
+ bool "Static call selftest"
+ depends on HAVE_STATIC_CALL
+ help
+ Boot time self-test of the call patching code.
+
+config OPTPROBES
+ def_bool y
+ depends on KPROBES && HAVE_OPTPROBES
+ select TASKS_RCU if PREEMPTION
+
+config KPROBES_ON_FTRACE
+ def_bool y
+ depends on KPROBES && HAVE_KPROBES_ON_FTRACE
+ depends on DYNAMIC_FTRACE_WITH_REGS
+ help
+ If the function tracer is enabled and the arch supports full
+ passing of pt_regs to function tracing, then kprobes can
+ be optimized on top of function tracing.
+
+config UPROBES
+ def_bool n
+ depends on ARCH_SUPPORTS_UPROBES
+ help
+ Uprobes are the user-space counterpart to kprobes: they
+ enable instrumentation applications (such as 'perf probe')
+ to establish unintrusive probes in user-space binaries and
+ libraries, by executing handler functions when the probes
+ are hit by user-space applications.
+
+ ( These probes come in the form of single-byte breakpoints,
+ managed by the kernel and kept transparent to the probed
+ application. )
+
+config HAVE_64BIT_ALIGNED_ACCESS
+ def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
+ help
+ Some architectures require 64 bit accesses to be 64 bit
+ aligned, which also requires structs containing 64 bit values
+ to be 64 bit aligned. This includes some 32 bit
+ architectures which can do 64 bit accesses, as well as 64 bit
+ architectures without unaligned access.
+
+ An architecture should select this symbol if it requires 64 bit
+ accesses to be 64 bit aligned in this way, even though it is not
+ a 64 bit architecture.
+
+ See Documentation/core-api/unaligned-memory-access.rst for
+ more information on the topic of unaligned memory accesses.
+
+config HAVE_EFFICIENT_UNALIGNED_ACCESS
+ bool
+ help
+ Some architectures are unable to perform unaligned accesses
+ without the use of get_unaligned/put_unaligned. Others are
+ unable to perform such accesses efficiently (e.g. trap on
+ unaligned access and require fixing it up in the exception
+ handler.)
+
+ This symbol should be selected by an architecture if it can
+ perform unaligned accesses efficiently to allow different
+ code paths to be selected for these cases. Some network
+ drivers, for example, could opt to not fix up alignment
+ problems with received packets if doing so would not help
+ much.
+
+ See Documentation/core-api/unaligned-memory-access.rst for more
+ information on the topic of unaligned memory accesses.
+
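+# A sketch of the helpers mentioned above: on architectures with
+# efficient unaligned access they compile to plain loads, elsewhere to
+# byte-wise accesses (the function and offset are illustrative):
+#
+#   #include <asm/unaligned.h>
+#
+#   u32 parse_hdr_len(const u8 *pkt)
+#   {
+#           /* pkt + 3 may not be 4-byte aligned */
+#           return get_unaligned_le32(pkt + 3);
+#   }
+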
+config ARCH_USE_BUILTIN_BSWAP
+ bool
+ help
+ Modern versions of GCC (since 4.4) have builtin functions
+ for handling byte-swapping. Using these, instead of the old
+ inline assembler that the architecture code provides in the
+ __arch_bswapXX() macros, allows the compiler to see what's
+ happening and offers more opportunity for optimisation. In
+ particular, the compiler will be able to combine the byteswap
+ with a nearby load or store and use load-and-swap or
+ store-and-swap instructions if the architecture has them. It
+ should almost *never* result in code which is worse than the
+ hand-coded assembler in <asm/swab.h>. But just in case it
+ does, the use of the builtins is optional.
+
+ Any architecture with load-and-swap or store-and-swap
+ instructions should set this. And it shouldn't hurt to set it
+ on architectures that don't have such instructions.
+
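+# Roughly what the builtin buys, as a sketch (the wrapper name is
+# hypothetical):
+#
+#   /* The compiler can fuse this with an adjacent load or store into a
+#    * load-and-swap instruction where the ISA has one: */
+#   static inline u32 be32_to_host(u32 x)
+#   {
+#   #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#           return __builtin_bswap32(x);
+#   #else
+#           return __arch_swab32(x);        /* arch inline-asm fallback */
+#   #endif
+#   }
+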
+config KRETPROBES
+ def_bool y
+ depends on KPROBES && (HAVE_KRETPROBES || HAVE_RETHOOK)
+
+config KRETPROBE_ON_RETHOOK
+ def_bool y
+ depends on HAVE_RETHOOK
+ depends on KRETPROBES
+ select RETHOOK
+
+config USER_RETURN_NOTIFIER
+ bool
+ depends on HAVE_USER_RETURN_NOTIFIER
+ help
+ Provide a kernel-internal notification when a CPU is about to
+ switch to user mode.
+
+config HAVE_IOREMAP_PROT
+ bool
+
+config HAVE_KPROBES
+ bool
+
+config HAVE_KRETPROBES
+ bool
+
+config HAVE_OPTPROBES
+ bool
+
+config HAVE_KPROBES_ON_FTRACE
+ bool
+
+config ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ bool
+ help
+ Since kretprobes modifies the return address on the stack, the
+ stacktrace may see the kretprobe trampoline address instead
+ of the correct one. If the architecture stacktrace code and
+ unwinder can adjust such entries, select this configuration.
+
+config HAVE_FUNCTION_ERROR_INJECTION
+ bool
+
+config HAVE_NMI
+ bool
+
+config HAVE_FUNCTION_DESCRIPTORS
+ bool
+
+config TRACE_IRQFLAGS_SUPPORT
+ bool
+
+config TRACE_IRQFLAGS_NMI_SUPPORT
+ bool
+
+#
+# An arch should select this if it provides all these things:
+#
+# task_pt_regs() in asm/processor.h or asm/ptrace.h
+# arch_has_single_step() if there is hardware single-step support
+# arch_has_block_step() if there is hardware block-step support
+# asm/syscall.h supplying asm-generic/syscall.h interface
+# linux/regset.h user_regset interfaces
+# CORE_DUMP_USE_REGSET #define'd in linux/elf.h
+# TIF_SYSCALL_TRACE calls ptrace_report_syscall_{entry,exit}
+# TIF_NOTIFY_RESUME calls resume_user_mode_work()
+#
+config HAVE_ARCH_TRACEHOOK
+ bool
+
+config HAVE_DMA_CONTIGUOUS
+ bool
+
+config GENERIC_SMP_IDLE_THREAD
+ bool
+
+config GENERIC_IDLE_POLL_SETUP
+ bool
+
+config ARCH_HAS_FORTIFY_SOURCE
+ bool
+ help
+ An architecture should select this when it can successfully
+ build and run with CONFIG_FORTIFY_SOURCE.
+
+#
+# Select if the arch provides a historic keepinit alias for the retain_initrd
+# command line option
+#
+config ARCH_HAS_KEEPINITRD
+ bool
+
+# Select if arch has all set_memory_ro/rw/x/nx() functions in asm/cacheflush.h
+config ARCH_HAS_SET_MEMORY
+ bool
+
+# Select if arch has all set_direct_map_invalid/default() functions
+config ARCH_HAS_SET_DIRECT_MAP
+ bool
+
+#
+# Select if the architecture provides the arch_dma_set_uncached symbol to
+# either provide an uncached segment alias for a DMA allocation, or
+# to remap the page tables in place.
+#
+config ARCH_HAS_DMA_SET_UNCACHED
+ bool
+
+#
+# Select if the architecture provides the arch_dma_clear_uncached symbol
+# to undo an in-place page table remap for uncached access.
+#
+config ARCH_HAS_DMA_CLEAR_UNCACHED
+ bool
+
+config ARCH_HAS_CPU_FINALIZE_INIT
+ bool
+
+# Select if arch init_task must go in the __init_task_data section
+config ARCH_TASK_STRUCT_ON_STACK
+ bool
+
+# Select if arch has its private alloc_task_struct() function
+config ARCH_TASK_STRUCT_ALLOCATOR
+ bool
+
+config HAVE_ARCH_THREAD_STRUCT_WHITELIST
+ bool
+ depends on !ARCH_TASK_STRUCT_ALLOCATOR
+ help
+ An architecture should select this to provide hardened usercopy
+ knowledge about what region of the thread_struct should be
+ whitelisted for copying to userspace. Normally this is only the
+ FPU registers. Specifically, arch_thread_struct_whitelist()
+ should be implemented. Without this, the entire thread_struct
+ field in task_struct will be left whitelisted.
+
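+# A sketch of the hook named above, assuming the FPU state is the only
+# field exposed to usercopy (the thread_struct layout is illustrative):
+#
+#   static inline void arch_thread_struct_whitelist(unsigned long *offset,
+#                                                   unsigned long *size)
+#   {
+#           *offset = offsetof(struct thread_struct, fpu);
+#           *size   = sizeof_field(struct thread_struct, fpu);
+#   }
+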
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
+ bool
+
+# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
+config ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ bool
+
+config ARCH_WANTS_NO_INSTR
+ bool
+ help
+ An architecture should select this if the noinstr macro is being used on
+ functions to denote that the toolchain should avoid instrumenting such
+ functions and that this is required for correctness.
+
+config ARCH_32BIT_OFF_T
+ bool
+ depends on !64BIT
+ help
+ All new 32-bit architectures should have a 64-bit off_t type on
+ the userspace side, which corresponds to the loff_t kernel type. This
+ is the requirement for modern ABIs. Some existing architectures
+ still support 32-bit off_t. This option is enabled for all such
+ architectures explicitly.
+
+# Selected by 64 bit architectures which have a 32 bit f_tinode in struct ustat
+config ARCH_32BIT_USTAT_F_TINODE
+ bool
+
+config HAVE_ASM_MODVERSIONS
+ bool
+ help
+ This symbol should be selected by an architecture if it provides
+ <asm/asm-prototypes.h> to support the module versioning for symbols
+ exported from assembly code.
+
+config HAVE_REGS_AND_STACK_ACCESS_API
+ bool
+ help
+ This symbol should be selected by an architecture if it supports
+ the API needed to access registers and stack entries from pt_regs,
+ declared in asm/ptrace.h.
+ For example, the kprobes-based event tracer needs this API.
+
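+# The flavour of API this refers to, sketched (the register name varies
+# per arch and is illustrative here):
+#
+#   #include <linux/ptrace.h>
+#
+#   int off = regs_query_register_offset("sp");
+#   unsigned long sp = regs_get_register(regs, off);
+#   /* nth entry on the kernel stack: */
+#   unsigned long e = regs_get_kernel_stack_nth(regs, 2);
+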
+config HAVE_RSEQ
+ bool
+ depends on HAVE_REGS_AND_STACK_ACCESS_API
+ help
+ This symbol should be selected by an architecture if it
+ supports an implementation of restartable sequences.
+
+config HAVE_RUST
+ bool
+ help
+ This symbol should be selected by an architecture if it
+ supports Rust.
+
+config HAVE_FUNCTION_ARG_ACCESS_API
+ bool
+ help
+ This symbol should be selected by an architecture if it supports
+ the API needed to access function arguments from pt_regs,
+ declared in asm/ptrace.h.
+
+config HAVE_HW_BREAKPOINT
+ bool
+ depends on PERF_EVENTS
+
+config HAVE_MIXED_BREAKPOINTS_REGS
+ bool
+ depends on HAVE_HW_BREAKPOINT
+ help
+ Depending on the arch implementation of hardware breakpoints,
+ some of them have separate registers for data and instruction
+ breakpoint addresses, while others have mixed registers that store
+ both but define the access type in a control register.
+ Select this option if your arch implements breakpoints in the
+ latter fashion.
+
+config HAVE_USER_RETURN_NOTIFIER
+ bool
+
+config HAVE_PERF_EVENTS_NMI
+ bool
+ help
+ System hardware can generate an NMI using the perf event
+ subsystem. It also supports calculating CPU cycle events
+ to determine how many clock cycles have elapsed in a given period.
+
+config HAVE_HARDLOCKUP_DETECTOR_PERF
+ bool
+ depends on HAVE_PERF_EVENTS_NMI
+ help
+ The arch chooses to use the generic perf-NMI-based hardlockup
+ detector. Must define HAVE_PERF_EVENTS_NMI.
+
+config HAVE_NMI_WATCHDOG
+ depends on HAVE_NMI
+ bool
+ help
+ The arch provides a low level NMI watchdog. It supplies
+ asm/nmi.h, and defines its own arch_touch_nmi_watchdog().
+
+config HAVE_HARDLOCKUP_DETECTOR_ARCH
+ bool
+ select HAVE_NMI_WATCHDOG
+ help
+ The arch chooses to provide its own hardlockup detector, which is
+ a superset of HAVE_NMI_WATCHDOG. It also conforms to the config
+ interfaces and parameters provided by the hardlockup detector subsystem.
+
+config HAVE_PERF_REGS
+ bool
+ help
+ Support selective register dumps for perf events. This includes
+ a bit-mapping of each register and a unique architecture id.
+
+config HAVE_PERF_USER_STACK_DUMP
+ bool
+ help
+ Support user stack dumps for perf event samples. This needs
+ access to the user stack pointer which is not unified across
+ architectures.
+
+config HAVE_ARCH_JUMP_LABEL
+ bool
+
+config HAVE_ARCH_JUMP_LABEL_RELATIVE
+ bool
+
+config MMU_GATHER_TABLE_FREE
+ bool
+
+config MMU_GATHER_RCU_TABLE_FREE
+ bool
+ select MMU_GATHER_TABLE_FREE
+
+config MMU_GATHER_PAGE_SIZE
+ bool
+
+config MMU_GATHER_NO_RANGE
+ bool
+ select MMU_GATHER_MERGE_VMAS
+
+config MMU_GATHER_NO_FLUSH_CACHE
+ bool
+
+config MMU_GATHER_MERGE_VMAS
+ bool
+
+config MMU_GATHER_NO_GATHER
+ bool
+ depends on MMU_GATHER_TABLE_FREE
+
+config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+ bool
+ help
+ A temporary select until all architectures can be converted to have
+ irqs disabled over activate_mm. Architectures that do IPI-based TLB
+ shootdowns should enable this.
+
+config ARCH_HAVE_NMI_SAFE_CMPXCHG
+ bool
+
+config HAVE_ALIGNED_STRUCT_PAGE
+ bool
+ help
+ This makes sure that struct pages are double word aligned and that
+ e.g. the SLUB allocator can perform double word atomic operations
+ on a struct page for better performance. However selecting this
+ might increase the size of a struct page by a word.
+
+config HAVE_CMPXCHG_LOCAL
+ bool
+
+config HAVE_CMPXCHG_DOUBLE
+ bool
+
+config ARCH_WEAK_RELEASE_ACQUIRE
+ bool
+
+config ARCH_WANT_IPC_PARSE_VERSION
+ bool
+
+config ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ bool
+
+config ARCH_WANT_OLD_COMPAT_IPC
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ bool
+
+config HAVE_ARCH_SECCOMP
+ bool
+ help
+ An arch should select this symbol to support seccomp mode 1 (the fixed
+ syscall policy), and must provide overrides for __NR_seccomp_sigreturn
+ and the compat syscalls if the asm-generic/seccomp.h defaults need adjustment:
+ - __NR_seccomp_read_32
+ - __NR_seccomp_write_32
+ - __NR_seccomp_exit_32
+ - __NR_seccomp_sigreturn_32
+
+config HAVE_ARCH_SECCOMP_FILTER
+ bool
+ select HAVE_ARCH_SECCOMP
+ help
+ An arch should select this symbol if it provides all of these things:
+ - all the requirements for HAVE_ARCH_SECCOMP
+ - syscall_get_arch()
+ - syscall_get_arguments()
+ - syscall_rollback()
+ - syscall_set_return_value()
+ - SIGSYS siginfo_t support
+ - secure_computing is called from a ptrace_event()-safe context
+ - secure_computing return value is checked and a return value of -1
+ results in the system call being skipped immediately.
+ - seccomp syscall wired up
+ - if !HAVE_SPARSE_SYSCALL_NR, have SECCOMP_ARCH_NATIVE,
+ SECCOMP_ARCH_NATIVE_NR, SECCOMP_ARCH_NATIVE_NAME defined. If
+ COMPAT is supported, have the SECCOMP_ARCH_COMPAT* defines too.
+
+config SECCOMP
+ prompt "Enable seccomp to safely execute untrusted bytecode"
+ def_bool y
+ depends on HAVE_ARCH_SECCOMP
+ help
+ This kernel feature is useful for number crunching applications
+ that may need to handle untrusted bytecode during their
+ execution. By using pipes or other transports made available
+ to the process as file descriptors supporting the read/write
+ syscalls, it's possible to isolate those applications in their
+ own address space using seccomp. Once seccomp is enabled via
+ prctl(PR_SET_SECCOMP) or the seccomp() syscall, it cannot be
+ disabled and the task is only allowed to execute a few safe
+ syscalls defined by each seccomp mode.
+
+ If unsure, say Y.
+
+config SECCOMP_FILTER
+ def_bool y
+ depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
+ help
+ Enable tasks to build secure computing environments defined
+ in terms of Berkeley Packet Filter programs which implement
+ task-defined system call filtering policies.
+
+ See Documentation/userspace-api/seccomp_filter.rst for details.
+
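+# A minimal userspace sketch of such a filter (the denied syscall is an
+# arbitrary example; a real filter should also check seccomp_data->arch):
+#
+#   #include <stddef.h>
+#   #include <sys/prctl.h>
+#   #include <sys/syscall.h>
+#   #include <linux/filter.h>
+#   #include <linux/seccomp.h>
+#
+#   struct sock_filter filter[] = {
+#           /* load the syscall number */
+#           BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+#                    offsetof(struct seccomp_data, nr)),
+#           BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_ptrace, 0, 1),
+#           BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
+#           BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
+#   };
+#   struct sock_fprog prog = {
+#           .len = sizeof(filter) / sizeof(filter[0]),
+#           .filter = filter,
+#   };
+#
+#   prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+#   prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
+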
+config SECCOMP_CACHE_DEBUG
+ bool "Show seccomp filter cache status in /proc/pid/seccomp_cache"
+ depends on SECCOMP_FILTER && !HAVE_SPARSE_SYSCALL_NR
+ depends on PROC_FS
+ help
+ This enables the /proc/pid/seccomp_cache interface to monitor
+ seccomp cache data. The file format is subject to change. Reading
+ the file requires CAP_SYS_ADMIN.
+
+ This option is for debugging only. Enabling presents the risk that
+ an adversary may be able to infer the seccomp filter logic.
+
+ If unsure, say N.
+
+config HAVE_ARCH_STACKLEAK
+ bool
+ help
+ An architecture should select this if it has the code which
+ fills the used part of the kernel stack with the STACKLEAK_POISON
+ value before returning from system calls.
+
+config HAVE_STACKPROTECTOR
+ bool
+ help
+ An arch should select this symbol if:
+ - it has implemented a stack canary (e.g. __stack_chk_guard)
+
+config STACKPROTECTOR
+ bool "Stack Protector buffer overflow detection"
+ depends on HAVE_STACKPROTECTOR
+ depends on $(cc-option,-fstack-protector)
+ default y
+ help
+ This option turns on the "stack-protector" GCC feature. This
+ feature puts, at the beginning of functions, a canary value on
+ the stack just before the return address, and validates
+ the value just before actually returning. Stack based buffer
+ overflows (that need to overwrite this return address) now also
+ overwrite the canary, which gets detected and the attack is then
+ neutralized via a kernel panic.
+
+ Functions will have the stack-protector canary logic added if they
+ have an 8-byte or larger character array on the stack.
+
+ This feature requires gcc version 4.2 or above, or a distribution
+ gcc with the feature backported ("-fstack-protector").
+
+ On an x86 "defconfig" build, this feature adds canary checks to
+ about 3% of all kernel functions, which increases kernel code size
+ by about 0.3%.
+
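+# What the instrumentation amounts to, sketched (the function is
+# illustrative):
+#
+#   void greet(const char *name)
+#   {
+#           char buf[16];      /* 8+ byte char array => canary added */
+#           strcpy(buf, name); /* an overflow clobbers the canary... */
+#   }  /* ...and the epilogue check panics instead of returning */
+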
+config STACKPROTECTOR_STRONG
+ bool "Strong Stack Protector"
+ depends on STACKPROTECTOR
+ depends on $(cc-option,-fstack-protector-strong)
+ default y
+ help
+ Functions will have the stack-protector canary logic added in any
+ of the following conditions:
+
+ - local variable's address used as part of the right hand side of an
+ assignment or function argument
+ - local variable is an array (or union containing an array),
+ regardless of array type or length
+ - uses register local variables
+
+ This feature requires gcc version 4.9 or above, or a distribution
+ gcc with the feature backported ("-fstack-protector-strong").
+
+ On an x86 "defconfig" build, this feature adds canary checks to
+ about 20% of all kernel functions, which increases the kernel code
+ size by about 2%.
+
+config ARCH_SUPPORTS_SHADOW_CALL_STACK
+ bool
+ help
+ An architecture should select this if it supports the compiler's
+ Shadow Call Stack and implements runtime support for shadow stack
+ switching.
+
+config SHADOW_CALL_STACK
+ bool "Shadow Call Stack"
+ depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
+ depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
+ help
+ This option enables the compiler's Shadow Call Stack, which
+ uses a shadow stack to protect function return addresses from
+ being overwritten by an attacker. More information can be found
+ in the compiler's documentation:
+
+ - Clang: https://clang.llvm.org/docs/ShadowCallStack.html
+ - GCC: https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html#Instrumentation-Options
+
+ Note that security guarantees in the kernel differ from the
+ ones documented for user space. The kernel must store addresses
+ of shadow stacks in memory, which means an attacker capable of
+ reading and writing arbitrary memory may be able to locate them
+ and hijack control flow by modifying the stacks.
+
+config LTO
+ bool
+ help
+ Selected if the kernel will be built using the compiler's LTO feature.
+
+config LTO_CLANG
+ bool
+ select LTO
+ help
+ Selected if the kernel will be built using Clang's LTO feature.
+
+config ARCH_SUPPORTS_LTO_CLANG
+ bool
+ help
+ An architecture should select this option if it supports:
+ - compiling with Clang,
+ - compiling inline assembly with Clang's integrated assembler,
+ - and linking with LLD.
+
+config ARCH_SUPPORTS_LTO_CLANG_THIN
+ bool
+ help
+ An architecture should select this option if it can support Clang's
+ ThinLTO mode.
+
+config HAS_LTO_CLANG
+ def_bool y
+ depends on CC_IS_CLANG && LD_IS_LLD && AS_IS_LLVM
+ depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm)
+ depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
+ depends on ARCH_SUPPORTS_LTO_CLANG
+ depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT
+ depends on !KASAN || KASAN_HW_TAGS
+ depends on !GCOV_KERNEL
+ help
+ The compiler and Kconfig options support building with Clang's
+ LTO.
+
+choice
+ prompt "Link Time Optimization (LTO)"
+ default LTO_NONE
+ help
+ This option enables Link Time Optimization (LTO), which allows the
+ compiler to optimize binaries globally.
+
+ If unsure, select LTO_NONE. Note that LTO is very resource-intensive
+ so it's disabled by default.
+
+config LTO_NONE
+ bool "None"
+ help
+ Build the kernel normally, without Link Time Optimization (LTO).
+
+config LTO_CLANG_FULL
+ bool "Clang Full LTO (EXPERIMENTAL)"
+ depends on HAS_LTO_CLANG
+ depends on !COMPILE_TEST
+ select LTO_CLANG
+ help
+ This option enables Clang's full Link Time Optimization (LTO), which
+ allows the compiler to optimize the kernel globally. If you enable
+ this option, the compiler generates LLVM bitcode instead of ELF
+ object files, and the actual compilation from bitcode happens at
+ the LTO link step, which may take several minutes depending on the
+ kernel configuration. More information can be found from LLVM's
+ documentation:
+
+ https://llvm.org/docs/LinkTimeOptimization.html
+
+ During link time, this option can use a large amount of RAM, and
+ may take much longer than the ThinLTO option.
+
+config LTO_CLANG_THIN
+ bool "Clang ThinLTO (EXPERIMENTAL)"
+ depends on HAS_LTO_CLANG && ARCH_SUPPORTS_LTO_CLANG_THIN
+ select LTO_CLANG
+ help
+ This option enables Clang's ThinLTO, which allows for parallel
+ optimization and faster incremental compiles compared to the
+ CONFIG_LTO_CLANG_FULL option. More information can be found
+ from Clang's documentation:
+
+ https://clang.llvm.org/docs/ThinLTO.html
+
+ If unsure, say Y.
+endchoice
+
+config ARCH_SUPPORTS_CFI_CLANG
+ bool
+ help
+ An architecture should select this option if it can support Clang's
+ Control-Flow Integrity (CFI) checking.
+
+config ARCH_USES_CFI_TRAPS
+ bool
+
+config CFI_CLANG
+ bool "Use Clang's Control Flow Integrity (CFI)"
+ depends on ARCH_SUPPORTS_CFI_CLANG
+ depends on $(cc-option,-fsanitize=kcfi)
+ help
+ This option enables Clang’s forward-edge Control Flow Integrity
+ (CFI) checking, where the compiler injects a runtime check to each
+ indirect function call to ensure the target is a valid function with
+ the correct static type. This restricts possible call targets and
+ makes it more difficult for an attacker to exploit bugs that allow
+ the modification of stored function pointers. More information can be
+ found from Clang's documentation:
+
+ https://clang.llvm.org/docs/ControlFlowIntegrity.html
+
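+# The class of bug this catches, sketched:
+#
+#   int  ops_read(int fd);
+#   long evil(void *p);
+#
+#   int (*fp)(int) = (int (*)(int))evil;  /* type confusion */
+#   fp(0);  /* kCFI: the target's type hash mismatches => trap */
+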
+config CFI_PERMISSIVE
+ bool "Use CFI in permissive mode"
+ depends on CFI_CLANG
+ help
+ When selected, Control Flow Integrity (CFI) violations result in a
+ warning instead of a kernel panic. This option should only be used
+ for finding indirect call type mismatches during development.
+
+ If unsure, say N.
+
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+ bool
+ help
+ An architecture should select this if it can walk the kernel stack
+ frames to determine if an object is part of either the arguments
+ or local variables (i.e. that it excludes saved return addresses,
+ and similar) by implementing an inline arch_within_stack_frames(),
+ which is used by CONFIG_HARDENED_USERCOPY.
+
+config HAVE_CONTEXT_TRACKING_USER
+ bool
+ help
+ Provide kernel/user boundaries probes necessary for subsystems
+ that need it, such as userspace RCU extended quiescent state.
+ Syscalls need to be wrapped inside user_exit()-user_enter(), either
+ optimized behind a static key or through the slow path using the
+ TIF_NOHZ flag. Exception handlers must be wrapped as well. Irqs are
+ already protected inside ct_irq_enter()/ct_irq_exit() but preemption
+ or signal handling on irq exit still needs to be protected.
+
+config HAVE_CONTEXT_TRACKING_USER_OFFSTACK
+ bool
+ help
+ Architecture neither relies on exception_enter()/exception_exit()
+ nor on schedule_user(). Also preempt_schedule_notrace() and
+ preempt_schedule_irq() can't be called in a preemptible section
+ while context tracking is CONTEXT_USER. This feature reflects a sane
+ entry implementation where the following requirements are met on
+ critical entry code, i.e. before user_exit() or after user_enter():
+
+ - Critical entry code isn't preemptible (or better yet:
+ not interruptible).
+ - No use of RCU read side critical sections, unless ct_nmi_enter()
+ got called.
+ - No use of instrumentation, unless instrumentation_begin() got
+ called.
+
+config HAVE_TIF_NOHZ
+ bool
+ help
+ Arch relies on TIF_NOHZ and syscall slow path to implement context
+ tracking calls to user_enter()/user_exit().
+
+config HAVE_VIRT_CPU_ACCOUNTING
+ bool
+
+config HAVE_VIRT_CPU_ACCOUNTING_IDLE
+ bool
+ help
+ Architecture has its own way to account idle CPU time and therefore
+ doesn't implement vtime_account_idle().
+
+config ARCH_HAS_SCALED_CPUTIME
+ bool
+
+config HAVE_VIRT_CPU_ACCOUNTING_GEN
+ bool
+ default y if 64BIT
+ help
+ With VIRT_CPU_ACCOUNTING_GEN, cputime_t becomes 64-bit.
+ Before enabling this option, arch code must be audited
+ to ensure there are no races in concurrent read/write of
+ cputime_t. For example, reading/writing 64-bit cputime_t on
+ some 32-bit arches may require multiple accesses, so proper
+ locking is needed to protect against concurrent accesses.
+
+config HAVE_IRQ_TIME_ACCOUNTING
+ bool
+ help
+ Archs need to ensure they use a high enough resolution clock to
+ support irq time accounting and then call enable_sched_clock_irqtime().
+
+config HAVE_MOVE_PUD
+ bool
+ help
+ Architectures that select this are able to move page tables at the
+ PUD level. If there are only 3 page table levels, the move effectively
+ happens at the PGD level.
+
+config HAVE_MOVE_PMD
+ bool
+ help
+ Archs that select this are able to move page tables at the PMD level.
+
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ bool
+
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ bool
+
+config HAVE_ARCH_HUGE_VMAP
+ bool
+
+#
+# Archs that select this would be capable of PMD-sized vmaps (i.e.,
+# arch_vmap_pmd_supported() returns true). The VM_ALLOW_HUGE_VMAP flag
+# must be used to enable allocations to use hugepages.
+#
+config HAVE_ARCH_HUGE_VMALLOC
+ depends on HAVE_ARCH_HUGE_VMAP
+ bool
+
+config ARCH_WANT_HUGE_PMD_SHARE
+ bool
+
+config HAVE_ARCH_SOFT_DIRTY
+ bool
+
+config HAVE_MOD_ARCH_SPECIFIC
+ bool
+ help
+ The arch uses struct mod_arch_specific to store data. Many arches
+ just need a simple module loader without arch specific data - those
+ should not enable this.
+
+config MODULES_USE_ELF_RELA
+ bool
+ help
+ Modules only use ELF RELA relocations. Modules with ELF REL
+ relocations will give an error.
+
+config MODULES_USE_ELF_REL
+ bool
+ help
+ Modules only use ELF REL relocations. Modules with ELF RELA
+ relocations will give an error.
+
+config ARCH_WANTS_MODULES_DATA_IN_VMALLOC
+ bool
+ help
+ For architectures like powerpc/32 which have constraints on module
+ allocation and need to allocate module data outside of the module area.
+
+config HAVE_IRQ_EXIT_ON_IRQ_STACK
+ bool
+ help
+ The architecture executes not only the irq handler on the irq stack
+ but also irq_exit(). This way we can process softirqs on this irq
+ stack instead of switching to a new one when we call __do_softirq()
+ at the end of a hardirq.
+ This spares a stack switch and improves cache usage on softirq
+ processing.
+
+config HAVE_SOFTIRQ_ON_OWN_STACK
+ bool
+ help
+ Architecture provides a function to run __do_softirq() on a
+ separate stack.
+
+config SOFTIRQ_ON_OWN_STACK
+ def_bool HAVE_SOFTIRQ_ON_OWN_STACK && !PREEMPT_RT
+
+config ALTERNATE_USER_ADDRESS_SPACE
+ bool
+ help
+ Architectures set this when the CPU uses separate address
+ spaces for kernel and user space pointers. In this case, the
+ access_ok() check on a __user pointer is skipped.
+
+config PGTABLE_LEVELS
+ int
+ default 2
+
+config ARCH_HAS_ELF_RANDOMIZE
+ bool
+ help
+ An architecture supports choosing randomized locations for
+ stack, mmap, brk, and ET_DYN. Defined functions:
+ - arch_mmap_rnd()
+ - arch_randomize_brk()
+
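+# A sketch of one such hook, assuming a page-granular random offset
+# bounded by the mmap_rnd_bits tunable (the exact scheme is per arch):
+#
+#   unsigned long arch_mmap_rnd(void)
+#   {
+#           unsigned long rnd;
+#
+#           rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
+#           return rnd << PAGE_SHIFT;
+#   }
+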
+config HAVE_ARCH_MMAP_RND_BITS
+ bool
+ help
+ An arch should select this symbol if it supports setting a variable
+ number of bits for use in establishing the base address for mmap
+ allocations, has MMU enabled and provides values for both:
+ - ARCH_MMAP_RND_BITS_MIN
+ - ARCH_MMAP_RND_BITS_MAX
+
+config HAVE_EXIT_THREAD
+ bool
+ help
+ An architecture implements exit_thread.
+
+config ARCH_MMAP_RND_BITS_MIN
+ int
+
+config ARCH_MMAP_RND_BITS_MAX
+ int
+
+config ARCH_MMAP_RND_BITS_DEFAULT
+ int
+
+config ARCH_MMAP_RND_BITS
+ int "Number of bits to use for ASLR of mmap base address" if EXPERT
+ range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
+ default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
+ default ARCH_MMAP_RND_BITS_MIN
+ depends on HAVE_ARCH_MMAP_RND_BITS
+ help
+ This value can be used to select the number of bits to use to
+ determine the random offset to the base address of vma regions
+ resulting from mmap allocations. This value will be bounded
+ by the architecture's minimum and maximum supported values.
+
+ This value can be changed after boot using the
+ /proc/sys/vm/mmap_rnd_bits tunable.
+
+config HAVE_ARCH_MMAP_RND_COMPAT_BITS
+ bool
+ help
+ An arch should select this symbol if it supports running applications
+ in compatibility mode, supports setting a variable number of bits for
+ use in establishing the base address for mmap allocations, has MMU
+ enabled and provides values for both:
+ - ARCH_MMAP_RND_COMPAT_BITS_MIN
+ - ARCH_MMAP_RND_COMPAT_BITS_MAX
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+ int
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+ int
+
+config ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
+ int
+
+config ARCH_MMAP_RND_COMPAT_BITS
+ int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
+ range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
+ default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
+ default ARCH_MMAP_RND_COMPAT_BITS_MIN
+ depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
+ help
+ This value can be used to select the number of bits to use to
+ determine the random offset to the base address of vma regions
+ resulting from mmap allocations for compatible applications. This
+ value will be bounded by the architecture's minimum and maximum
+ supported values.
+
+ This value can be changed after boot using the
+ /proc/sys/vm/mmap_rnd_compat_bits tunable.
+
+config HAVE_ARCH_COMPAT_MMAP_BASES
+ bool
+ help
+ This allows 64-bit applications to invoke the 32-bit mmap() syscall
+ and, vice versa, 32-bit applications to call the 64-bit mmap().
+ Required for applications doing syscalls of different bitness.
+
+config PAGE_SIZE_LESS_THAN_64KB
+ def_bool y
+ depends on !ARM64_64K_PAGES
+ depends on !IA64_PAGE_SIZE_64KB
+ depends on !PAGE_SIZE_64KB
+ depends on !PARISC_PAGE_SIZE_64KB
+ depends on PAGE_SIZE_LESS_THAN_256KB
+
+config PAGE_SIZE_LESS_THAN_256KB
+ def_bool y
+ depends on !PAGE_SIZE_256KB
+
+# This allows using a set of generic functions to determine the mmap
+# base address by giving priority to the top-down scheme only if the
+# process is not in legacy mode (compat task, unlimited stack size or
+# sysctl_legacy_va_layout).
+# An architecture that selects this option can provide its own version of:
+# - STACK_RND_MASK
+config ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+ bool
+ depends on MMU
+ select ARCH_HAS_ELF_RANDOMIZE
+
+config HAVE_OBJTOOL
+ bool
+
+config HAVE_JUMP_LABEL_HACK
+ bool
+
+config HAVE_NOINSTR_HACK
+ bool
+
+config HAVE_NOINSTR_VALIDATION
+ bool
+
+config HAVE_UACCESS_VALIDATION
+ bool
+ select OBJTOOL
+
+config HAVE_STACK_VALIDATION
+ bool
+ help
+ Architecture supports objtool compile-time frame pointer rule
+ validation.
+
+config HAVE_RELIABLE_STACKTRACE
+ bool
+ help
+ Architecture has either a save_stack_trace_tsk_reliable() or an
+ arch_stack_walk_reliable() function, which only returns a stack trace
+ if it can guarantee the trace is reliable.
+
+config HAVE_ARCH_HASH
+ bool
+ default n
+ help
+ If this is set, the architecture provides an <asm/hash.h>
+ file with platform-specific implementations of some
+ functions in <linux/hash.h> or fs/namei.c.
+
+config HAVE_ARCH_NVRAM_OPS
+ bool
+
+config ISA_BUS_API
+ def_bool ISA
+
+#
+# ABI hall of shame
+#
+config CLONE_BACKWARDS
+ bool
+ help
+ Architecture has tls passed as the 4th argument of clone(2),
+ not the 5th one.
+
+config CLONE_BACKWARDS2
+ bool
+ help
+ Architecture has the first two arguments of clone(2) swapped.
+
+config CLONE_BACKWARDS3
+ bool
+ help
+ Architecture has tls passed as the 3rd argument of clone(2),
+ not the 5th one.
+
+config ODD_RT_SIGACTION
+ bool
+ help
+ Architecture has unusual rt_sigaction(2) arguments.
+
+config OLD_SIGSUSPEND
+ bool
+ help
+ Architecture has the old sigsuspend(2) syscall, of the one-argument variety.
+
+config OLD_SIGSUSPEND3
+ bool
+ help
+ An even weirder antique ABI: three-argument sigsuspend(2).
+
+config OLD_SIGACTION
+ bool
+ help
+ Architecture has the old sigaction(2) syscall. Nope, not the same
+ as OLD_SIGSUSPEND | OLD_SIGSUSPEND3 - alpha has sigsuspend(2),
+ but a fairly different variant of sigaction(2), thanks to OSF/1
+ compatibility...
+
+config COMPAT_OLD_SIGACTION
+ bool
+
+config COMPAT_32BIT_TIME
+ bool "Provide system calls for 32-bit time_t"
+ default !64BIT || COMPAT
+ help
+ This enables 32-bit time_t support in addition to 64-bit time_t support.
+ This is relevant on all 32-bit architectures, and on 64-bit architectures
+ as part of compat syscall handling.
+
+config ARCH_NO_PREEMPT
+ bool
+
+config ARCH_EPHEMERAL_INODES
+ def_bool n
+ help
+ An arch should select this symbol if it doesn't keep track of inode
+ instances on its own, but instead relies on something else (e.g. the
+ host kernel for a UML kernel).
+
+config ARCH_SUPPORTS_RT
+ bool
+
+config CPU_NO_EFFICIENT_FFS
+ def_bool n
+
+config HAVE_ARCH_VMAP_STACK
+ def_bool n
+ help
+ An arch should select this symbol if it can support kernel stacks
+ in vmalloc space. This means:
+
+ - vmalloc space must be large enough to hold many kernel stacks.
+ This may rule out many 32-bit architectures.
+
+ - Stacks in vmalloc space need to work reliably. For example, if
+ vmap page tables are created on demand, either this mechanism
+ needs to work while the stack points to a virtual address with
+ unpopulated page tables or arch code (switch_to() and switch_mm(),
+ most likely) needs to ensure that the stack's page table entries
+ are populated before running on a possibly unpopulated stack.
+
+ - If the stack overflows into a guard page, something reasonable
+ should happen. The definition of "reasonable" is flexible, but
+ instantly rebooting without logging anything would be unfriendly.
+
+config VMAP_STACK
+ default y
+ bool "Use a virtually-mapped stack"
+ depends on HAVE_ARCH_VMAP_STACK
+ depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC
+ help
+ Enable this if you want to use virtually-mapped kernel stacks
+ with guard pages. This causes kernel stack overflows to be
+ caught immediately rather than causing difficult-to-diagnose
+ corruption.
+
+ To use this with software KASAN modes, the architecture must support
+ backing virtual mappings with real shadow memory, and KASAN_VMALLOC
+ must be enabled.
+
+config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+ def_bool n
+ help
+ An arch should select this symbol if it can support kernel stack
+ offset randomization with calls to add_random_kstack_offset()
+ during syscall entry and choose_random_kstack_offset() during
+ syscall exit. Careful removal of -fstack-protector-strong and
+ -fstack-protector should also be applied to the entry code and
+ closely examined, as the artificial stack bump looks like an array
+ to the compiler, so it will attempt to add canary checks regardless
+ of the static branch state.
+
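+# A sketch of the entry/exit pairing described above, as arch syscall
+# code might use it (the function name is hypothetical and the entropy
+# source varies per arch):
+#
+#   static void invoke_syscall(struct pt_regs *regs, unsigned int nr)
+#   {
+#           add_random_kstack_offset();     /* bump stack before handler */
+#           /* ... dispatch the syscall ... */
+#           choose_random_kstack_offset(get_random_u16());
+#   }
+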
+config RANDOMIZE_KSTACK_OFFSET
+ bool "Support for randomizing kernel stack offset on syscall entry" if EXPERT
+ default y
+ depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+ depends on INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION >= 140000
+ help
+ The kernel stack offset can be randomized (after pt_regs) by
+ roughly 5 bits of entropy, frustrating memory corruption
+ attacks that depend on stack address determinism or
+ cross-syscall address exposures.
+
+ The feature is controlled via the "randomize_kstack_offset=on/off"
+ kernel boot param, and if turned off has zero overhead due to its use
+ of static branches (see JUMP_LABEL).
+
+ If unsure, say Y.
+
+config RANDOMIZE_KSTACK_OFFSET_DEFAULT
+ bool "Default state of kernel stack offset randomization"
+ depends on RANDOMIZE_KSTACK_OFFSET
+ help
+ Kernel stack offset randomization is controlled by kernel boot param
+ "randomize_kstack_offset=on/off", and this config chooses the default
+ boot state.
+
+config ARCH_OPTIONAL_KERNEL_RWX
+ def_bool n
+
+config ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+ def_bool n
+
+config ARCH_HAS_STRICT_KERNEL_RWX
+ def_bool n
+
+config STRICT_KERNEL_RWX
+ bool "Make kernel text and rodata read-only" if ARCH_OPTIONAL_KERNEL_RWX
+ depends on ARCH_HAS_STRICT_KERNEL_RWX
+ default !ARCH_OPTIONAL_KERNEL_RWX || ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+ help
+ If this is set, kernel text and rodata memory will be made read-only,
+ and non-text memory will be made non-executable. This provides
+ protection against certain security exploits (e.g. executing the heap
+ or modifying text).
+
+ These features are considered standard security practice these days.
+ You should say Y here in almost all cases.
+
+config ARCH_HAS_STRICT_MODULE_RWX
+ def_bool n
+
+config STRICT_MODULE_RWX
+ bool "Set loadable kernel module data as NX and text as RO" if ARCH_OPTIONAL_KERNEL_RWX
+ depends on ARCH_HAS_STRICT_MODULE_RWX && MODULES
+ default !ARCH_OPTIONAL_KERNEL_RWX || ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+ help
+ If this is set, module text and rodata memory will be made read-only,
+ and non-text memory will be made non-executable. This provides
+ protection against certain security exploits (e.g. writing to text).
+
+# select if the architecture provides an asm/dma-direct.h header
+config ARCH_HAS_PHYS_TO_DMA
+ bool
+
+config HAVE_ARCH_COMPILER_H
+ bool
+ help
+ An architecture can select this if it provides an
+ asm/compiler.h header that should be included after
+ linux/compiler-*.h in order to override macro definitions that those
+ headers generally provide.
+
+config HAVE_ARCH_PREL32_RELOCATIONS
+ bool
+ help
+ May be selected by an architecture if it supports place-relative
+ 32-bit relocations, both in the toolchain and in the module loader,
+ in which case relative references can be used in special sections
+ for PCI fixup, initcalls etc. which are only half the size on 64 bit
+ architectures, and don't require runtime relocation on relocatable
+ kernels.
+
+config ARCH_USE_MEMREMAP_PROT
+ bool
+
+config LOCK_EVENT_COUNTS
+ bool "Locking event counts collection"
+ depends on DEBUG_FS
+ help
+ Enable light-weight counting of various locking related events
+ in the system with minimal performance impact. This reduces
+ the chance of application behavior change because of timing
+ differences. The counts are reported via debugfs.
+
+# Select if the architecture has support for applying RELR relocations.
+config ARCH_HAS_RELR
+ bool
+
+config RELR
+ bool "Use RELR relocation packing"
+ depends on ARCH_HAS_RELR && TOOLS_SUPPORT_RELR
+ default y
+ help
+ Store the kernel's dynamic relocations in the RELR relocation packing
+ format. Requires a compatible linker (LLD supports this feature), as
+ well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy
+ are compatible).
+
+config ARCH_HAS_MEM_ENCRYPT
+ bool
+
+config ARCH_HAS_CC_PLATFORM
+ bool
+
+config HAVE_SPARSE_SYSCALL_NR
+ bool
+ help
+ An architecture should select this if its syscall numbering is sparse
+ to save space. For example, the MIPS architecture has a syscall array with
+ entries at locations 4000, 5000 and 6000. This option turns on syscall
+ related optimizations for a given architecture.
+
+config ARCH_HAS_VDSO_DATA
+ bool
+
+config HAVE_STATIC_CALL
+ bool
+
+config HAVE_STATIC_CALL_INLINE
+ bool
+ depends on HAVE_STATIC_CALL
+ select OBJTOOL
+
+config HAVE_PREEMPT_DYNAMIC
+ bool
+
+config HAVE_PREEMPT_DYNAMIC_CALL
+ bool
+ depends on HAVE_STATIC_CALL
+ select HAVE_PREEMPT_DYNAMIC
+ help
+ An architecture should select this if it can handle the preemption
+ model being selected at boot time using static calls.
+
+ Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
+ preemption function will be patched directly.
+
+ Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
+ call to a preemption function will go through a trampoline, and the
+ trampoline will be patched.
+
+ It is strongly advised to support inline static calls to avoid any
+ overhead.
+
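+# The static-call API this builds on, sketched (the call and function
+# names are hypothetical):
+#
+#   #include <linux/static_call.h>
+#
+#   static void preempt_none(void) { /* ... */ }
+#   static void preempt_full(void) { /* ... */ }
+#
+#   DEFINE_STATIC_CALL(sched_preempt, preempt_none);
+#
+#   /* call site; patched directly or via trampoline as described: */
+#   static_call(sched_preempt)();
+#
+#   /* switch the model at boot: */
+#   static_call_update(sched_preempt, &preempt_full);
+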
+config HAVE_PREEMPT_DYNAMIC_KEY
+ bool
+ depends on HAVE_ARCH_JUMP_LABEL
+ select HAVE_PREEMPT_DYNAMIC
+ help
+ An architecture should select this if it can handle the preemption
+ model being selected at boot time using static keys.
+
+ Each preemption function will be given an early return based on a
+ static key. This should have slightly lower overhead than non-inline
+ static calls, as this effectively inlines each trampoline into the
+ start of its callee. This may avoid redundant work, and may
+ integrate better with CFI schemes.
+
+ This will have greater overhead than using inline static calls as
+ the call to the preemption function cannot be entirely elided.
+
+config ARCH_WANT_LD_ORPHAN_WARN
+ bool
+ help
+ An arch should select this symbol once all linker sections are explicitly
+ included, size-asserted, or discarded in the linker scripts. This is
+ important because we never want expected sections to be placed heuristically
+ by the linker, since the locations of such sections can change between linker
+ versions.
+
+config HAVE_ARCH_PFN_VALID
+ bool
+
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ bool
+
+config ARCH_SUPPORTS_PAGE_TABLE_CHECK
+ bool
+
+config ARCH_SPLIT_ARG64
+ bool
+ help
+ If a 32-bit architecture requires 64-bit arguments to be split into
+ pairs of 32-bit arguments, select this option.
+
+config ARCH_HAS_ELFCORE_COMPAT
+ bool
+
+config ARCH_HAS_PARANOID_L1D_FLUSH
+ bool
+
+config ARCH_HAVE_TRACE_MMIO_ACCESS
+ bool
+
+config DYNAMIC_SIGFRAME
+ bool
+
+# Select if the arch has a named attribute group bound to NUMA device nodes.
+config HAVE_ARCH_NODE_DEV_GROUP
+ bool
+
+config ARCH_HAS_NONLEAF_PMD_YOUNG
+ bool
+ help
+ Architectures that select this option are capable of setting the
+ accessed bit in non-leaf PMD entries when using them as part of linear
+ address translations. Page table walkers that clear the accessed bit
+ may use this capability to reduce their search space.
+
+source "kernel/gcov/Kconfig"
+
+source "scripts/gcc-plugins/Kconfig"
+
+endmenu