author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit     01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree       b406c5242a088c4f59c6e4b719b783f43aca6ae9 /mm/Kconfig
parent     Adding upstream version 6.7.12. (diff)
download   linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.tar.xz
           linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.zip
Adding upstream version 6.8.9. (tag: upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'mm/Kconfig')
-rw-r--r--  mm/Kconfig  103
1 file changed, 46 insertions(+), 57 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 57cd378c73..ffc3a2ba3a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -61,6 +61,20 @@ config ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON
 	  The cost is that if the page was never dirtied and needs to be
 	  swapped out again, it will be re-compressed.
 
+config ZSWAP_SHRINKER_DEFAULT_ON
+	bool "Shrink the zswap pool on memory pressure"
+	depends on ZSWAP
+	default n
+	help
+	  If selected, the zswap shrinker will be enabled, and the pages
+	  stored in the zswap pool will become available for reclaim (i.e
+	  written back to the backing swap device) on memory pressure.
+
+	  This means that zswap writeback could happen even if the pool is
+	  not yet full, or the cgroup zswap limit has not been reached,
+	  reducing the chance that cold pages will reside in the zswap
+	  pool and consume memory indefinitely.
+
 choice
 	prompt "Default compressor"
 	depends on ZSWAP
@@ -226,52 +240,17 @@ config ZSMALLOC_CHAIN_SIZE
 
 	  For more information, see zsmalloc documentation.
 
-menu "SLAB allocator options"
-
-choice
-	prompt "Choose SLAB allocator"
-	default SLUB
-	help
-	   This option allows to select a slab allocator.
-
-config SLAB_DEPRECATED
-	bool "SLAB (DEPRECATED)"
-	depends on !PREEMPT_RT
-	help
-	  Deprecated and scheduled for removal in a few cycles. Replaced by
-	  SLUB.
-
-	  If you cannot migrate to SLUB, please contact linux-mm@kvack.org
-	  and the people listed in the SLAB ALLOCATOR section of MAINTAINERS
-	  file, explaining why.
-
-	  The regular slab allocator that is established and known to work
-	  well in all environments. It organizes cache hot objects in
-	  per cpu and per node queues.
+menu "Slab allocator options"
 
 config SLUB
-	bool "SLUB (Unqueued Allocator)"
-	help
-	   SLUB is a slab allocator that minimizes cache line usage
-	   instead of managing queues of cached objects (SLAB approach).
-	   Per cpu caching is realized using slabs of objects instead
-	   of queues of objects. SLUB can use memory efficiently
-	   and has enhanced diagnostics. SLUB is the default choice for
-	   a slab allocator.
-
-endchoice
-
-config SLAB
-	bool
-	default y
-	depends on SLAB_DEPRECATED
+	def_bool y
 
 config SLUB_TINY
-	bool "Configure SLUB for minimal memory footprint"
-	depends on SLUB && EXPERT
+	bool "Configure for minimal memory footprint"
+	depends on EXPERT
 	select SLAB_MERGE_DEFAULT
 	help
-	   Configures the SLUB allocator in a way to achieve minimal memory
+	   Configures the slab allocator in a way to achieve minimal memory
 	   footprint, sacrificing scalability, debugging and other features.
 	   This is intended only for the smallest system that had used the
 	   SLOB allocator and is not recommended for systems with more than
@@ -282,7 +261,6 @@ config SLUB_TINY
 config SLAB_MERGE_DEFAULT
 	bool "Allow slab caches to be merged"
 	default y
-	depends on SLAB || SLUB
 	help
 	  For reduced kernel memory fragmentation, slab caches can be
 	  merged when they share the same size and other characteristics.
@@ -296,7 +274,7 @@ config SLAB_MERGE_DEFAULT
 
 config SLAB_FREELIST_RANDOM
 	bool "Randomize slab freelist"
-	depends on SLAB || (SLUB && !SLUB_TINY)
+	depends on !SLUB_TINY
 	help
 	  Randomizes the freelist order used on creating new pages. This
 	  security feature reduces the predictability of the kernel slab
@@ -304,21 +282,19 @@ config SLAB_FREELIST_RANDOM
 
 config SLAB_FREELIST_HARDENED
 	bool "Harden slab freelist metadata"
-	depends on SLAB || (SLUB && !SLUB_TINY)
+	depends on !SLUB_TINY
 	help
 	  Many kernel heap attacks try to target slab cache metadata and
 	  other infrastructure. This options makes minor performance
 	  sacrifices to harden the kernel slab allocator against common
-	  freelist exploit methods. Some slab implementations have more
-	  sanity-checking than others. This option is most effective with
-	  CONFIG_SLUB.
+	  freelist exploit methods.
 
 config SLUB_STATS
 	default n
-	bool "Enable SLUB performance statistics"
-	depends on SLUB && SYSFS && !SLUB_TINY
+	bool "Enable performance statistics"
+	depends on SYSFS && !SLUB_TINY
 	help
-	  SLUB statistics are useful to debug SLUBs allocation behavior in
+	  The statistics are useful to debug slab allocation behavior in
 	  order find ways to optimize the allocator. This should never be
 	  enabled for production use since keeping statistics slows down
 	  the allocator by a few percentage points. The slabinfo command
@@ -328,8 +304,8 @@ config SLUB_STATS
 
 config SLUB_CPU_PARTIAL
 	default y
-	depends on SLUB && SMP && !SLUB_TINY
-	bool "SLUB per cpu partial cache"
+	depends on SMP && !SLUB_TINY
+	bool "Enable per cpu partial caches"
 	help
 	  Per cpu partial caches accelerate objects allocation and freeing
 	  that is local to a processor at the price of more indeterminism
@@ -339,7 +315,7 @@ config SLUB_CPU_PARTIAL
 
 config RANDOM_KMALLOC_CACHES
 	default n
-	depends on SLUB && !SLUB_TINY
+	depends on !SLUB_TINY
 	bool "Randomize slab caches for normal kmalloc"
 	help
 	  A hardening feature that creates multiple copies of slab caches for
@@ -354,7 +330,7 @@ config RANDOM_KMALLOC_CACHES
 	  limited degree of memory and CPU overhead that relates to hardware and
 	  system workload.
 
-endmenu # SLAB allocator options
+endmenu # Slab allocator options
 
 config SHUFFLE_PAGE_ALLOCATOR
 	bool "Page allocator randomization"
@@ -367,7 +343,7 @@ config SHUFFLE_PAGE_ALLOCATOR
 	  the presence of a memory-side-cache. There are also incidental
 	  security benefits as it reduces the predictability of page
 	  allocations to compliment SLAB_FREELIST_RANDOM, but the
-	  default granularity of shuffling on the MAX_ORDER i.e, 10th
+	  default granularity of shuffling on the MAX_PAGE_ORDER i.e, 10th
 	  order of pages is selected based on cache utilization benefits
 	  on x86.
 
@@ -699,8 +675,8 @@ config HUGETLB_PAGE_SIZE_VARIABLE
 	  HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
 	  on a platform.
 
-	  Note that the pageblock_order cannot exceed MAX_ORDER and will be
-	  clamped down to MAX_ORDER.
+	  Note that the pageblock_order cannot exceed MAX_PAGE_ORDER and will be
+	  clamped down to MAX_PAGE_ORDER.
 
 config CONTIG_ALLOC
 	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
@@ -756,7 +732,7 @@ config DEFAULT_MMAP_MIN_ADDR
 	  from userspace allocation. Keeping a user from writing to low pages
 	  can help reduce the impact of kernel NULL pointer bugs.
 
-	  For most ia64, ppc64 and x86 users with lots of address space
+	  For most ppc64 and x86 users with lots of address space
 	  a value of 65536 is reasonable and should cause no problems.
 	  On arm and other archs it should not be higher than 32768.
 	  Programs which use vm86 functionality or have some need to map
@@ -859,6 +835,12 @@ choice
 	    madvise(MADV_HUGEPAGE) but it won't risk to increase the
 	    memory footprint of applications without a guaranteed
 	    benefit.
+
+	config TRANSPARENT_HUGEPAGE_NEVER
+		bool "never"
+		help
+		  Disable Transparent Hugepage by default. It can still be
+		  enabled at runtime via sysfs.
 endchoice
 
 config THP_SWAP
@@ -1254,6 +1236,10 @@ config LRU_GEN_STATS
 	  from evicted generations for debugging purpose.
 
 	  This option has a per-memcg and per-node memory overhead.
+
+config LRU_GEN_WALKS_MMU
+	def_bool y
+	depends on LRU_GEN && ARCH_HAS_HW_PTE_YOUNG
 # }
 
 config ARCH_SUPPORTS_PER_VMA_LOCK
@@ -1272,6 +1258,9 @@ config LOCK_MM_AND_FIND_VMA
 	bool
 	depends on !STACK_GROWSUP
 
+config IOMMU_MM_DATA
+	bool
+
 source "mm/damon/Kconfig"
 
 endmenu
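A note on the new ZSWAP_SHRINKER_DEFAULT_ON option: as its help text says, it only sets the boot-time default. A minimal sketch of how the feature might be exercised, assuming the zswap 'shrinker_enabled' module parameter that accompanies this feature (values are illustrative, not part of this patch):

    # .config fragment (illustrative)
    CONFIG_ZSWAP=y
    CONFIG_ZSWAP_SHRINKER_DEFAULT_ON=y

    # Runtime equivalent, assuming the zswap module parameter:
    #   echo Y > /sys/module/zswap/parameters/shrinker_enabled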
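With SLAB removed and SLUB made unconditional (def_bool y), the slab hardening options above now gate only on !SLUB_TINY. A sketch of a config combining them, using only symbols visible in this patch (illustrative, not a recommendation):

    # .config fragment (illustrative)
    CONFIG_SLUB=y                   # forced on; SLAB is gone
    # CONFIG_SLUB_TINY is not set   # must stay off for the options below
    CONFIG_SLAB_FREELIST_RANDOM=y
    CONFIG_SLAB_FREELIST_HARDENED=y
    CONFIG_RANDOM_KMALLOC_CACHES=y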
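Likewise, TRANSPARENT_HUGEPAGE_NEVER only selects the sysfs default. A sketch of a build that keeps THP compiled in but disabled by default (illustrative):

    # .config fragment (illustrative)
    CONFIG_TRANSPARENT_HUGEPAGE=y
    CONFIG_TRANSPARENT_HUGEPAGE_NEVER=y

    # THP can then be re-enabled at runtime through the standard sysfs knob:
    #   echo madvise > /sys/kernel/mm/transparent_hugepage/enabled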