author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:40 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:40 +0000
commit     8b0a8165cdad0f4133837d753649ef4682e42c3b (patch)
tree       5c58f869f31ddb1f7bd6e8bdea269b680b36c5b6 /mm/slub.c
parent     Releasing progress-linux version 6.8.12-1~progress7.99u1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  116
1 file changed, 49 insertions(+), 67 deletions(-)
@@ -295,7 +295,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 
 /*
  * Debugging flags that require metadata to be stored in the slab. These get
- * disabled when slub_debug=O is used and a cache's min order increases with
+ * disabled when slab_debug=O is used and a cache's min order increases with
  * metadata.
  */
 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -306,13 +306,13 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 
 /* Internal SLUB flags */
 /* Poison object */
-#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
+#define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
 /* Use cmpxchg_double */
 #ifdef system_has_freelist_aba
-#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
+#define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
 #else
-#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0U)
+#define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
 #endif
 
 /*
@@ -391,7 +391,7 @@ struct kmem_cache_cpu {
 	};
 	struct slab *slab;	/* The slab from which we are allocating */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
-	struct slab *partial;	/* Partially allocated frozen slabs */
+	struct slab *partial;	/* Partially allocated slabs */
 #endif
 	local_lock_t lock;	/* Protects the fields above */
 #ifdef CONFIG_SLUB_STATS
@@ -1498,16 +1498,8 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
 
-	/*
-	 * May be called early in order to allocate a slab for the
-	 * kmem_cache_node structure. Solve the chicken-egg
-	 * dilemma by deferring the increment of the count during
-	 * bootstrap (see early_kmem_cache_node_alloc).
-	 */
-	if (likely(n)) {
-		atomic_long_inc(&n->nr_slabs);
-		atomic_long_add(objects, &n->total_objects);
-	}
+	atomic_long_inc(&n->nr_slabs);
+	atomic_long_add(objects, &n->total_objects);
 }
 
 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
 {
@@ -1616,7 +1608,7 @@ static inline int free_consistency_checks(struct kmem_cache *s,
 }
 
 /*
- * Parse a block of slub_debug options. Blocks are delimited by ';'
+ * Parse a block of slab_debug options. Blocks are delimited by ';'
  *
  * @str: start of block
  * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
@@ -1677,7 +1669,7 @@ parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
 			break;
 		default:
 			if (init)
-				pr_err("slub_debug option '%c' unknown. skipped\n", *str);
+				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
 		}
 	}
 check_slabs:
@@ -1736,7 +1728,7 @@ static int __init setup_slub_debug(char *str)
 	/*
 	 * For backwards compatibility, a single list of flags with list of
 	 * slabs means debugging is only changed for those slabs, so the global
-	 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
+	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
 	 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
 	 * long as there is no option specifying flags without a slab list.
 	 */
@@ -1760,21 +1752,20 @@ out:
 	return 1;
 }
 
-__setup("slub_debug", setup_slub_debug);
+__setup("slab_debug", setup_slub_debug);
+__setup_param("slub_debug", slub_debug, setup_slub_debug, 0);
 
 /*
  * kmem_cache_flags - apply debugging options to the cache
- * @object_size: the size of an object without meta data
  * @flags: flags to set
 * @name: name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
- * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
+ * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the select slabs will receive the debug option(s).
 */
-slab_flags_t kmem_cache_flags(unsigned int object_size,
-	slab_flags_t flags, const char *name)
+slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
 {
 	char *iter;
 	size_t len;
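
Note: the hunks above rename the boot parameter from slub_debug to slab_debug, keeping the old spelling as an alias via __setup_param(). As a rough guide to what the renamed parser does, here is a minimal userspace sketch of the flag-block parsing; the option letters match mm/slub.c, but the flag values and the name parse_debug_block are illustrative stand-ins, not the kernel's SLAB_* constants or parse_slub_debug_flags().

    /* Hypothetical model of slab_debug flag-block parsing; values are
     * made up, only the option letters follow the kernel. */
    #include <stdio.h>

    #define F_SANITY   0x01U  /* 'F' - consistency checks */
    #define F_REDZONE  0x02U  /* 'Z' - red zoning */
    #define F_POISON   0x04U  /* 'P' - object poisoning */
    #define F_USER     0x08U  /* 'U' - alloc/free user tracking */
    #define F_TRACE    0x10U  /* 'T' - tracing */

    static unsigned int parse_debug_block(const char *str)
    {
            unsigned int flags = 0;

            /* Option letters run until the ',' that starts a slab list
             * or the ';' that ends this block. */
            for (; *str && *str != ',' && *str != ';'; str++) {
                    switch (*str) {
                    case 'F': flags |= F_SANITY; break;
                    case 'Z': flags |= F_REDZONE; break;
                    case 'P': flags |= F_POISON; break;
                    case 'U': flags |= F_USER; break;
                    case 'T': flags |= F_TRACE; break;
                    case 'O': break; /* would relax min-order caches */
                    default:
                            fprintf(stderr, "slab_debug option '%c' unknown. skipped\n", *str);
                    }
            }
            return flags;
    }

    int main(void)
    {
            /* e.g. booted with slab_debug=FZP,kmalloc-64 */
            printf("flags=%#x\n", parse_debug_block("FZP,kmalloc-64"));
            return 0;
    }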
@@ -1850,8 +1841,7 @@ static inline void add_full(struct kmem_cache *s,
 					struct kmem_cache_node *n, struct slab *slab) {}
 static inline void remove_full(struct kmem_cache *s,
 					struct kmem_cache_node *n, struct slab *slab) {}
-slab_flags_t kmem_cache_flags(unsigned int object_size,
-	slab_flags_t flags, const char *name)
+slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
 {
 	return flags;
 }
@@ -2038,11 +2028,6 @@ void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
 	obj_cgroup_uncharge(objcg, objects * obj_full_size(s));
 }
 #else /* CONFIG_MEMCG_KMEM */
-static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
-{
-	return NULL;
-}
-
 static inline void memcg_free_slab_cgroups(struct slab *slab)
 {
 }
@@ -2248,7 +2233,7 @@ static void __init init_freelist_randomization(void)
 }
 
 /* Get the next entry on the pre-computed freelist randomized */
-static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab,
+static void *next_freelist_entry(struct kmem_cache *s,
 				unsigned long *pos, void *start,
 				unsigned long page_limit,
 				unsigned long freelist_count)
@@ -2287,13 +2272,12 @@ static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
 	start = fixup_red_left(s, slab_address(slab));
 
 	/* First entry is used as the base of the freelist */
-	cur = next_freelist_entry(s, slab, &pos, start, page_limit,
-				  freelist_count);
+	cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
 	cur = setup_object(s, cur);
 	slab->freelist = cur;
 	for (idx = 1; idx < slab->objects; idx++) {
-		next = next_freelist_entry(s, slab, &pos, start, page_limit,
+		next = next_freelist_entry(s, &pos, start, page_limit,
 					   freelist_count);
 		next = setup_object(s, next);
 		set_freepointer(s, cur, next);
@@ -3268,7 +3252,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 			oo_order(s->min));
 
 	if (oo_order(s->min) > get_order(s->object_size))
-		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
+		pr_warn("  %s debugging increased min order, use slab_debug=O to disable.\n",
 			s->name);
 
 	for_each_kmem_cache_node(s, node, n) {
@@ -3331,7 +3315,6 @@ static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
 		counters = slab->counters;
 
 		new.counters = counters;
-		VM_BUG_ON(!new.frozen);
 
 		new.inuse = slab->objects;
 		new.frozen = freelist != NULL;
@@ -3503,18 +3486,20 @@ new_slab:
 		slab = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, slab);
-		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-		stat(s, CPU_PARTIAL_ALLOC);
 
-		if (unlikely(!node_match(slab, node) ||
-			     !pfmemalloc_match(slab, gfpflags))) {
-			slab->next = NULL;
-			__put_partials(s, slab);
-			continue;
+		if (likely(node_match(slab, node) &&
+			   pfmemalloc_match(slab, gfpflags))) {
+			c->slab = slab;
+			freelist = get_freelist(s, slab);
+			VM_BUG_ON(!freelist);
+			stat(s, CPU_PARTIAL_ALLOC);
+			goto load_freelist;
 		}
 
-		freelist = freeze_slab(s, slab);
-		goto retry_load_slab;
+		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+
+		slab->next = NULL;
+		__put_partials(s, slab);
 	}
 #endif
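
Note: dropping the unused slab argument from next_freelist_entry() is a pure cleanup; the walk over the pre-computed random sequence is unchanged. The self-contained userspace sketch below shows how shuffle_freelist() chains objects through such a sequence; random_seq, NOBJ, and free_next are stand-ins for the kernel's s->random_seq, slab->objects, and freelist pointers, not the real data structures.

    #include <stdio.h>

    #define NOBJ 8

    /* Stand-in for s->random_seq, which the kernel fills with a
     * shuffled 0..NOBJ-1 permutation at cache init. */
    static const unsigned int random_seq[NOBJ] = { 3, 0, 6, 2, 7, 1, 5, 4 };

    struct object { struct object *free_next; };

    /* Simplified next_freelist_entry(): pick the next pre-shuffled
     * slot, wrapping pos the way the kernel does. */
    static struct object *next_freelist_entry(struct object *start,
                                              unsigned long *pos,
                                              unsigned long count)
    {
            unsigned int idx = random_seq[*pos];

            if (++*pos >= count)
                    *pos = 0;
            return &start[idx];
    }

    int main(void)
    {
            struct object slab[NOBJ];
            struct object *head, *cur, *next;
            unsigned long pos = 0;
            int idx;

            /* First entry is used as the base of the freelist. */
            head = cur = next_freelist_entry(slab, &pos, NOBJ);
            for (idx = 1; idx < NOBJ; idx++) {
                    next = next_freelist_entry(slab, &pos, NOBJ);
                    cur->free_next = next;  /* set_freepointer() analogue */
                    cur = next;
            }
            cur->free_next = NULL;

            for (cur = head; cur; cur = cur->free_next)
                    printf("obj %td\n", cur - slab);
            return 0;
    }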
@@ -3798,11 +3783,11 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
 		zero_size = orig_size;
 
 	/*
-	 * When slub_debug is enabled, avoid memory initialization integrated
+	 * When slab_debug is enabled, avoid memory initialization integrated
 	 * into KASAN and instead zero out the memory via the memset below with
 	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
 	 * cause false-positive reports. This does not lead to a performance
-	 * penalty on production builds, as slub_debug is not intended to be
+	 * penalty on production builds, as slab_debug is not intended to be
 	 * enabled there.
 	 */
 	if (__slub_debug_enabled())
@@ -4193,7 +4178,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 	 * then add it.
 	 */
 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
-		remove_full(s, n, slab);
 		add_partial(n, slab, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -4207,9 +4191,6 @@ slab_empty:
 		 */
 		remove_partial(n, slab);
 		stat(s, FREE_REMOVE_PARTIAL);
-	} else {
-		/* Slab must be on the full list */
-		remove_full(s, n, slab);
 	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
@@ -4708,8 +4689,8 @@ static unsigned int slub_min_objects;
  * activity on the partial lists which requires taking the list_lock. This is
  * less a concern for large slabs though which are rarely used.
  *
- * slub_max_order specifies the order where we begin to stop considering the
- * number of objects in a slab as critical. If we reach slub_max_order then
+ * slab_max_order specifies the order where we begin to stop considering the
+ * number of objects in a slab as critical. If we reach slab_max_order then
  * we try to keep the page order as low as possible. So we accept more waste
  * of space in favor of a small page order.
 *
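
Note: the slab_post_alloc_hook() comment above is the heart of the zero_size logic: with debugging on, only the originally requested size may be zeroed, because the bytes past it are redzone. A hedged userspace model of just that rule follows; post_alloc_zero() is a made-up name and the redzone is faked with 0xaa fill, so this is a sketch of the policy, not the kernel's hook.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Models "with debugging on, zero orig_size, not object_size",
     * so bytes beyond the original request stay intact. */
    static void post_alloc_zero(unsigned char *obj, size_t object_size,
                                size_t orig_size, bool debug_enabled)
    {
            size_t zero_size = debug_enabled ? orig_size : object_size;

            memset(obj, 0, zero_size);
    }

    int main(void)
    {
            unsigned char obj[64];

            memset(obj, 0xaa, sizeof(obj));  /* 0xaa = fake redzone */
            /* 40 bytes requested from a 64-byte cache, debug on. */
            post_alloc_zero(obj, sizeof(obj), 40, true);
            printf("obj[0]=%#x obj[63]=%#x\n", obj[0], obj[63]);
            return 0;
    }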
@@ -4776,14 +4757,14 @@ static inline int calculate_order(unsigned int size)
 	 * and backing off gradually.
 	 *
 	 * We start with accepting at most 1/16 waste and try to find the
-	 * smallest order from min_objects-derived/slub_min_order up to
-	 * slub_max_order that will satisfy the constraint. Note that increasing
+	 * smallest order from min_objects-derived/slab_min_order up to
+	 * slab_max_order that will satisfy the constraint. Note that increasing
 	 * the order can only result in same or less fractional waste, not more.
 	 *
 	 * If that fails, we increase the acceptable fraction of waste and try
 	 * again. The last iteration with fraction of 1/2 would effectively
 	 * accept any waste and give us the order determined by min_objects, as
-	 * long as at least single object fits within slub_max_order.
+	 * long as at least single object fits within slab_max_order.
 	 */
 	for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
 		order = calc_slab_order(size, min_order, slub_max_order,
 					fraction);
 		if (order <= slub_max_order)
 			return order;
 	}
 
 	/*
-	 * Doh this slab cannot be placed using slub_max_order.
+	 * Doh this slab cannot be placed using slab_max_order.
 	 */
 	order = get_order(size);
 	if (order <= MAX_PAGE_ORDER)
@@ -4863,7 +4844,6 @@ static void early_kmem_cache_node_alloc(int node)
 
 	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
 
 	BUG_ON(!slab);
-	inc_slabs_node(kmem_cache_node, slab_nid(slab), slab->objects);
 	if (slab_nid(slab) != node) {
 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
@@ -5110,7 +5090,7 @@ static int calculate_sizes(struct kmem_cache *s)
 
 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 {
-	s->flags = kmem_cache_flags(s->size, flags, s->name);
+	s->flags = kmem_cache_flags(flags, s->name);
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
 	s->random = get_random_long();
 #endif
@@ -5319,7 +5299,9 @@ static int __init setup_slub_min_order(char *str)
 	return 1;
 }
 
-__setup("slub_min_order=", setup_slub_min_order);
+__setup("slab_min_order=", setup_slub_min_order);
+__setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0);
+
 static int __init setup_slub_max_order(char *str)
 {
@@ -5332,7 +5314,8 @@ static int __init setup_slub_max_order(char *str)
 	return 1;
 }
 
-__setup("slub_max_order=", setup_slub_max_order);
+__setup("slab_max_order=", setup_slub_max_order);
+__setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0);
 
 static int __init setup_slub_min_objects(char *str)
 {
@@ -5341,7 +5324,8 @@ static int __init setup_slub_min_objects(char *str)
 	return 1;
 }
 
-__setup("slub_min_objects=", setup_slub_min_objects);
+__setup("slab_min_objects=", setup_slub_min_objects);
+__setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);
 
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
@@ -5669,7 +5653,7 @@ void __init kmem_cache_init(void)
 
 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
 	setup_kmalloc_cache_index_table();
-	create_kmalloc_caches(0);
+	create_kmalloc_caches();
 
 	/* Setup random freelists for each cache */
 	init_freelist_randomization();
@@ -6798,14 +6782,12 @@ out_del_kobj:
 
 void sysfs_slab_unlink(struct kmem_cache *s)
 {
-	if (slab_state >= FULL)
-		kobject_del(&s->kobj);
+	kobject_del(&s->kobj);
 }
 
 void sysfs_slab_release(struct kmem_cache *s)
 {
-	if (slab_state >= FULL)
-		kobject_put(&s->kobj);
+	kobject_put(&s->kobj);
 }
 
 /*
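
Note: to see the waste-fraction backoff of calculate_order() in isolation, here is a userspace sketch. PAGE_SIZE and SLAB_MAX_ORDER are illustrative stand-ins for the kernel's page size and slab_max_order, and the fit test is deliberately simplified (no per-order minimum object count, no calc_slab_order() helper).

    #include <stdio.h>

    #define PAGE_SIZE      4096UL
    #define SLAB_MAX_ORDER 3U    /* stand-in for slab_max_order */

    /* Accept an order if the tail waste stays under slab_size/fraction. */
    static int order_fits(unsigned long size, unsigned int order,
                          unsigned int fraction)
    {
            unsigned long slab_size = PAGE_SIZE << order;

            return slab_size / size >= 1 &&
                   slab_size % size <= slab_size / fraction;
    }

    static int calculate_order(unsigned long size)
    {
            unsigned int order, fraction;

            /* Start at 1/16 waste and relax toward 1/2, the backoff
             * described in the comment in the hunk above. */
            for (fraction = 16; fraction > 1; fraction /= 2)
                    for (order = 0; order <= SLAB_MAX_ORDER; order++)
                            if (order_fits(size, order, fraction))
                                    return order;
            return -1;  /* "Doh", nothing fits within SLAB_MAX_ORDER */
    }

    int main(void)
    {
            /* A 700-byte object wastes 596 of 4096 bytes at order 0,
             * but only 492 of 8192 at order 1, which passes 1/16. */
            printf("order for 700-byte objects: %d\n", calculate_order(700));
            return 0;
    }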