Diffstat (limited to 'lib/stackdepot.c')
-rw-r--r-- | lib/stackdepot.c | 645 |
1 file changed, 456 insertions, 189 deletions
diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 2f5aa85183..91d2301fde 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -14,15 +14,21 @@ #define pr_fmt(fmt) "stackdepot: " fmt +#include <linux/debugfs.h> #include <linux/gfp.h> #include <linux/jhash.h> #include <linux/kernel.h> #include <linux/kmsan.h> +#include <linux/list.h> #include <linux/mm.h> #include <linux/mutex.h> -#include <linux/percpu.h> +#include <linux/poison.h> #include <linux/printk.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> +#include <linux/refcount.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/stacktrace.h> #include <linux/stackdepot.h> #include <linux/string.h> @@ -30,38 +36,11 @@ #include <linux/memblock.h> #include <linux/kasan-enabled.h> -#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8) - -#define DEPOT_VALID_BITS 1 -#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */ -#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER)) -#define DEPOT_STACK_ALIGN 4 -#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN) -#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \ - DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS) #define DEPOT_POOLS_CAP 8192 +/* The pool_index is offset by 1 so the first record does not have a 0 handle. */ #define DEPOT_MAX_POOLS \ - (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \ - (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP) - -/* Compact structure that stores a reference to a stack. */ -union handle_parts { - depot_stack_handle_t handle; - struct { - u32 pool_index : DEPOT_POOL_INDEX_BITS; - u32 offset : DEPOT_OFFSET_BITS; - u32 valid : DEPOT_VALID_BITS; - u32 extra : STACK_DEPOT_EXTRA_BITS; - }; -}; - -struct stack_record { - struct stack_record *next; /* Link in the hash table */ - u32 hash; /* Hash in the hash table */ - u32 size; /* Number of stored frames */ - union handle_parts handle; - unsigned long entries[]; /* Variable-sized array of frames */ -}; + (((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \ + (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP) static bool stack_depot_disabled; static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT); @@ -75,40 +54,50 @@ static bool __stack_depot_early_init_passed __initdata; /* Initial seed for jhash2. */ #define STACK_HASH_SEED 0x9747b28c -/* Hash table of pointers to stored stack traces. */ -static struct stack_record **stack_table; +/* Hash table of stored stack records. */ +static struct list_head *stack_table; /* Fixed order of the number of table buckets. Used when KASAN is enabled. */ static unsigned int stack_bucket_number_order; /* Hash mask for indexing the table. */ static unsigned int stack_hash_mask; -/* Array of memory regions that store stack traces. */ +/* Array of memory regions that store stack records. */ static void *stack_pools[DEPOT_MAX_POOLS]; -/* Currently used pool in stack_pools. */ -static int pool_index; +/* Newly allocated pool that is not yet added to stack_pools. */ +static void *new_pool; +/* Number of pools in stack_pools. */ +static int pools_num; /* Offset to the unused space in the currently used pool. */ -static size_t pool_offset; -/* Lock that protects the variables above. */ +static size_t pool_offset = DEPOT_POOL_SIZE; +/* Freelist of stack records within stack_pools. */ +static LIST_HEAD(free_stacks); +/* The lock must be held when performing pool or freelist modifications. 
*/ static DEFINE_RAW_SPINLOCK(pool_lock); -/* - * Stack depot tries to keep an extra pool allocated even before it runs out - * of space in the currently used pool. - * This flag marks that this next extra pool needs to be allocated and - * initialized. It has the value 0 when either the next pool is not yet - * initialized or the limit on the number of pools is reached. - */ -static int next_pool_required = 1; + +/* Statistics counters for debugfs. */ +enum depot_counter_id { + DEPOT_COUNTER_REFD_ALLOCS, + DEPOT_COUNTER_REFD_FREES, + DEPOT_COUNTER_REFD_INUSE, + DEPOT_COUNTER_FREELIST_SIZE, + DEPOT_COUNTER_PERSIST_COUNT, + DEPOT_COUNTER_PERSIST_BYTES, + DEPOT_COUNTER_COUNT, +}; +static long counters[DEPOT_COUNTER_COUNT]; +static const char *const counter_names[] = { + [DEPOT_COUNTER_REFD_ALLOCS] = "refcounted_allocations", + [DEPOT_COUNTER_REFD_FREES] = "refcounted_frees", + [DEPOT_COUNTER_REFD_INUSE] = "refcounted_in_use", + [DEPOT_COUNTER_FREELIST_SIZE] = "freelist_size", + [DEPOT_COUNTER_PERSIST_COUNT] = "persistent_count", + [DEPOT_COUNTER_PERSIST_BYTES] = "persistent_bytes", +}; +static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT); static int __init disable_stack_depot(char *str) { - int ret; - - ret = kstrtobool(str, &stack_depot_disabled); - if (!ret && stack_depot_disabled) { - pr_info("disabled\n"); - stack_table = NULL; - } - return 0; + return kstrtobool(str, &stack_depot_disabled); } early_param("stack_depot_disable", disable_stack_depot); @@ -120,6 +109,15 @@ void __init stack_depot_request_early_init(void) __stack_depot_early_init_requested = true; } +/* Initialize list_head's within the hash table. */ +static void init_stack_table(unsigned long entries) +{ + unsigned long i; + + for (i = 0; i < entries; i++) + INIT_LIST_HEAD(&stack_table[i]); +} + /* Allocates a hash table via memblock. Can only be used during early boot. */ int __init stack_depot_early_init(void) { @@ -131,6 +129,15 @@ int __init stack_depot_early_init(void) __stack_depot_early_init_passed = true; /* + * Print disabled message even if early init has not been requested: + * stack_depot_init() will not print one. + */ + if (stack_depot_disabled) { + pr_info("disabled\n"); + return 0; + } + + /* * If KASAN is enabled, use the maximum order: KASAN is frequently used * in fuzzing scenarios, which leads to a large number of different * stack traces being stored in stack depot. @@ -138,21 +145,25 @@ int __init stack_depot_early_init(void) if (kasan_enabled() && !stack_bucket_number_order) stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX; - if (!__stack_depot_early_init_requested || stack_depot_disabled) + /* + * Check if early init has been requested after setting + * stack_bucket_number_order: stack_depot_init() uses its value. + */ + if (!__stack_depot_early_init_requested) return 0; /* * If stack_bucket_number_order is not set, leave entries as 0 to rely - * on the automatic calculations performed by alloc_large_system_hash. + * on the automatic calculations performed by alloc_large_system_hash(). 
*/ if (stack_bucket_number_order) entries = 1UL << stack_bucket_number_order; pr_info("allocating hash table via alloc_large_system_hash\n"); stack_table = alloc_large_system_hash("stackdepot", - sizeof(struct stack_record *), + sizeof(struct list_head), entries, STACK_HASH_TABLE_SCALE, - HASH_EARLY | HASH_ZERO, + HASH_EARLY, NULL, &stack_hash_mask, 1UL << STACK_BUCKET_NUMBER_ORDER_MIN, @@ -162,6 +173,14 @@ int __init stack_depot_early_init(void) stack_depot_disabled = true; return -ENOMEM; } + if (!entries) { + /* + * Obtain the number of entries that was calculated by + * alloc_large_system_hash(). + */ + entries = stack_hash_mask + 1; + } + init_stack_table(entries); return 0; } @@ -202,7 +221,7 @@ int stack_depot_init(void) entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX; pr_info("allocating hash table of %lu entries via kvcalloc\n", entries); - stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL); + stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL); if (!stack_table) { pr_err("hash table allocation failed, disabling\n"); stack_depot_disabled = true; @@ -210,6 +229,7 @@ int stack_depot_init(void) goto out_unlock; } stack_hash_mask = entries - 1; + init_stack_table(entries); out_unlock: mutex_unlock(&stack_depot_init_mutex); @@ -218,104 +238,276 @@ out_unlock: } EXPORT_SYMBOL_GPL(stack_depot_init); -/* Uses preallocated memory to initialize a new stack depot pool. */ -static void depot_init_pool(void **prealloc) +/* + * Initializes new stack pool, and updates the list of pools. + */ +static bool depot_init_pool(void **prealloc) +{ + lockdep_assert_held(&pool_lock); + + if (unlikely(pools_num >= DEPOT_MAX_POOLS)) { + /* Bail out if we reached the pool limit. */ + WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */ + WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */ + WARN_ONCE(1, "Stack depot reached limit capacity"); + return false; + } + + if (!new_pool && *prealloc) { + /* We have preallocated memory, use it. */ + WRITE_ONCE(new_pool, *prealloc); + *prealloc = NULL; + } + + if (!new_pool) + return false; /* new_pool and *prealloc are NULL */ + + /* Save reference to the pool to be used by depot_fetch_stack(). */ + stack_pools[pools_num] = new_pool; + + /* + * Stack depot tries to keep an extra pool allocated even before it runs + * out of space in the currently used pool. + * + * To indicate that a new preallocation is needed new_pool is reset to + * NULL; do not reset to NULL if we have reached the maximum number of + * pools. + */ + if (pools_num < DEPOT_MAX_POOLS) + WRITE_ONCE(new_pool, NULL); + else + WRITE_ONCE(new_pool, STACK_DEPOT_POISON); + + /* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */ + WRITE_ONCE(pools_num, pools_num + 1); + ASSERT_EXCLUSIVE_WRITER(pools_num); + + pool_offset = 0; + + return true; +} + +/* Keeps the preallocated memory to be used for a new stack depot pool. */ +static void depot_keep_new_pool(void **prealloc) { + lockdep_assert_held(&pool_lock); + /* - * If the next pool is already initialized or the maximum number of + * If a new pool is already saved or the maximum number of * pools is reached, do not use the preallocated memory. - * smp_load_acquire() here pairs with smp_store_release() below and - * in depot_alloc_stack(). */ - if (!smp_load_acquire(&next_pool_required)) + if (new_pool) return; - /* Check if the current pool is not yet allocated. */ - if (stack_pools[pool_index] == NULL) { - /* Use the preallocated memory for the current pool. 
*/ - stack_pools[pool_index] = *prealloc; - *prealloc = NULL; - } else { - /* - * Otherwise, use the preallocated memory for the next pool - * as long as we do not exceed the maximum number of pools. - */ - if (pool_index + 1 < DEPOT_MAX_POOLS) { - stack_pools[pool_index + 1] = *prealloc; - *prealloc = NULL; - } - /* - * At this point, either the next pool is initialized or the - * maximum number of pools is reached. In either case, take - * note that initializing another pool is not required. - * This smp_store_release pairs with smp_load_acquire() above - * and in stack_depot_save(). - */ - smp_store_release(&next_pool_required, 0); + WRITE_ONCE(new_pool, *prealloc); + *prealloc = NULL; +} + +/* + * Try to initialize a new stack record from the current pool, a cached pool, or + * the current pre-allocation. + */ +static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size) +{ + struct stack_record *stack; + void *current_pool; + u32 pool_index; + + lockdep_assert_held(&pool_lock); + + if (pool_offset + size > DEPOT_POOL_SIZE) { + if (!depot_init_pool(prealloc)) + return NULL; } + + if (WARN_ON_ONCE(pools_num < 1)) + return NULL; + pool_index = pools_num - 1; + current_pool = stack_pools[pool_index]; + if (WARN_ON_ONCE(!current_pool)) + return NULL; + + stack = current_pool + pool_offset; + + /* Pre-initialize handle once. */ + stack->handle.pool_index_plus_1 = pool_index + 1; + stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN; + stack->handle.extra = 0; + INIT_LIST_HEAD(&stack->hash_list); + + pool_offset += size; + + return stack; +} + +/* Try to find next free usable entry from the freelist. */ +static struct stack_record *depot_pop_free(void) +{ + struct stack_record *stack; + + lockdep_assert_held(&pool_lock); + + if (list_empty(&free_stacks)) + return NULL; + + /* + * We maintain the invariant that the elements in front are least + * recently used, and are therefore more likely to be associated with an + * RCU grace period in the past. Consequently it is sufficient to only + * check the first entry. + */ + stack = list_first_entry(&free_stacks, struct stack_record, free_list); + if (!poll_state_synchronize_rcu(stack->rcu_state)) + return NULL; + + list_del(&stack->free_list); + counters[DEPOT_COUNTER_FREELIST_SIZE]--; + + return stack; +} + +static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries) +{ + const size_t used = flex_array_size(s, entries, nr_entries); + const size_t unused = sizeof(s->entries) - used; + + WARN_ON_ONCE(sizeof(s->entries) < used); + + return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN); } /* Allocates a new stack in a stack depot pool. */ static struct stack_record * -depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc) +depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc) { - struct stack_record *stack; - size_t required_size = struct_size(stack, entries, size); + struct stack_record *stack = NULL; + size_t record_size; - required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN); + lockdep_assert_held(&pool_lock); - /* Check if there is not enough space in the current pool. */ - if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) { - /* Bail out if we reached the pool limit. */ - if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) { - WARN_ONCE(1, "Stack depot reached limit capacity"); - return NULL; - } + /* This should already be checked by public API entry points. 
*/ + if (WARN_ON_ONCE(!nr_entries)) + return NULL; + /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */ + if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES) + nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES; + + if (flags & STACK_DEPOT_FLAG_GET) { /* - * Move on to the next pool. - * WRITE_ONCE pairs with potential concurrent read in - * stack_depot_fetch(). - */ - WRITE_ONCE(pool_index, pool_index + 1); - pool_offset = 0; - /* - * If the maximum number of pools is not reached, take note - * that the next pool needs to initialized. - * smp_store_release() here pairs with smp_load_acquire() in - * stack_depot_save() and depot_init_pool(). + * Evictable entries have to allocate the max. size so they may + * safely be re-used by differently sized allocations. */ - if (pool_index + 1 < DEPOT_MAX_POOLS) - smp_store_release(&next_pool_required, 1); + record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES); + stack = depot_pop_free(); + } else { + record_size = depot_stack_record_size(stack, nr_entries); } - /* Assign the preallocated memory to a pool if required. */ - if (*prealloc) - depot_init_pool(prealloc); - - /* Check if we have a pool to save the stack trace. */ - if (stack_pools[pool_index] == NULL) - return NULL; + if (!stack) { + stack = depot_pop_free_pool(prealloc, record_size); + if (!stack) + return NULL; + } /* Save the stack trace. */ - stack = stack_pools[pool_index] + pool_offset; stack->hash = hash; - stack->size = size; - stack->handle.pool_index = pool_index; - stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN; - stack->handle.valid = 1; - stack->handle.extra = 0; - memcpy(stack->entries, entries, flex_array_size(stack, entries, size)); - pool_offset += required_size; + stack->size = nr_entries; + /* stack->handle is already filled in by depot_pop_free_pool(). */ + memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries)); + + if (flags & STACK_DEPOT_FLAG_GET) { + refcount_set(&stack->count, 1); + counters[DEPOT_COUNTER_REFD_ALLOCS]++; + counters[DEPOT_COUNTER_REFD_INUSE]++; + } else { + /* Warn on attempts to switch to refcounting this entry. */ + refcount_set(&stack->count, REFCOUNT_SATURATED); + counters[DEPOT_COUNTER_PERSIST_COUNT]++; + counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size; + } + /* * Let KMSAN know the stored stack record is initialized. This shall * prevent false positive reports if instrumented code accesses it. */ - kmsan_unpoison_memory(stack, required_size); + kmsan_unpoison_memory(stack, record_size); return stack; } +static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle) +{ + const int pools_num_cached = READ_ONCE(pools_num); + union handle_parts parts = { .handle = handle }; + void *pool; + u32 pool_index = parts.pool_index_plus_1 - 1; + size_t offset = parts.offset << DEPOT_STACK_ALIGN; + struct stack_record *stack; + + lockdep_assert_not_held(&pool_lock); + + if (pool_index >= pools_num_cached) { + WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n", + pool_index, pools_num_cached, handle); + return NULL; + } + + pool = stack_pools[pool_index]; + if (WARN_ON(!pool)) + return NULL; + + stack = pool + offset; + if (WARN_ON(!refcount_read(&stack->count))) + return NULL; + + return stack; +} + +/* Links stack into the freelist. 
*/ +static void depot_free_stack(struct stack_record *stack) +{ + unsigned long flags; + + lockdep_assert_not_held(&pool_lock); + + raw_spin_lock_irqsave(&pool_lock, flags); + printk_deferred_enter(); + + /* + * Remove the entry from the hash list. Concurrent list traversal may + * still observe the entry, but since the refcount is zero, this entry + * will no longer be considered as valid. + */ + list_del_rcu(&stack->hash_list); + + /* + * Due to being used from constrained contexts such as the allocators, + * NMI, or even RCU itself, stack depot cannot rely on primitives that + * would sleep (such as synchronize_rcu()) or recursively call into + * stack depot again (such as call_rcu()). + * + * Instead, get an RCU cookie, so that we can ensure this entry isn't + * moved onto another list until the next grace period, and concurrent + * RCU list traversal remains safe. + */ + stack->rcu_state = get_state_synchronize_rcu(); + + /* + * Add the entry to the freelist tail, so that older entries are + * considered first - their RCU cookie is more likely to no longer be + * associated with the current grace period. + */ + list_add_tail(&stack->free_list, &free_stacks); + + counters[DEPOT_COUNTER_FREELIST_SIZE]++; + counters[DEPOT_COUNTER_REFD_FREES]++; + counters[DEPOT_COUNTER_REFD_INUSE]--; + + printk_deferred_exit(); + raw_spin_unlock_irqrestore(&pool_lock, flags); +} + /* Calculates the hash for a stack. */ static inline u32 hash_stack(unsigned long *entries, unsigned int size) { @@ -340,32 +532,72 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2, } /* Finds a stack in a bucket of the hash table. */ -static inline struct stack_record *find_stack(struct stack_record *bucket, - unsigned long *entries, int size, - u32 hash) +static inline struct stack_record *find_stack(struct list_head *bucket, + unsigned long *entries, int size, + u32 hash, depot_flags_t flags) { - struct stack_record *found; + struct stack_record *stack, *ret = NULL; + + /* + * Stack depot may be used from instrumentation that instruments RCU or + * tracing itself; use variant that does not call into RCU and cannot be + * traced. + * + * Note: Such use cases must take care when using refcounting to evict + * unused entries, because the stack record free-then-reuse code paths + * do call into RCU. + */ + rcu_read_lock_sched_notrace(); + + list_for_each_entry_rcu(stack, bucket, hash_list) { + if (stack->hash != hash || stack->size != size) + continue; + + /* + * This may race with depot_free_stack() accessing the freelist + * management state unioned with @entries. The refcount is zero + * in that case and the below refcount_inc_not_zero() will fail. + */ + if (data_race(stackdepot_memcmp(entries, stack->entries, size))) + continue; - for (found = bucket; found; found = found->next) { - if (found->hash == hash && - found->size == size && - !stackdepot_memcmp(entries, found->entries, size)) - return found; + /* + * Try to increment refcount. If this succeeds, the stack record + * is valid and has not yet been freed. + * + * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior + * to then call stack_depot_put() later, and we can assume that + * a stack record is never placed back on the freelist. 
+ */ + if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count)) + continue; + + ret = stack; + break; } - return NULL; + + rcu_read_unlock_sched_notrace(); + + return ret; } -depot_stack_handle_t __stack_depot_save(unsigned long *entries, - unsigned int nr_entries, - gfp_t alloc_flags, bool can_alloc) +depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, + unsigned int nr_entries, + gfp_t alloc_flags, + depot_flags_t depot_flags) { - struct stack_record *found = NULL, **bucket; - union handle_parts retval = { .handle = 0 }; + struct list_head *bucket; + struct stack_record *found = NULL; + depot_stack_handle_t handle = 0; struct page *page = NULL; void *prealloc = NULL; + bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC; unsigned long flags; u32 hash; + if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK)) + return 0; + /* * If this stack trace is from an interrupt, including anything before * interrupt entry usually leads to unbounded stack depot growth. @@ -377,35 +609,28 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, nr_entries = filter_irq_stacks(entries, nr_entries); if (unlikely(nr_entries == 0) || stack_depot_disabled) - goto fast_exit; + return 0; hash = hash_stack(entries, nr_entries); bucket = &stack_table[hash & stack_hash_mask]; - /* - * Fast path: look the stack trace up without locking. - * The smp_load_acquire() here pairs with smp_store_release() to - * |bucket| below. - */ - found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash); + /* Fast path: look the stack trace up without locking. */ + found = find_stack(bucket, entries, nr_entries, hash, depot_flags); if (found) goto exit; /* - * Check if another stack pool needs to be initialized. If so, allocate - * the memory now - we won't be able to do that under the lock. - * - * The smp_load_acquire() here pairs with smp_store_release() to - * |next_pool_inited| in depot_alloc_stack() and depot_init_pool(). + * Allocate memory for a new pool if required now: + * we won't be able to do that under the lock. */ - if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) { + if (unlikely(can_alloc && !READ_ONCE(new_pool))) { /* * Zero out zone modifiers, as we don't have specific zone * requirements. Keep the flags related to allocation in atomic - * contexts and I/O. + * contexts, I/O, nolockdep. */ alloc_flags &= ~GFP_ZONEMASK; - alloc_flags &= (GFP_ATOMIC | GFP_KERNEL); + alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP); alloc_flags |= __GFP_NOWARN; page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER); if (page) @@ -413,29 +638,34 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, } raw_spin_lock_irqsave(&pool_lock, flags); + printk_deferred_enter(); - found = find_stack(*bucket, entries, nr_entries, hash); + /* Try to find again, to avoid concurrently inserting duplicates. */ + found = find_stack(bucket, entries, nr_entries, hash, depot_flags); if (!found) { struct stack_record *new = - depot_alloc_stack(entries, nr_entries, hash, &prealloc); + depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc); if (new) { - new->next = *bucket; /* - * This smp_store_release() pairs with - * smp_load_acquire() from |bucket| above. + * This releases the stack record into the bucket and + * makes it visible to readers in find_stack(). 
*/ - smp_store_release(bucket, new); + list_add_rcu(&new->hash_list, bucket); found = new; } - } else if (prealloc) { + } + + if (prealloc) { /* - * Stack depot already contains this stack trace, but let's - * keep the preallocated memory for the future. + * Either stack depot already contains this stack trace, or + * depot_alloc_stack() did not consume the preallocated memory. + * Try to keep the preallocated memory for future. */ - depot_init_pool(&prealloc); + depot_keep_new_pool(&prealloc); } + printk_deferred_exit(); raw_spin_unlock_irqrestore(&pool_lock, flags); exit: if (prealloc) { @@ -443,31 +673,23 @@ exit: free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER); } if (found) - retval.handle = found->handle.handle; -fast_exit: - return retval.handle; + handle = found->handle.handle; + return handle; } -EXPORT_SYMBOL_GPL(__stack_depot_save); +EXPORT_SYMBOL_GPL(stack_depot_save_flags); depot_stack_handle_t stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags) { - return __stack_depot_save(entries, nr_entries, alloc_flags, true); + return stack_depot_save_flags(entries, nr_entries, alloc_flags, + STACK_DEPOT_FLAG_CAN_ALLOC); } EXPORT_SYMBOL_GPL(stack_depot_save); unsigned int stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries) { - union handle_parts parts = { .handle = handle }; - /* - * READ_ONCE pairs with potential concurrent write in - * depot_alloc_stack. - */ - int pool_index_cached = READ_ONCE(pool_index); - void *pool; - size_t offset = parts.offset << DEPOT_STACK_ALIGN; struct stack_record *stack; *entries = NULL; @@ -477,24 +699,42 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle, */ kmsan_unpoison_memory(entries, sizeof(*entries)); - if (!handle) + if (!handle || stack_depot_disabled) return 0; - if (parts.pool_index > pool_index_cached) { - WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n", - parts.pool_index, pool_index_cached, handle); - return 0; - } - pool = stack_pools[parts.pool_index]; - if (!pool) + stack = depot_fetch_stack(handle); + /* + * Should never be NULL, otherwise this is a use-after-put (or just a + * corrupt handle). + */ + if (WARN(!stack, "corrupt handle or use after stack_depot_put()")) return 0; - stack = pool + offset; *entries = stack->entries; return stack->size; } EXPORT_SYMBOL_GPL(stack_depot_fetch); +void stack_depot_put(depot_stack_handle_t handle) +{ + struct stack_record *stack; + + if (!handle || stack_depot_disabled) + return; + + stack = depot_fetch_stack(handle); + /* + * Should always be able to find the stack record, otherwise this is an + * unbalanced put attempt (or corrupt handle). + */ + if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()")) + return; + + if (refcount_dec_and_test(&stack->count)) + depot_free_stack(stack); +} +EXPORT_SYMBOL_GPL(stack_depot_put); + void stack_depot_print(depot_stack_handle_t stack) { unsigned long *entries; @@ -539,3 +779,30 @@ unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle) return parts.extra; } EXPORT_SYMBOL(stack_depot_get_extra_bits); + +static int stats_show(struct seq_file *seq, void *v) +{ + /* + * data race ok: These are just statistics counters, and approximate + * statistics are ok for debugging. 
*/ + seq_printf(seq, "pools: %d\n", data_race(pools_num)); + for (int i = 0; i < DEPOT_COUNTER_COUNT; i++) + seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i])); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(stats); + +static int depot_debugfs_init(void) +{ + struct dentry *dir; + + if (stack_depot_disabled) + return 0; + + dir = debugfs_create_dir("stackdepot", NULL); + debugfs_create_file("stats", 0444, dir, NULL, &stats_fops); + return 0; +} +late_initcall(depot_debugfs_init);
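
The diff above replaces __stack_depot_save() with stack_depot_save_flags() and adds stack_depot_put() for refcounted, evictable stack records. A minimal usage sketch follows; it is not part of the patch, and the helper names save_current_stack() and drop_saved_stack() as well as the 16-entry buffer size are made up for illustration:

/*
 * Illustrative only: save the current stack trace as a refcounted, evictable
 * record and release it again later. Assumes the updated <linux/stackdepot.h>
 * that defines depot_flags_t, STACK_DEPOT_FLAG_CAN_ALLOC and
 * STACK_DEPOT_FLAG_GET.
 */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t save_current_stack(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	/* GET makes the record refcounted so it can later be evicted. */
	return stack_depot_save_flags(entries, nr, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

static void drop_saved_stack(depot_stack_handle_t handle)
{
	if (handle)
		stack_depot_put(handle);	/* may place the record on the freelist */
}

With STACK_DEPOT_FLAG_GET, the record's refcount starts at 1 and each later lookup that passes the flag takes another reference; once stack_depot_put() drops it to zero, depot_free_stack() moves the record onto the freelist for reuse. Assuming debugfs is mounted at /sys/kernel/debug, the new stackdepot/stats file added by this patch reports these operations through the refcounted_allocations, refcounted_frees and refcounted_in_use counters.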
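
The freelist handling in depot_free_stack() and depot_pop_free() deliberately avoids synchronize_rcu() and call_rcu(), which cannot be used from the constrained contexts stack depot runs in (allocators, NMI, RCU itself); instead it records an RCU cookie when an entry is retired and only recycles the entry once that grace period has expired. A distilled sketch of this pattern, written for illustration rather than taken verbatim from the patch:

/*
 * Illustrative sketch of deferred reuse via RCU cookies: retire an object by
 * stamping it with get_state_synchronize_rcu(), and recycle it only after
 * poll_state_synchronize_rcu() confirms a grace period has elapsed, keeping
 * lockless RCU readers safe without sleeping or recursing into RCU callbacks.
 * The struct and function names here are hypothetical.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

struct deferred_item {
	struct list_head	free_list;
	unsigned long		rcu_state;	/* cookie from get_state_synchronize_rcu() */
};

static LIST_HEAD(deferred_free_list);

static void deferred_retire(struct deferred_item *item)
{
	item->rcu_state = get_state_synchronize_rcu();	/* cheap, never sleeps */
	/* Tail insertion keeps the oldest (most likely expired) entries in front. */
	list_add_tail(&item->free_list, &deferred_free_list);
}

static struct deferred_item *deferred_reuse(void)
{
	struct deferred_item *item;

	if (list_empty(&deferred_free_list))
		return NULL;

	/* Only the oldest entry needs checking; newer ones cannot be older. */
	item = list_first_entry(&deferred_free_list, struct deferred_item, free_list);
	if (!poll_state_synchronize_rcu(item->rcu_state))
		return NULL;

	list_del(&item->free_list);
	return item;
}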