Diffstat (limited to 'lib/stackdepot.c')
-rw-r--r-- | lib/stackdepot.c | 902
1 file changed, 724 insertions, 178 deletions
diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 66cab785bea0..4a7055a63d9f 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -1,162 +1,565 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Generic stack depot for storing stack traces. + * Stack depot - a stack trace storage that avoids duplication. * - * Some debugging tools need to save stack traces of certain events which can - * be later presented to the user. For example, KASAN needs to safe alloc and - * free stacks for each object, but storing two stack traces per object - * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for - * that). - * - * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc - * and free stacks repeat a lot, we save about 100x space. - * Stacks are never removed from depot, so we store them contiguously one after - * another in a contiguos memory allocation. + * Internally, stack depot maintains a hash table of unique stacktraces. The + * stack traces themselves are stored contiguously one after another in a set + * of separate page allocations. * * Author: Alexander Potapenko <glider@google.com> * Copyright (C) 2016 Google, Inc. * - * Based on code by Dmitry Chernenkov. + * Based on the code by Dmitry Chernenkov. */ +#define pr_fmt(fmt) "stackdepot: " fmt + +#include <linux/debugfs.h> #include <linux/gfp.h> #include <linux/jhash.h> #include <linux/kernel.h> +#include <linux/kmsan.h> +#include <linux/list.h> #include <linux/mm.h> -#include <linux/percpu.h> +#include <linux/mutex.h> +#include <linux/poison.h> #include <linux/printk.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> +#include <linux/refcount.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/stacktrace.h> #include <linux/stackdepot.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/memblock.h> +#include <linux/kasan-enabled.h> + +#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8) -#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8) - -#define STACK_ALLOC_NULL_PROTECTION_BITS 1 -#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */ -#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER)) -#define STACK_ALLOC_ALIGN 4 -#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \ - STACK_ALLOC_ALIGN) -#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \ - STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS) -#define STACK_ALLOC_SLABS_CAP 8192 -#define STACK_ALLOC_MAX_SLABS \ - (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \ - (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP) - -/* The compact structure to store the reference to stacks. */ +#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */ +#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER)) +#define DEPOT_STACK_ALIGN 4 +#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN) +#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \ + STACK_DEPOT_EXTRA_BITS) +#define DEPOT_POOLS_CAP 8192 +#define DEPOT_MAX_POOLS \ + (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \ + (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP) + +/* Compact structure that stores a reference to a stack. 
*/ union handle_parts { depot_stack_handle_t handle; struct { - u32 slabindex : STACK_ALLOC_INDEX_BITS; - u32 offset : STACK_ALLOC_OFFSET_BITS; - u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS; + u32 pool_index : DEPOT_POOL_INDEX_BITS; + u32 offset : DEPOT_OFFSET_BITS; + u32 extra : STACK_DEPOT_EXTRA_BITS; }; }; struct stack_record { - struct stack_record *next; /* Link in the hashtable */ - u32 hash; /* Hash in the hastable */ - u32 size; /* Number of frames in the stack */ - union handle_parts handle; - unsigned long entries[1]; /* Variable-sized array of entries. */ + struct list_head hash_list; /* Links in the hash table */ + u32 hash; /* Hash in hash table */ + u32 size; /* Number of stored frames */ + union handle_parts handle; /* Constant after initialization */ + refcount_t count; + union { + unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */ + struct { + /* + * An important invariant of the implementation is to + * only place a stack record onto the freelist iff its + * refcount is zero. Because stack records with a zero + * refcount are never considered as valid, it is safe to + * union @entries and freelist management state below. + * Conversely, as soon as an entry is off the freelist + * and its refcount becomes non-zero, the below must not + * be accessed until being placed back on the freelist. + */ + struct list_head free_list; /* Links in the freelist */ + unsigned long rcu_state; /* RCU cookie */ + }; + }; }; -static void *stack_slabs[STACK_ALLOC_MAX_SLABS]; +static bool stack_depot_disabled; +static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT); +static bool __stack_depot_early_init_passed __initdata; + +/* Use one hash table bucket per 16 KB of memory. */ +#define STACK_HASH_TABLE_SCALE 14 +/* Limit the number of buckets between 4K and 1M. */ +#define STACK_BUCKET_NUMBER_ORDER_MIN 12 +#define STACK_BUCKET_NUMBER_ORDER_MAX 20 +/* Initial seed for jhash2. */ +#define STACK_HASH_SEED 0x9747b28c + +/* Hash table of stored stack records. */ +static struct list_head *stack_table; +/* Fixed order of the number of table buckets. Used when KASAN is enabled. */ +static unsigned int stack_bucket_number_order; +/* Hash mask for indexing the table. */ +static unsigned int stack_hash_mask; -static int depot_index; -static int next_slab_inited; -static size_t depot_offset; -static DEFINE_SPINLOCK(depot_lock); +/* Array of memory regions that store stack records. */ +static void *stack_pools[DEPOT_MAX_POOLS]; +/* Newly allocated pool that is not yet added to stack_pools. */ +static void *new_pool; +/* Number of pools in stack_pools. */ +static int pools_num; +/* Offset to the unused space in the currently used pool. */ +static size_t pool_offset = DEPOT_POOL_SIZE; +/* Freelist of stack records within stack_pools. */ +static LIST_HEAD(free_stacks); +/* The lock must be held when performing pool or freelist modifications. */ +static DEFINE_RAW_SPINLOCK(pool_lock); -static bool init_stack_slab(void **prealloc) +/* Statistics counters for debugfs. 
*/ +enum depot_counter_id { + DEPOT_COUNTER_REFD_ALLOCS, + DEPOT_COUNTER_REFD_FREES, + DEPOT_COUNTER_REFD_INUSE, + DEPOT_COUNTER_FREELIST_SIZE, + DEPOT_COUNTER_PERSIST_COUNT, + DEPOT_COUNTER_PERSIST_BYTES, + DEPOT_COUNTER_COUNT, +}; +static long counters[DEPOT_COUNTER_COUNT]; +static const char *const counter_names[] = { + [DEPOT_COUNTER_REFD_ALLOCS] = "refcounted_allocations", + [DEPOT_COUNTER_REFD_FREES] = "refcounted_frees", + [DEPOT_COUNTER_REFD_INUSE] = "refcounted_in_use", + [DEPOT_COUNTER_FREELIST_SIZE] = "freelist_size", + [DEPOT_COUNTER_PERSIST_COUNT] = "persistent_count", + [DEPOT_COUNTER_PERSIST_BYTES] = "persistent_bytes", +}; +static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT); + +static int __init disable_stack_depot(char *str) { - if (!*prealloc) - return false; + return kstrtobool(str, &stack_depot_disabled); +} +early_param("stack_depot_disable", disable_stack_depot); + +void __init stack_depot_request_early_init(void) +{ + /* Too late to request early init now. */ + WARN_ON(__stack_depot_early_init_passed); + + __stack_depot_early_init_requested = true; +} + +/* Initialize list_head's within the hash table. */ +static void init_stack_table(unsigned long entries) +{ + unsigned long i; + + for (i = 0; i < entries; i++) + INIT_LIST_HEAD(&stack_table[i]); +} + +/* Allocates a hash table via memblock. Can only be used during early boot. */ +int __init stack_depot_early_init(void) +{ + unsigned long entries = 0; + + /* This function must be called only once, from mm_init(). */ + if (WARN_ON(__stack_depot_early_init_passed)) + return 0; + __stack_depot_early_init_passed = true; + /* - * This smp_load_acquire() pairs with smp_store_release() to - * |next_slab_inited| below and in depot_alloc_stack(). + * Print disabled message even if early init has not been requested: + * stack_depot_init() will not print one. */ - if (smp_load_acquire(&next_slab_inited)) - return true; - if (stack_slabs[depot_index] == NULL) { - stack_slabs[depot_index] = *prealloc; - } else { - stack_slabs[depot_index + 1] = *prealloc; + if (stack_depot_disabled) { + pr_info("disabled\n"); + return 0; + } + + /* + * If KASAN is enabled, use the maximum order: KASAN is frequently used + * in fuzzing scenarios, which leads to a large number of different + * stack traces being stored in stack depot. + */ + if (kasan_enabled() && !stack_bucket_number_order) + stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX; + + /* + * Check if early init has been requested after setting + * stack_bucket_number_order: stack_depot_init() uses its value. + */ + if (!__stack_depot_early_init_requested) + return 0; + + /* + * If stack_bucket_number_order is not set, leave entries as 0 to rely + * on the automatic calculations performed by alloc_large_system_hash(). + */ + if (stack_bucket_number_order) + entries = 1UL << stack_bucket_number_order; + pr_info("allocating hash table via alloc_large_system_hash\n"); + stack_table = alloc_large_system_hash("stackdepot", + sizeof(struct list_head), + entries, + STACK_HASH_TABLE_SCALE, + HASH_EARLY, + NULL, + &stack_hash_mask, + 1UL << STACK_BUCKET_NUMBER_ORDER_MIN, + 1UL << STACK_BUCKET_NUMBER_ORDER_MAX); + if (!stack_table) { + pr_err("hash table allocation failed, disabling\n"); + stack_depot_disabled = true; + return -ENOMEM; + } + if (!entries) { /* - * This smp_store_release pairs with smp_load_acquire() from - * |next_slab_inited| above and in depot_save_stack(). + * Obtain the number of entries that was calculated by + * alloc_large_system_hash(). 
*/ - smp_store_release(&next_slab_inited, 1); + entries = stack_hash_mask + 1; } - *prealloc = NULL; + init_stack_table(entries); + + return 0; +} + +/* Allocates a hash table via kvcalloc. Can be used after boot. */ +int stack_depot_init(void) +{ + static DEFINE_MUTEX(stack_depot_init_mutex); + unsigned long entries; + int ret = 0; + + mutex_lock(&stack_depot_init_mutex); + + if (stack_depot_disabled || stack_table) + goto out_unlock; + + /* + * Similarly to stack_depot_early_init, use stack_bucket_number_order + * if assigned, and rely on automatic scaling otherwise. + */ + if (stack_bucket_number_order) { + entries = 1UL << stack_bucket_number_order; + } else { + int scale = STACK_HASH_TABLE_SCALE; + + entries = nr_free_buffer_pages(); + entries = roundup_pow_of_two(entries); + + if (scale > PAGE_SHIFT) + entries >>= (scale - PAGE_SHIFT); + else + entries <<= (PAGE_SHIFT - scale); + } + + if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN) + entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN; + if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX) + entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX; + + pr_info("allocating hash table of %lu entries via kvcalloc\n", entries); + stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL); + if (!stack_table) { + pr_err("hash table allocation failed, disabling\n"); + stack_depot_disabled = true; + ret = -ENOMEM; + goto out_unlock; + } + stack_hash_mask = entries - 1; + init_stack_table(entries); + +out_unlock: + mutex_unlock(&stack_depot_init_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(stack_depot_init); + +/* + * Initializes new stack pool, and updates the list of pools. + */ +static bool depot_init_pool(void **prealloc) +{ + lockdep_assert_held(&pool_lock); + + if (unlikely(pools_num >= DEPOT_MAX_POOLS)) { + /* Bail out if we reached the pool limit. */ + WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */ + WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */ + WARN_ONCE(1, "Stack depot reached limit capacity"); + return false; + } + + if (!new_pool && *prealloc) { + /* We have preallocated memory, use it. */ + WRITE_ONCE(new_pool, *prealloc); + *prealloc = NULL; + } + + if (!new_pool) + return false; /* new_pool and *prealloc are NULL */ + + /* Save reference to the pool to be used by depot_fetch_stack(). */ + stack_pools[pools_num] = new_pool; + + /* + * Stack depot tries to keep an extra pool allocated even before it runs + * out of space in the currently used pool. + * + * To indicate that a new preallocation is needed new_pool is reset to + * NULL; do not reset to NULL if we have reached the maximum number of + * pools. + */ + if (pools_num < DEPOT_MAX_POOLS) + WRITE_ONCE(new_pool, NULL); + else + WRITE_ONCE(new_pool, STACK_DEPOT_POISON); + + /* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */ + WRITE_ONCE(pools_num, pools_num + 1); + ASSERT_EXCLUSIVE_WRITER(pools_num); + + pool_offset = 0; + return true; } -/* Allocation of a new stack in raw storage */ -static struct stack_record *depot_alloc_stack(unsigned long *entries, int size, - u32 hash, void **prealloc, gfp_t alloc_flags) +/* Keeps the preallocated memory to be used for a new stack depot pool. */ +static void depot_keep_new_pool(void **prealloc) +{ + lockdep_assert_held(&pool_lock); + + /* + * If a new pool is already saved or the maximum number of + * pools is reached, do not use the preallocated memory. 
+ */ + if (new_pool) + return; + + WRITE_ONCE(new_pool, *prealloc); + *prealloc = NULL; +} + +/* + * Try to initialize a new stack record from the current pool, a cached pool, or + * the current pre-allocation. + */ +static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size) { - int required_size = offsetof(struct stack_record, entries) + - sizeof(unsigned long) * size; struct stack_record *stack; + void *current_pool; + u32 pool_index; - required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN); + lockdep_assert_held(&pool_lock); - if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) { - if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) { - WARN_ONCE(1, "Stack depot reached limit capacity"); + if (pool_offset + size > DEPOT_POOL_SIZE) { + if (!depot_init_pool(prealloc)) return NULL; - } - depot_index++; - depot_offset = 0; + } + + if (WARN_ON_ONCE(pools_num < 1)) + return NULL; + pool_index = pools_num - 1; + current_pool = stack_pools[pool_index]; + if (WARN_ON_ONCE(!current_pool)) + return NULL; + + stack = current_pool + pool_offset; + + /* Pre-initialize handle once. */ + stack->handle.pool_index = pool_index; + stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN; + stack->handle.extra = 0; + INIT_LIST_HEAD(&stack->hash_list); + + pool_offset += size; + + return stack; +} + +/* Try to find next free usable entry from the freelist. */ +static struct stack_record *depot_pop_free(void) +{ + struct stack_record *stack; + + lockdep_assert_held(&pool_lock); + + if (list_empty(&free_stacks)) + return NULL; + + /* + * We maintain the invariant that the elements in front are least + * recently used, and are therefore more likely to be associated with an + * RCU grace period in the past. Consequently it is sufficient to only + * check the first entry. + */ + stack = list_first_entry(&free_stacks, struct stack_record, free_list); + if (!poll_state_synchronize_rcu(stack->rcu_state)) + return NULL; + + list_del(&stack->free_list); + counters[DEPOT_COUNTER_FREELIST_SIZE]--; + + return stack; +} + +static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries) +{ + const size_t used = flex_array_size(s, entries, nr_entries); + const size_t unused = sizeof(s->entries) - used; + + WARN_ON_ONCE(sizeof(s->entries) < used); + + return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN); +} + +/* Allocates a new stack in a stack depot pool. */ +static struct stack_record * +depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc) +{ + struct stack_record *stack = NULL; + size_t record_size; + + lockdep_assert_held(&pool_lock); + + /* This should already be checked by public API entry points. */ + if (WARN_ON_ONCE(!nr_entries)) + return NULL; + + /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */ + if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES) + nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES; + + if (flags & STACK_DEPOT_FLAG_GET) { /* - * smp_store_release() here pairs with smp_load_acquire() from - * |next_slab_inited| in depot_save_stack() and - * init_stack_slab(). + * Evictable entries have to allocate the max. size so they may + * safely be re-used by differently sized allocations. 
*/ - if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) - smp_store_release(&next_slab_inited, 0); + record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES); + stack = depot_pop_free(); + } else { + record_size = depot_stack_record_size(stack, nr_entries); } - init_stack_slab(prealloc); - if (stack_slabs[depot_index] == NULL) - return NULL; - stack = stack_slabs[depot_index] + depot_offset; + if (!stack) { + stack = depot_pop_free_pool(prealloc, record_size); + if (!stack) + return NULL; + } + /* Save the stack trace. */ stack->hash = hash; - stack->size = size; - stack->handle.slabindex = depot_index; - stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN; - stack->handle.valid = 1; - memcpy(stack->entries, entries, size * sizeof(unsigned long)); - depot_offset += required_size; + stack->size = nr_entries; + /* stack->handle is already filled in by depot_pop_free_pool(). */ + memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries)); + + if (flags & STACK_DEPOT_FLAG_GET) { + refcount_set(&stack->count, 1); + counters[DEPOT_COUNTER_REFD_ALLOCS]++; + counters[DEPOT_COUNTER_REFD_INUSE]++; + } else { + /* Warn on attempts to switch to refcounting this entry. */ + refcount_set(&stack->count, REFCOUNT_SATURATED); + counters[DEPOT_COUNTER_PERSIST_COUNT]++; + counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size; + } + + /* + * Let KMSAN know the stored stack record is initialized. This shall + * prevent false positive reports if instrumented code accesses it. + */ + kmsan_unpoison_memory(stack, record_size); return stack; } -#define STACK_HASH_ORDER 20 -#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER) -#define STACK_HASH_MASK (STACK_HASH_SIZE - 1) -#define STACK_HASH_SEED 0x9747b28c +static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle) +{ + const int pools_num_cached = READ_ONCE(pools_num); + union handle_parts parts = { .handle = handle }; + void *pool; + size_t offset = parts.offset << DEPOT_STACK_ALIGN; + struct stack_record *stack; -static struct stack_record *stack_table[STACK_HASH_SIZE] = { - [0 ... STACK_HASH_SIZE - 1] = NULL -}; + lockdep_assert_not_held(&pool_lock); + + if (parts.pool_index > pools_num_cached) { + WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n", + parts.pool_index, pools_num_cached, handle); + return NULL; + } + + pool = stack_pools[parts.pool_index]; + if (WARN_ON(!pool)) + return NULL; + + stack = pool + offset; + if (WARN_ON(!refcount_read(&stack->count))) + return NULL; + + return stack; +} + +/* Links stack into the freelist. */ +static void depot_free_stack(struct stack_record *stack) +{ + unsigned long flags; + + lockdep_assert_not_held(&pool_lock); + + raw_spin_lock_irqsave(&pool_lock, flags); + printk_deferred_enter(); -/* Calculate hash for a stack */ + /* + * Remove the entry from the hash list. Concurrent list traversal may + * still observe the entry, but since the refcount is zero, this entry + * will no longer be considered as valid. + */ + list_del_rcu(&stack->hash_list); + + /* + * Due to being used from constrained contexts such as the allocators, + * NMI, or even RCU itself, stack depot cannot rely on primitives that + * would sleep (such as synchronize_rcu()) or recursively call into + * stack depot again (such as call_rcu()). + * + * Instead, get an RCU cookie, so that we can ensure this entry isn't + * moved onto another list until the next grace period, and concurrent + * RCU list traversal remains safe. 
+ */ + stack->rcu_state = get_state_synchronize_rcu(); + + /* + * Add the entry to the freelist tail, so that older entries are + * considered first - their RCU cookie is more likely to no longer be + * associated with the current grace period. + */ + list_add_tail(&stack->free_list, &free_stacks); + + counters[DEPOT_COUNTER_FREELIST_SIZE]++; + counters[DEPOT_COUNTER_REFD_FREES]++; + counters[DEPOT_COUNTER_REFD_INUSE]--; + + printk_deferred_exit(); + raw_spin_unlock_irqrestore(&pool_lock, flags); +} + +/* Calculates the hash for a stack. */ static inline u32 hash_stack(unsigned long *entries, unsigned int size) { return jhash2((u32 *)entries, - size * sizeof(unsigned long) / sizeof(u32), - STACK_HASH_SEED); + array_size(size, sizeof(*entries)) / sizeof(u32), + STACK_HASH_SEED); } -/* Use our own, non-instrumented version of memcmp(). - * - * We actually don't care about the order, just the equality. +/* + * Non-instrumented version of memcmp(). + * Does not check the lexicographical order, only the equality. */ static inline int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2, @@ -169,89 +572,99 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2, return 0; } -/* Find a stack that is equal to the one stored in entries in the hash */ -static inline struct stack_record *find_stack(struct stack_record *bucket, - unsigned long *entries, int size, - u32 hash) +/* Finds a stack in a bucket of the hash table. */ +static inline struct stack_record *find_stack(struct list_head *bucket, + unsigned long *entries, int size, + u32 hash, depot_flags_t flags) { - struct stack_record *found; + struct stack_record *stack, *ret = NULL; - for (found = bucket; found; found = found->next) { - if (found->hash == hash && - found->size == size && - !stackdepot_memcmp(entries, found->entries, size)) - return found; + /* + * Stack depot may be used from instrumentation that instruments RCU or + * tracing itself; use variant that does not call into RCU and cannot be + * traced. + * + * Note: Such use cases must take care when using refcounting to evict + * unused entries, because the stack record free-then-reuse code paths + * do call into RCU. + */ + rcu_read_lock_sched_notrace(); + + list_for_each_entry_rcu(stack, bucket, hash_list) { + if (stack->hash != hash || stack->size != size) + continue; + + /* + * This may race with depot_free_stack() accessing the freelist + * management state unioned with @entries. The refcount is zero + * in that case and the below refcount_inc_not_zero() will fail. + */ + if (data_race(stackdepot_memcmp(entries, stack->entries, size))) + continue; + + /* + * Try to increment refcount. If this succeeds, the stack record + * is valid and has not yet been freed. + * + * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior + * to then call stack_depot_put() later, and we can assume that + * a stack record is never placed back on the freelist. + */ + if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count)) + continue; + + ret = stack; + break; } - return NULL; -} -/** - * stack_depot_fetch - Fetch stack entries from a depot - * - * @handle: Stack depot handle which was returned from - * stack_depot_save(). - * @entries: Pointer to store the entries address - * - * Return: The number of trace entries for this depot. 
- */ -unsigned int stack_depot_fetch(depot_stack_handle_t handle, - unsigned long **entries) -{ - union handle_parts parts = { .handle = handle }; - void *slab = stack_slabs[parts.slabindex]; - size_t offset = parts.offset << STACK_ALLOC_ALIGN; - struct stack_record *stack = slab + offset; + rcu_read_unlock_sched_notrace(); - *entries = stack->entries; - return stack->size; + return ret; } -EXPORT_SYMBOL_GPL(stack_depot_fetch); -/** - * stack_depot_save - Save a stack trace from an array - * - * @entries: Pointer to storage array - * @nr_entries: Size of the storage array - * @alloc_flags: Allocation gfp flags - * - * Return: The handle of the stack struct stored in depot - */ -depot_stack_handle_t stack_depot_save(unsigned long *entries, - unsigned int nr_entries, - gfp_t alloc_flags) +depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, + unsigned int nr_entries, + gfp_t alloc_flags, + depot_flags_t depot_flags) { - struct stack_record *found = NULL, **bucket; - depot_stack_handle_t retval = 0; + struct list_head *bucket; + struct stack_record *found = NULL; + depot_stack_handle_t handle = 0; struct page *page = NULL; void *prealloc = NULL; + bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC; unsigned long flags; u32 hash; - if (unlikely(nr_entries == 0)) - goto fast_exit; - - hash = hash_stack(entries, nr_entries); - bucket = &stack_table[hash & STACK_HASH_MASK]; + if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK)) + return 0; /* - * Fast path: look the stack trace up without locking. - * The smp_load_acquire() here pairs with smp_store_release() to - * |bucket| below. + * If this stack trace is from an interrupt, including anything before + * interrupt entry usually leads to unbounded stack depot growth. + * + * Since use of filter_irq_stacks() is a requirement to ensure stack + * depot can efficiently deduplicate interrupt stacks, always + * filter_irq_stacks() to simplify all callers' use of stack depot. */ - found = find_stack(smp_load_acquire(bucket), entries, - nr_entries, hash); + nr_entries = filter_irq_stacks(entries, nr_entries); + + if (unlikely(nr_entries == 0) || stack_depot_disabled) + return 0; + + hash = hash_stack(entries, nr_entries); + bucket = &stack_table[hash & stack_hash_mask]; + + /* Fast path: look the stack trace up without locking. */ + found = find_stack(bucket, entries, nr_entries, hash, depot_flags); if (found) goto exit; /* - * Check if the current or the next stack slab need to be initialized. - * If so, allocate the memory - we won't be able to do that under the - * lock. - * - * The smp_load_acquire() here pairs with smp_store_release() to - * |next_slab_inited| in depot_alloc_stack() and init_stack_slab(). + * Allocate memory for a new pool if required now: + * we won't be able to do that under the lock. */ - if (unlikely(!smp_load_acquire(&next_slab_inited))) { + if (unlikely(can_alloc && !READ_ONCE(new_pool))) { /* * Zero out zone modifiers, as we don't have specific zone * requirements. 
Keep the flags related to allocation in atomic @@ -260,44 +673,177 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries, alloc_flags &= ~GFP_ZONEMASK; alloc_flags &= (GFP_ATOMIC | GFP_KERNEL); alloc_flags |= __GFP_NOWARN; - page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER); + page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER); if (page) prealloc = page_address(page); } - spin_lock_irqsave(&depot_lock, flags); + raw_spin_lock_irqsave(&pool_lock, flags); + printk_deferred_enter(); - found = find_stack(*bucket, entries, nr_entries, hash); + /* Try to find again, to avoid concurrently inserting duplicates. */ + found = find_stack(bucket, entries, nr_entries, hash, depot_flags); if (!found) { struct stack_record *new = - depot_alloc_stack(entries, nr_entries, - hash, &prealloc, alloc_flags); + depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc); + if (new) { - new->next = *bucket; /* - * This smp_store_release() pairs with - * smp_load_acquire() from |bucket| above. + * This releases the stack record into the bucket and + * makes it visible to readers in find_stack(). */ - smp_store_release(bucket, new); + list_add_rcu(&new->hash_list, bucket); found = new; } - } else if (prealloc) { + } + + if (prealloc) { /* - * We didn't need to store this stack trace, but let's keep - * the preallocated memory for the future. + * Either stack depot already contains this stack trace, or + * depot_alloc_stack() did not consume the preallocated memory. + * Try to keep the preallocated memory for future. */ - WARN_ON(!init_stack_slab(&prealloc)); + depot_keep_new_pool(&prealloc); } - spin_unlock_irqrestore(&depot_lock, flags); + printk_deferred_exit(); + raw_spin_unlock_irqrestore(&pool_lock, flags); exit: if (prealloc) { - /* Nobody used this memory, ok to free it. */ - free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER); + /* Stack depot didn't use this memory, free it. */ + free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER); } if (found) - retval = found->handle.handle; -fast_exit: - return retval; + handle = found->handle.handle; + return handle; +} +EXPORT_SYMBOL_GPL(stack_depot_save_flags); + +depot_stack_handle_t stack_depot_save(unsigned long *entries, + unsigned int nr_entries, + gfp_t alloc_flags) +{ + return stack_depot_save_flags(entries, nr_entries, alloc_flags, + STACK_DEPOT_FLAG_CAN_ALLOC); } EXPORT_SYMBOL_GPL(stack_depot_save); + +unsigned int stack_depot_fetch(depot_stack_handle_t handle, + unsigned long **entries) +{ + struct stack_record *stack; + + *entries = NULL; + /* + * Let KMSAN know *entries is initialized. This shall prevent false + * positive reports if instrumented code accesses it. + */ + kmsan_unpoison_memory(entries, sizeof(*entries)); + + if (!handle || stack_depot_disabled) + return 0; + + stack = depot_fetch_stack(handle); + /* + * Should never be NULL, otherwise this is a use-after-put (or just a + * corrupt handle). + */ + if (WARN(!stack, "corrupt handle or use after stack_depot_put()")) + return 0; + + *entries = stack->entries; + return stack->size; +} +EXPORT_SYMBOL_GPL(stack_depot_fetch); + +void stack_depot_put(depot_stack_handle_t handle) +{ + struct stack_record *stack; + + if (!handle || stack_depot_disabled) + return; + + stack = depot_fetch_stack(handle); + /* + * Should always be able to find the stack record, otherwise this is an + * unbalanced put attempt (or corrupt handle). 
+ */ + if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()")) + return; + + if (refcount_dec_and_test(&stack->count)) + depot_free_stack(stack); +} +EXPORT_SYMBOL_GPL(stack_depot_put); + +void stack_depot_print(depot_stack_handle_t stack) +{ + unsigned long *entries; + unsigned int nr_entries; + + nr_entries = stack_depot_fetch(stack, &entries); + if (nr_entries > 0) + stack_trace_print(entries, nr_entries, 0); +} +EXPORT_SYMBOL_GPL(stack_depot_print); + +int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size, + int spaces) +{ + unsigned long *entries; + unsigned int nr_entries; + + nr_entries = stack_depot_fetch(handle, &entries); + return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries, + spaces) : 0; +} +EXPORT_SYMBOL_GPL(stack_depot_snprint); + +depot_stack_handle_t __must_check stack_depot_set_extra_bits( + depot_stack_handle_t handle, unsigned int extra_bits) +{ + union handle_parts parts = { .handle = handle }; + + /* Don't set extra bits on empty handles. */ + if (!handle) + return 0; + + parts.extra = extra_bits; + return parts.handle; +} +EXPORT_SYMBOL(stack_depot_set_extra_bits); + +unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle) +{ + union handle_parts parts = { .handle = handle }; + + return parts.extra; +} +EXPORT_SYMBOL(stack_depot_get_extra_bits); + +static int stats_show(struct seq_file *seq, void *v) +{ + /* + * data race ok: These are just statistics counters, and approximate + * statistics are ok for debugging. + */ + seq_printf(seq, "pools: %d\n", data_race(pools_num)); + for (int i = 0; i < DEPOT_COUNTER_COUNT; i++) + seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i])); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(stats); + +static int depot_debugfs_init(void) +{ + struct dentry *dir; + + if (stack_depot_disabled) + return 0; + + dir = debugfs_create_dir("stackdepot", NULL); + debugfs_create_file("stats", 0444, dir, NULL, &stats_fops); + return 0; +} +late_initcall(depot_debugfs_init); |
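
For reference, the handle layout introduced at the top of this diff can be worked through concretely. The sketch below assumes 4 KiB pages (PAGE_SHIFT = 12) and STACK_DEPOT_EXTRA_BITS = 5 (the value in <linux/stackdepot.h>; treat it as an assumption, not part of this patch). The helper hypothetical_handle_to_record() is invented for illustration and only mirrors the lookup done by depot_fetch_stack(), using identifiers that are internal to lib/stackdepot.c.

/*
 * Worked example of the handle_parts layout, assuming PAGE_SHIFT = 12 and
 * STACK_DEPOT_EXTRA_BITS = 5:
 *
 *   DEPOT_POOL_SIZE       = 1 << (12 + 2)  = 16 KiB per pool
 *   DEPOT_OFFSET_BITS     = 2 + 12 - 4     = 10
 *   DEPOT_POOL_INDEX_BITS = 32 - 10 - 5    = 17
 *
 * A 32-bit handle thus encodes a pool index, a 16-byte-aligned offset within
 * that pool (10 bits x 16-byte granularity covers the full 16 KiB pool), and
 * 5 caller-owned extra bits.
 */
static void *hypothetical_handle_to_record(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };

	/* Mirrors depot_fetch_stack(): pool base plus the scaled offset. */
	return stack_pools[parts.pool_index] +
	       ((size_t)parts.offset << DEPOT_STACK_ALIGN);
}

With these values a handle can address at most DEPOT_POOLS_CAP (8192) pools of 16 KiB each, i.e. 128 MiB of stack records in total.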
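
To illustrate the consumer-facing side of the rework, here is a minimal, hypothetical usage sketch: a caller saves a refcounted stack trace with STACK_DEPOT_FLAG_GET and later drops the reference so the record can be evicted. The track_alloc()/track_free() names and the entries[] depth are invented for the example; the stack depot calls themselves match the signatures added by this patch.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t track_alloc(gfp_t gfp)
{
	unsigned long entries[16];	/* depth chosen arbitrarily for the sketch */
	unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/*
	 * STACK_DEPOT_FLAG_GET takes a reference on the (possibly shared)
	 * stack record; STACK_DEPOT_FLAG_CAN_ALLOC allows the depot to grow
	 * a new pool if the current one is full.
	 */
	return stack_depot_save_flags(entries, nr, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

static void track_free(depot_stack_handle_t handle)
{
	/* Print the saved trace, then drop the reference taken above. */
	stack_depot_print(handle);

	/* The last put moves the record onto the freelist for reuse. */
	stack_depot_put(handle);
}

Persistent saves via plain stack_depot_save() (no STACK_DEPOT_FLAG_GET) behave as before: their records are never placed on the freelist, and calling stack_depot_put() on such a handle is a bug.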