Diffstat (limited to 'lib')
-rw-r--r--  lib/clz_ctz.c                 32
-rw-r--r--  lib/cpu_rmap.c                 5
-rw-r--r--  lib/debugobjects.c           309
-rw-r--r--  lib/dynamic_debug.c            2
-rw-r--r--  lib/idr.c                      2
-rw-r--r--  lib/kobject.c                 17
-rw-r--r--  lib/mpi/longlong.h            36
-rw-r--r--  lib/mpi/mpi-cmp.c              8
-rw-r--r--  lib/mpi/mpicoder.c             3
-rw-r--r--  lib/notifier-error-inject.c    2
-rw-r--r--  lib/radix-tree.c               2
-rw-r--r--  lib/test_firmware.c           56
-rw-r--r--  lib/ts_bm.c                    4
-rw-r--r--  lib/usercopy.c                 7
14 files changed, 304 insertions(+), 181 deletions(-)
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index 2e11e48446ab..ca0582d33532 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -30,36 +30,16 @@ int __weak __clzsi2(int val)
}
EXPORT_SYMBOL(__clzsi2);
-int __weak __clzdi2(long val);
-int __weak __ctzdi2(long val);
-#if BITS_PER_LONG == 32
-
-int __weak __clzdi2(long val)
+int __weak __clzdi2(u64 val);
+int __weak __clzdi2(u64 val)
{
- return 32 - fls((int)val);
+ return 64 - fls64(val);
}
EXPORT_SYMBOL(__clzdi2);
-int __weak __ctzdi2(long val)
+int __weak __ctzdi2(u64 val);
+int __weak __ctzdi2(u64 val)
{
- return __ffs((u32)val);
+ return __ffs64(val);
}
EXPORT_SYMBOL(__ctzdi2);
-
-#elif BITS_PER_LONG == 64
-
-int __weak __clzdi2(long val)
-{
- return 64 - fls64((u64)val);
-}
-EXPORT_SYMBOL(__clzdi2);
-
-int __weak __ctzdi2(long val)
-{
- return __ffs64((u64)val);
-}
-EXPORT_SYMBOL(__ctzdi2);
-
-#else
-#error BITS_PER_LONG not 32 or 64
-#endif
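
For reference, a minimal userspace sketch of what the unified helpers compute, using the GCC builtins instead of the kernel's fls64()/__ffs64() (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* 64 - fls64(v) is the number of leading zero bits; __ffs64(v) is the
 * number of trailing zero bits (both undefined for v == 0). */
static int clzdi2_ref(uint64_t v) { return __builtin_clzll(v); }
static int ctzdi2_ref(uint64_t v) { return __builtin_ctzll(v); }

int main(void)
{
	assert(clzdi2_ref(1) == 63);
	assert(clzdi2_ref(1ULL << 63) == 0);
	assert(ctzdi2_ref(1) == 0);
	assert(ctzdi2_ref(1ULL << 63) == 63);
	return 0;
}
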
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index f610b2a10b3e..a0de1b2579f7 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -235,7 +235,8 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
for (index = 0; index < rmap->used; index++) {
glue = rmap->obj[index];
- irq_set_affinity_notifier(glue->notify.irq, NULL);
+ if (glue)
+ irq_set_affinity_notifier(glue->notify.irq, NULL);
}
cpu_rmap_put(rmap);
@@ -270,6 +271,7 @@ static void irq_cpu_rmap_release(struct kref *ref)
struct irq_glue *glue =
container_of(ref, struct irq_glue, notify.kref);
+ glue->rmap->obj[glue->index] = NULL;
cpu_rmap_put(glue->rmap);
kfree(glue);
}
@@ -300,6 +302,7 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
rc = irq_set_affinity_notifier(irq, &glue->notify);
if (rc) {
cpu_rmap_put(glue->rmap);
+ rmap->obj[glue->index] = NULL;
kfree(glue);
}
return rc;
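
A hedged sketch of the invariant these hunks establish (the names below are hypothetical, not the cpu_rmap API): every slot in the reverse-map array either points at a live glue object or is NULL, so teardown can test the slot before touching it:

#include <stdlib.h>

struct glue { struct rmap *rmap; unsigned int index; };
struct rmap { unsigned int used; struct glue *obj[16]; };

static void glue_release(struct glue *g)
{
	g->rmap->obj[g->index] = NULL;	/* clear the back-pointer first */
	free(g);
}

static void rmap_teardown(struct rmap *r)
{
	for (unsigned int i = 0; i < r->used; i++) {
		if (r->obj[i])		/* slot may already have been released */
			glue_release(r->obj[i]);
	}
}
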
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 14afeeb7d6ef..62d095fd0c52 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -25,6 +25,7 @@
#define ODEBUG_POOL_SIZE 1024
#define ODEBUG_POOL_MIN_LEVEL 256
+#define ODEBUG_POOL_PERCPU_SIZE 64
#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
@@ -35,6 +36,17 @@ struct debug_bucket {
raw_spinlock_t lock;
};
+/*
+ * Debug object percpu free list
+ * Access is protected by disabling irq
+ */
+struct debug_percpu_free {
+ struct hlist_head free_objs;
+ int obj_free;
+};
+
+static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
+
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
@@ -44,13 +56,19 @@ static DEFINE_RAW_SPINLOCK(pool_lock);
static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);
+/*
+ * Because of the presence of percpu free pools, obj_pool_free will
+ * under-count those in the percpu free pools. Similarly, obj_pool_used
+ * will over-count those in the percpu free pools. Adjustments will be
+ * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
+ * can be off.
+ */
static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
/* The number of objs on the global free list */
static int obj_nr_tofree;
-static struct kmem_cache *obj_cache;
static int debug_objects_maxchain __read_mostly;
static int __maybe_unused debug_objects_maxchecked __read_mostly;
@@ -63,6 +81,7 @@ static int debug_objects_pool_size __read_mostly
static int debug_objects_pool_min_level __read_mostly
= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr *descr_test __read_mostly;
+static struct kmem_cache *obj_cache __read_mostly;
/*
* Track numbers of kmem_cache_alloc()/free() calls done.
@@ -163,26 +182,38 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
}
/*
- * Allocate a new object. If the pool is empty, switch off the debugger.
- * Must be called with interrupts disabled.
+ * Allocate a new object from the hlist
*/
-static struct debug_obj *
-alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+static struct debug_obj *__alloc_object(struct hlist_head *list)
{
struct debug_obj *obj = NULL;
- raw_spin_lock(&pool_lock);
- if (obj_pool.first) {
- obj = hlist_entry(obj_pool.first, typeof(*obj), node);
-
- obj->object = addr;
- obj->descr = descr;
- obj->state = ODEBUG_STATE_NONE;
- obj->astate = 0;
+ if (list->first) {
+ obj = hlist_entry(list->first, typeof(*obj), node);
hlist_del(&obj->node);
+ }
- hlist_add_head(&obj->node, &b->list);
+ return obj;
+}
+
+static struct debug_obj *
+alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+{
+ struct debug_percpu_free *percpu_pool;
+ struct debug_obj *obj;
+
+ if (likely(obj_cache)) {
+ percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+ obj = __alloc_object(&percpu_pool->free_objs);
+ if (obj) {
+ percpu_pool->obj_free--;
+ goto init_obj;
+ }
+ }
+ raw_spin_lock(&pool_lock);
+ obj = __alloc_object(&obj_pool);
+ if (obj) {
obj_pool_used++;
if (obj_pool_used > obj_pool_max_used)
obj_pool_max_used = obj_pool_used;
@@ -193,6 +224,14 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
}
raw_spin_unlock(&pool_lock);
+init_obj:
+ if (obj) {
+ obj->object = addr;
+ obj->descr = descr;
+ obj->state = ODEBUG_STATE_NONE;
+ obj->astate = 0;
+ hlist_add_head(&obj->node, &b->list);
+ }
return obj;
}
@@ -247,8 +286,21 @@ static bool __free_object(struct debug_obj *obj)
{
unsigned long flags;
bool work;
+ struct debug_percpu_free *percpu_pool;
- raw_spin_lock_irqsave(&pool_lock, flags);
+ local_irq_save(flags);
+ /*
+ * Try to free it into the percpu pool first.
+ */
+ percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+ if (obj_cache && percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
+ hlist_add_head(&obj->node, &percpu_pool->free_objs);
+ percpu_pool->obj_free++;
+ local_irq_restore(flags);
+ return false;
+ }
+
+ raw_spin_lock(&pool_lock);
work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
obj_pool_used--;
@@ -259,7 +311,8 @@ static bool __free_object(struct debug_obj *obj)
obj_pool_free++;
hlist_add_head(&obj->node, &obj_pool);
}
- raw_spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock(&pool_lock);
+ local_irq_restore(flags);
return work;
}
@@ -318,6 +371,15 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
struct debug_obj_descr *descr = obj->descr;
static int limit;
+ /*
+ * Don't report if lookup_object_or_alloc() by the current thread
+ * failed because lookup_object_or_alloc()/debug_objects_oom() by a
+ * concurrent thread turned off debug_objects_enabled and cleared
+ * the hash buckets.
+ */
+ if (!debug_objects_enabled)
+ return;
+
if (limit < 5 && descr != descr_test) {
void *hint = descr->debug_hint ?
descr->debug_hint(obj->object) : NULL;
@@ -368,6 +430,55 @@ static void debug_object_is_on_stack(void *addr, int onstack)
WARN_ON(1);
}
+static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
+ struct debug_obj_descr *descr,
+ bool onstack, bool alloc_ifstatic)
+{
+ struct debug_obj *obj = lookup_object(addr, b);
+ enum debug_obj_state state = ODEBUG_STATE_NONE;
+
+ if (likely(obj))
+ return obj;
+
+ /*
+ * debug_object_init() unconditionally allocates untracked
+ * objects. It does not matter whether it is a static object or
+ * not.
+ *
+ * debug_object_assert_init() and debug_object_activate() allow
+ * allocation only if the descriptor callback confirms that the
+ * object is static and considered initialized. For non-static
+ * objects the allocation needs to be done from the fixup callback.
+ */
+ if (unlikely(alloc_ifstatic)) {
+ if (!descr->is_static_object || !descr->is_static_object(addr))
+ return ERR_PTR(-ENOENT);
+ /* Statically allocated objects are considered initialized */
+ state = ODEBUG_STATE_INIT;
+ }
+
+ obj = alloc_object(addr, b, descr);
+ if (likely(obj)) {
+ obj->state = state;
+ debug_object_is_on_stack(addr, onstack);
+ return obj;
+ }
+
+ /* Out of memory. Do the cleanup outside of the locked region */
+ debug_objects_enabled = 0;
+ return NULL;
+}
+
+static void debug_objects_fill_pool(void)
+{
+ /*
+ * On RT enabled kernels the pool refill must happen in preemptible
+ * context:
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+ fill_pool();
+}
+
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
@@ -376,22 +487,17 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+ debug_objects_fill_pool();
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
- obj = lookup_object(addr, db);
- if (!obj) {
- obj = alloc_object(addr, db, descr);
- if (!obj) {
- debug_objects_enabled = 0;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_objects_oom();
- return;
- }
- debug_object_is_on_stack(addr, onstack);
+ obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return;
}
switch (obj->state) {
@@ -402,15 +508,16 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
break;
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "init");
state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "init");
debug_object_fixup(descr->fixup_init, addr, state);
return;
case ODEBUG_STATE_DESTROYED:
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_print_object(obj, "init");
- break;
+ return;
default:
break;
}
@@ -455,24 +562,26 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
*/
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
enum debug_obj_state state;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
int ret;
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
if (!debug_objects_enabled)
return 0;
+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
- obj = lookup_object(addr, db);
- if (obj) {
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ if (likely(!IS_ERR_OR_NULL(obj))) {
+ bool print_object = false;
+
switch (obj->state) {
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
@@ -481,14 +590,14 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
break;
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "activate");
state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "activate");
ret = debug_object_fixup(descr->fixup_activate, addr, state);
return ret ? 0 : -EINVAL;
case ODEBUG_STATE_DESTROYED:
- debug_print_object(obj, "activate");
+ print_object = true;
ret = -EINVAL;
break;
default:
@@ -496,28 +605,23 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
break;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (print_object)
+ debug_print_object(obj, "activate");
return ret;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- /*
- * We are here when a static object is activated. We
- * let the type specific code confirm whether this is
- * true or not. if true, we just make sure that the
- * static object is tracked in the object tracker. If
- * not, this must be a bug, so we try to fix it up.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* track this static object */
- debug_object_init(addr, descr);
- debug_object_activate(addr, descr);
- } else {
- debug_print_object(&o, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- return ret ? 0 : -EINVAL;
+
+ /* If NULL the allocation has hit OOM */
+ if (!obj) {
+ debug_objects_oom();
+ return 0;
}
- return 0;
+
+ /* Object is neither static nor tracked. It's not initialized */
+ debug_print_object(&o, "activate");
+ ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
+ return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
@@ -531,6 +635,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
+ bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -548,24 +653,27 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
if (!obj->astate)
obj->state = ODEBUG_STATE_INACTIVE;
else
- debug_print_object(obj, "deactivate");
+ print_object = true;
break;
case ODEBUG_STATE_DESTROYED:
- debug_print_object(obj, "deactivate");
+ print_object = true;
break;
default:
break;
}
- } else {
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (!obj) {
struct debug_obj o = { .object = addr,
.state = ODEBUG_STATE_NOTAVAILABLE,
.descr = descr };
debug_print_object(&o, "deactivate");
+ } else if (print_object) {
+ debug_print_object(obj, "deactivate");
}
-
- raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
@@ -580,6 +688,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
+ bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -599,20 +708,22 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
obj->state = ODEBUG_STATE_DESTROYED;
break;
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "destroy");
state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "destroy");
debug_object_fixup(descr->fixup_destroy, addr, state);
return;
case ODEBUG_STATE_DESTROYED:
- debug_print_object(obj, "destroy");
+ print_object = true;
break;
default:
break;
}
out_unlock:
raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (print_object)
+ debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
@@ -641,9 +752,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "free");
state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "free");
debug_object_fixup(descr->fixup_free, addr, state);
return;
default:
@@ -664,6 +775,7 @@ EXPORT_SYMBOL_GPL(debug_object_free);
*/
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@@ -671,34 +783,25 @@ void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
if (!debug_objects_enabled)
return;
+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (likely(!IS_ERR_OR_NULL(obj)))
+ return;
- obj = lookup_object(addr, db);
+ /* If NULL the allocation has hit OOM */
if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- raw_spin_unlock_irqrestore(&db->lock, flags);
- /*
- * Maybe the object is static, and we let the type specific
- * code confirm. Track this static object if true, else invoke
- * fixup.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* Track this static object */
- debug_object_init(addr, descr);
- } else {
- debug_print_object(&o, "assert_init");
- debug_object_fixup(descr->fixup_assert_init, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- }
+ debug_objects_oom();
return;
}
- raw_spin_unlock_irqrestore(&db->lock, flags);
+ /* Object is neither tracked nor static. It's not initialized. */
+ debug_print_object(&o, "assert_init");
+ debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
@@ -716,6 +819,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
+ bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -731,22 +835,25 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
if (obj->astate == expect)
obj->astate = next;
else
- debug_print_object(obj, "active_state");
+ print_object = true;
break;
default:
- debug_print_object(obj, "active_state");
+ print_object = true;
break;
}
- } else {
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (!obj) {
struct debug_obj o = { .object = addr,
.state = ODEBUG_STATE_NOTAVAILABLE,
.descr = descr };
debug_print_object(&o, "active_state");
+ } else if (print_object) {
+ debug_print_object(obj, "active_state");
}
-
- raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
@@ -782,10 +889,10 @@ repeat:
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "free");
descr = obj->descr;
state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "free");
debug_object_fixup(descr->fixup_free,
(void *) oaddr, state);
goto repeat;
@@ -822,13 +929,19 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
static int debug_stats_show(struct seq_file *m, void *v)
{
+ int cpu, obj_percpu_free = 0;
+
+ for_each_possible_cpu(cpu)
+ obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
+
seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
seq_printf(m, "warnings :%d\n", debug_objects_warnings);
seq_printf(m, "fixups :%d\n", debug_objects_fixups);
- seq_printf(m, "pool_free :%d\n", obj_pool_free);
+ seq_printf(m, "pool_free :%d\n", obj_pool_free + obj_percpu_free);
+ seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
- seq_printf(m, "pool_used :%d\n", obj_pool_used);
+ seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
@@ -1177,9 +1290,20 @@ free:
*/
void __init debug_objects_mem_init(void)
{
+ int cpu;
+
if (!debug_objects_enabled)
return;
+ /*
+ * Initialize the percpu object pools
+ *
+ * Initialization is not strictly necessary, but was done for
+ * completeness.
+ */
+ for_each_possible_cpu(cpu)
+ INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
+
obj_cache = kmem_cache_create("debug_objects_cache",
sizeof (struct debug_obj), 0,
SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
@@ -1191,11 +1315,4 @@ void __init debug_objects_mem_init(void)
pr_warn("out of memory.\n");
} else
debug_objects_selftest();
-
- /*
- * Increase the thresholds for allocating and freeing objects
- * according to the number of possible CPUs available in the system.
- */
- debug_objects_pool_size += num_possible_cpus() * 32;
- debug_objects_pool_min_level += num_possible_cpus() * 4;
}
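
A simplified restatement of the fast path this change adds (not a drop-in replacement; it reuses the identifiers from the diff and assumes interrupts are already disabled by the caller, as in the patch): a small per-CPU LIFO of free objects sits in front of the global pool, so the common alloc/free case never takes pool_lock.

static struct debug_obj *pcpu_alloc_fast(void)
{
	struct debug_percpu_free *pcp = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj = __alloc_object(&pcp->free_objs);

	if (obj)
		pcp->obj_free--;	/* hit: pool_lock never taken */
	return obj;			/* miss: caller falls back to obj_pool */
}

static bool pcpu_free_fast(struct debug_obj *obj)
{
	struct debug_percpu_free *pcp = this_cpu_ptr(&percpu_obj_pool);

	if (pcp->obj_free >= ODEBUG_POOL_PERCPU_SIZE)
		return false;		/* percpu cache full, use the global pool */
	hlist_add_head(&obj->node, &pcp->free_objs);
	pcp->obj_free++;
	return true;
}
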
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index fec610703095..d56a78beb279 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -984,7 +984,7 @@ static int __init dynamic_debug_init(void)
int n = 0, entries = 0, modct = 0;
int verbose_bytes = 0;
- if (__start___verbose == __stop___verbose) {
+ if (&__start___verbose == &__stop___verbose) {
pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
return 1;
}
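
Background on the one-liner above (a hedged gloss, hypothetical helper name): the section bounds are linker-provided symbols declared as arrays, and some compilers assume two distinct arrays can never share an address, folding the bare __start == __stop comparison to a constant. Taking the addresses keeps it a genuine runtime check:

extern struct _ddebug __start___verbose[];
extern struct _ddebug __stop___verbose[];

static bool ddebug_table_is_empty(void)
{
	/* compare the symbol addresses, not the decayed array pointers */
	return &__start___verbose == &__stop___verbose;
}
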
diff --git a/lib/idr.c b/lib/idr.c
index 82c24a417dc6..432a985bf772 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -103,7 +103,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
* @end: The maximum ID (exclusive).
* @gfp: Memory allocation flags.
*
- * Allocates an unused ID in the range specified by @nextid and @end. If
+ * Allocates an unused ID in the range specified by @start and @end. If
* @end is <= 0, it is treated as one larger than %INT_MAX. This allows
* callers to use @start + N as @end as long as N is within integer range.
* The search for an unused ID will start at the last ID allocated and will
diff --git a/lib/kobject.c b/lib/kobject.c
index 97d86dc17c42..a45eb05c79fe 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -135,7 +135,7 @@ static int get_kobj_path_length(struct kobject *kobj)
return length;
}
-static void fill_kobj_path(struct kobject *kobj, char *path, int length)
+static int fill_kobj_path(struct kobject *kobj, char *path, int length)
{
struct kobject *parent;
@@ -144,12 +144,16 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
+ if (length <= 0)
+ return -EINVAL;
memcpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
kobj, __func__, path);
+
+ return 0;
}
/**
@@ -165,13 +169,17 @@ char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
char *path;
int len;
+retry:
len = get_kobj_path_length(kobj);
if (len == 0)
return NULL;
path = kzalloc(len, gfp_mask);
if (!path)
return NULL;
- fill_kobj_path(kobj, path, len);
+ if (fill_kobj_path(kobj, path, len)) {
+ kfree(path);
+ goto retry;
+ }
return path;
}
@@ -829,6 +837,11 @@ int kset_register(struct kset *k)
if (!k)
return -EINVAL;
+ if (!k->kobj.ktype) {
+ pr_err("must have a ktype to be initialized properly!\n");
+ return -EINVAL;
+ }
+
kset_init(k);
err = kobject_add_internal(&k->kobj);
if (err)
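
A hedged sketch of the retry pattern kobject_get_path() adopts above (struct object, compute_path_length() and fill_path() are hypothetical stand-ins): size the buffer, try to fill it, and start over if the object was renamed or reparented in between so the path no longer fits.

#include <linux/slab.h>

char *object_get_path(struct object *obj, gfp_t gfp_mask)
{
	char *path;
	int len;

retry:
	len = compute_path_length(obj);		/* can grow under us */
	if (len == 0)
		return NULL;
	path = kzalloc(len, gfp_mask);
	if (!path)
		return NULL;
	if (fill_path(obj, path, len)) {	/* raced with a rename: too small */
		kfree(path);
		goto retry;
	}
	return path;
}
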
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 6c5229f98c9e..cac4e5aee739 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -639,30 +639,12 @@ do { \
************** MIPS *****************
***************************************/
#if defined(__mips__) && W_TYPE_SIZE == 32
-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
#define umul_ppmm(w1, w0, u, v) \
do { \
UDItype __ll = (UDItype)(u) * (v); \
w1 = __ll >> 32; \
w0 = __ll; \
} while (0)
-#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
-#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("multu %2,%3" \
- : "=l" ((USItype)(w0)), \
- "=h" ((USItype)(w1)) \
- : "d" ((USItype)(u)), \
- "d" ((USItype)(v)))
-#else
-#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("multu %2,%3\n" \
- "mflo %0\n" \
- "mfhi %1" \
- : "=d" ((USItype)(w0)), \
- "=d" ((USItype)(w1)) \
- : "d" ((USItype)(u)), \
- "d" ((USItype)(v)))
-#endif
#define UMUL_TIME 10
#define UDIV_TIME 100
#endif /* __mips__ */
@@ -687,7 +669,7 @@ do { \
: "d" ((UDItype)(u)), \
"d" ((UDItype)(v))); \
} while (0)
-#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+#else
#define umul_ppmm(w1, w0, u, v) \
do { \
typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
@@ -695,22 +677,6 @@ do { \
w1 = __ll >> 64; \
w0 = __ll; \
} while (0)
-#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
-#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("dmultu %2,%3" \
- : "=l" ((UDItype)(w0)), \
- "=h" ((UDItype)(w1)) \
- : "d" ((UDItype)(u)), \
- "d" ((UDItype)(v)))
-#else
-#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("dmultu %2,%3\n" \
- "mflo %0\n" \
- "mfhi %1" \
- : "=d" ((UDItype)(w0)), \
- "=d" ((UDItype)(w1)) \
- : "d" ((UDItype)(u)), \
- "d" ((UDItype)(v)))
#endif
#define UMUL_TIME 20
#define UDIV_TIME 140
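
An illustrative userspace equivalent of the retained C fallback (not kernel code): the compiler's widening multiply replaces the dropped MIPS "multu"/"mflo"/"mfhi" asm variants.

#include <stdint.h>

/* 32x32 -> 64 widening multiply, split into high and low halves */
static void umul_ppmm32_ref(uint32_t *w1, uint32_t *w0, uint32_t u, uint32_t v)
{
	uint64_t p = (uint64_t)u * v;

	*w1 = (uint32_t)(p >> 32);
	*w0 = (uint32_t)p;
}
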
diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c
index d25e9e96c310..ceaebe181cd7 100644
--- a/lib/mpi/mpi-cmp.c
+++ b/lib/mpi/mpi-cmp.c
@@ -25,8 +25,12 @@ int mpi_cmp_ui(MPI u, unsigned long v)
mpi_limb_t limb = v;
mpi_normalize(u);
- if (!u->nlimbs && !limb)
- return 0;
+ if (u->nlimbs == 0) {
+ if (v == 0)
+ return 0;
+ else
+ return -1;
+ }
if (u->sign)
return -1;
if (u->nlimbs > 1)
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index eead4b339466..4f73db248009 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -397,7 +397,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
while (sg_miter_next(&miter)) {
buff = miter.addr;
- len = miter.length;
+ len = min_t(unsigned, miter.length, nbytes);
+ nbytes -= len;
for (x = 0; x < len; x++) {
a <<= 8;
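
An illustrative non-kernel sketch of the clamping added above: when walking variable-length chunks, consume at most the requested total so trailing bytes in an oversized last chunk are ignored.

#include <stddef.h>

static size_t consume_chunks(const size_t *chunk_len, size_t nchunks,
			     size_t nbytes)
{
	size_t used = 0;

	for (size_t i = 0; i < nchunks && nbytes; i++) {
		/* same role as min_t(unsigned, miter.length, nbytes) above */
		size_t len = chunk_len[i] < nbytes ? chunk_len[i] : nbytes;

		used += len;
		nbytes -= len;
	}
	return used;
}
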
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index eb4a04afea80..125ea8ce23a4 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -14,7 +14,7 @@ static int debugfs_errno_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
+DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set,
"%lld\n");
static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e5cab5c4e383..8d29fa5b2695 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1498,7 +1498,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
{
struct radix_tree_node *node, *parent;
unsigned long maxindex;
- int uninitialized_var(offset);
+ int offset;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index a74b1aae7461..be3baea88b61 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -160,7 +160,7 @@ static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
*dst = kstrndup(name, count, gfp);
if (!*dst)
- return -ENOSPC;
+ return -ENOMEM;
return count;
}
@@ -284,16 +284,26 @@ static ssize_t config_test_show_str(char *dst,
return len;
}
-static int test_dev_config_update_bool(const char *buf, size_t size,
- bool *cfg)
+static inline int __test_dev_config_update_bool(const char *buf, size_t size,
+ bool *cfg)
{
int ret;
- mutex_lock(&test_fw_mutex);
if (strtobool(buf, cfg) < 0)
ret = -EINVAL;
else
ret = size;
+
+ return ret;
+}
+
+static int test_dev_config_update_bool(const char *buf, size_t size,
+ bool *cfg)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+ ret = __test_dev_config_update_bool(buf, size, cfg);
mutex_unlock(&test_fw_mutex);
return ret;
@@ -323,7 +333,7 @@ static ssize_t test_dev_config_show_int(char *buf, int cfg)
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+static inline int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
int ret;
long new;
@@ -335,14 +345,23 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
if (new > U8_MAX)
return -EINVAL;
- mutex_lock(&test_fw_mutex);
*(u8 *)cfg = new;
- mutex_unlock(&test_fw_mutex);
/* Always return full write size even if we didn't consume all */
return size;
}
+static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+ ret = __test_dev_config_update_u8(buf, size, cfg);
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+
static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
{
u8 val;
@@ -375,10 +394,10 @@ static ssize_t config_num_requests_store(struct device *dev,
mutex_unlock(&test_fw_mutex);
goto out;
}
- mutex_unlock(&test_fw_mutex);
- rc = test_dev_config_update_u8(buf, count,
- &test_fw_config->num_requests);
+ rc = __test_dev_config_update_u8(buf, count,
+ &test_fw_config->num_requests);
+ mutex_unlock(&test_fw_mutex);
out:
return rc;
@@ -456,7 +475,7 @@ static ssize_t trigger_request_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s'\n", name);
@@ -497,7 +516,7 @@ static ssize_t trigger_async_request_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s'\n", name);
@@ -540,7 +559,7 @@ static ssize_t trigger_custom_fallback_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s' using custom fallback mechanism\n", name);
@@ -618,6 +637,11 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ rc = -EBUSY;
+ goto out_bail;
+ }
+
test_fw_config->reqs =
vzalloc(array3_size(sizeof(struct test_batched_req),
test_fw_config->num_requests, 2));
@@ -721,6 +745,11 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ rc = -EBUSY;
+ goto out_bail;
+ }
+
test_fw_config->reqs =
vzalloc(array3_size(sizeof(struct test_batched_req),
test_fw_config->num_requests, 2));
@@ -902,6 +931,7 @@ static int __init test_firmware_init(void)
rc = misc_register(&test_fw_misc_device);
if (rc) {
+ __test_firmware_config_free();
kfree(test_fw_config);
pr_err("could not register misc device: %d\n", rc);
return rc;
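
A hedged sketch (hypothetical names, not the test_firmware API) of the lock-split pattern introduced above: the double-underscore helper assumes the caller already holds the mutex, while the public wrapper takes it, so a store path that already holds test_fw_mutex can update the value without recursively locking.

#include <linux/mutex.h>
#include <linux/lockdep.h>

static DEFINE_MUTEX(cfg_mutex);
static int cfg_value;

static int __cfg_update(int val)
{
	lockdep_assert_held(&cfg_mutex);	/* caller must hold the lock */
	cfg_value = val;
	return 0;
}

static int cfg_update(int val)
{
	int ret;

	mutex_lock(&cfg_mutex);
	ret = __cfg_update(val);
	mutex_unlock(&cfg_mutex);
	return ret;
}
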
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 9e66ee4020e9..5de382e79a45 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -64,10 +64,12 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
struct ts_bm *bm = ts_config_priv(conf);
unsigned int i, text_len, consumed = state->offset;
const u8 *text;
- int shift = bm->patlen - 1, bs;
+ int bs;
const u8 icase = conf->flags & TS_IGNORECASE;
for (;;) {
+ int shift = bm->patlen - 1;
+
text_len = conf->get_next_block(consumed, &text, conf, state);
if (unlikely(text_len == 0))
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 3744b2a8e591..1e99c1baf4ff 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/uaccess.h>
+#include <linux/nospec.h>
/* out-of-line parts */
@@ -9,6 +10,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
unsigned long res = n;
might_fault();
if (likely(access_ok(VERIFY_READ, from, n))) {
+ /*
+ * Ensure that bad access_ok() speculation will not
+ * lead to nasty side effects *after* the copy is
+ * finished:
+ */
+ barrier_nospec();
kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
}
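
The barrier_nospec() above stops a mispredicted access_ok() from letting the copy run speculatively on a bad pointer. A related, hedged example of the other helper from <linux/nospec.h>, clamping an untrusted index before the dependent load (table and its size are illustrative):

#include <linux/kernel.h>
#include <linux/nospec.h>
#include <linux/types.h>

static u32 table[64];

static u32 table_lookup(unsigned long idx)
{
	u32 val = 0;

	if (idx < ARRAY_SIZE(table)) {
		/* clamp idx under speculation before using it as an index */
		idx = array_index_nospec(idx, ARRAY_SIZE(table));
		val = table[idx];
	}
	return val;
}
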