Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      1
-rw-r--r--  lib/Kconfig.debug               21
-rw-r--r--  lib/Makefile                     4
-rw-r--r--  lib/bug.c                       11
-rw-r--r--  lib/bust_spinlocks.c             3
-rw-r--r--  lib/crc-t10dif.c                54
-rw-r--r--  lib/crypto/Makefile              4
-rw-r--r--  lib/crypto/arc4.c               74
-rw-r--r--  lib/debugobjects.c               5
-rw-r--r--  lib/devres.c                    30
-rw-r--r--  lib/dump_stack.c                 7
-rw-r--r--  lib/dynamic_debug.c             23
-rw-r--r--  lib/find_bit.c                  16
-rw-r--r--  lib/idr.c                       31
-rw-r--r--  lib/irq_poll.c                   5
-rw-r--r--  lib/locking-selftest.c          50
-rw-r--r--  lib/lzo/lzo1x_compress.c        13
-rw-r--r--  lib/mpi/longlong.h              36
-rw-r--r--  lib/nmi_backtrace.c              6
-rw-r--r--  lib/printk_ringbuffer.c        589
-rw-r--r--  lib/radix-tree.c                30
-rw-r--r--  lib/raid6/test/Makefile          6
-rw-r--r--  lib/raid6/unroll.awk             2
-rw-r--r--  lib/random32.c                   2
-rw-r--r--  lib/sbitmap.c                    2
-rw-r--r--  lib/scatterlist.c                4
-rw-r--r--  lib/smp_processor_id.c           7
-rw-r--r--  lib/stackdepot.c                 8
-rw-r--r--  lib/strncpy_from_user.c         14
-rw-r--r--  lib/strnlen_user.c              14
-rw-r--r--  lib/test_kasan.c                82
-rw-r--r--  lib/test_kmod.c                  2
-rw-r--r--  lib/test_objagg.c                4
-rw-r--r--  lib/test_printf.c               19
-rw-r--r--  lib/test_xarray.c              157
-rw-r--r--  lib/ubsan.c                     69
-rw-r--r--  lib/vsprintf.c                  22
-rw-r--r--  lib/xarray.c                    50
-rw-r--r--  lib/zlib_inflate/inffast.c      91
39 files changed, 1307 insertions, 261 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 90623a0e1942..3b3a28671b41 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -469,6 +469,7 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
+ depends on !PREEMPT_RT_FULL
help
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 120ec6f64bbc..fe278024202d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -61,6 +61,23 @@ config CONSOLE_LOGLEVEL_QUIET
will be used as the loglevel. IOW passing "quiet" will be the
equivalent of passing "loglevel=<CONSOLE_LOGLEVEL_QUIET>"
+config CONSOLE_LOGLEVEL_EMERGENCY
+ int "Emergency console loglevel (1-15)"
+ range 1 15
+ default "5"
+ help
+ The loglevel used to decide whether a console message is an
+ emergency message.
+
+ If supported by the console driver, emergency messages will be
+ flushed to the console immediately. This can cause significant system
+ latencies so the value should be set such that only significant
+ messages are classified as emergency messages.
+
+ Setting a default here is equivalent to passing
+ emergency_loglevel=<x> on the kernel command line; the boot
+ parameter still overrides the value chosen here.
+
config MESSAGE_LOGLEVEL_DEFAULT
int "Default message log level (1-7)"
range 1 7
@@ -223,6 +240,8 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
depends on DEBUG_INFO
+ depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
+ depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
help
Generate deduplicated BTF type information from DWARF debug info.
Turning this on expects presence of pahole tool, which will convert
@@ -1251,7 +1270,7 @@ config DEBUG_ATOMIC_SLEEP
config DEBUG_LOCKING_API_SELFTESTS
bool "Locking API boot-time self-tests"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !PREEMPT_RT_FULL
help
Say Y here if you want the kernel to run a short self-test during
bootup. The self-test checks whether common types of locking bugs
diff --git a/lib/Makefile b/lib/Makefile
index 7c3c1ad21afc..8473ebfe985d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -30,7 +30,7 @@ endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
- idr.o extable.o \
+ idr.o extable.o printk_ringbuffer.o \
sha1.o chacha.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
@@ -102,7 +102,7 @@ endif
obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
-obj-y += math/
+obj-y += math/ crypto/
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
diff --git a/lib/bug.c b/lib/bug.c
index 1077366f496b..8c98af0bf585 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -181,6 +181,15 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
}
}
+ /*
+ * BUG() and WARN_ON() families don't print a custom debug message
+ * before triggering the exception handler, so we must add the
+ * "cut here" line now. WARN() issues its own "cut here" before the
+ * extra debugging message it writes before triggering the handler.
+ */
+ if ((bug->flags & BUGFLAG_NO_CUT_HERE) == 0)
+ printk(KERN_DEFAULT CUT_HERE);
+
if (warning) {
/* this is a WARN_ON rather than BUG/BUG_ON */
__warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
@@ -188,8 +197,6 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
return BUG_TRAP_TYPE_WARN;
}
- printk(KERN_DEFAULT CUT_HERE);
-
if (file)
pr_crit("kernel BUG at %s:%u!\n", file, line);
else
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 8be59f84eaea..c6e083323d1b 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -26,7 +26,6 @@ void bust_spinlocks(int yes)
unblank_screen();
#endif
console_unblank();
- if (--oops_in_progress == 0)
- wake_up_klogd();
+ --oops_in_progress;
}
}
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 8cc01a603416..c9acf1c12cfc 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -19,39 +19,46 @@
static struct crypto_shash __rcu *crct10dif_tfm;
static struct static_key crct10dif_fallback __read_mostly;
static DEFINE_MUTEX(crc_t10dif_mutex);
+static struct work_struct crct10dif_rehash_work;
-static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
+static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
{
struct crypto_alg *alg = data;
- struct crypto_shash *new, *old;
if (val != CRYPTO_MSG_ALG_LOADED ||
static_key_false(&crct10dif_fallback) ||
strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
return 0;
+ schedule_work(&crct10dif_rehash_work);
+ return 0;
+}
+
+static void crc_t10dif_rehash(struct work_struct *work)
+{
+ struct crypto_shash *new, *old;
+
mutex_lock(&crc_t10dif_mutex);
old = rcu_dereference_protected(crct10dif_tfm,
lockdep_is_held(&crc_t10dif_mutex));
if (!old) {
mutex_unlock(&crc_t10dif_mutex);
- return 0;
+ return;
}
new = crypto_alloc_shash("crct10dif", 0, 0);
if (IS_ERR(new)) {
mutex_unlock(&crc_t10dif_mutex);
- return 0;
+ return;
}
rcu_assign_pointer(crct10dif_tfm, new);
mutex_unlock(&crc_t10dif_mutex);
synchronize_rcu();
crypto_free_shash(old);
- return 0;
}
static struct notifier_block crc_t10dif_nb = {
- .notifier_call = crc_t10dif_rehash,
+ .notifier_call = crc_t10dif_notify,
};
__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
@@ -86,19 +93,26 @@ EXPORT_SYMBOL(crc_t10dif);
static int __init crc_t10dif_mod_init(void)
{
+ struct crypto_shash *tfm;
+
+ INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
crypto_register_notifier(&crc_t10dif_nb);
- crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
- if (IS_ERR(crct10dif_tfm)) {
+ mutex_lock(&crc_t10dif_mutex);
+ tfm = crypto_alloc_shash("crct10dif", 0, 0);
+ if (IS_ERR(tfm)) {
static_key_slow_inc(&crct10dif_fallback);
- crct10dif_tfm = NULL;
+ tfm = NULL;
}
+ RCU_INIT_POINTER(crct10dif_tfm, tfm);
+ mutex_unlock(&crc_t10dif_mutex);
return 0;
}
static void __exit crc_t10dif_mod_fini(void)
{
crypto_unregister_notifier(&crc_t10dif_nb);
- crypto_free_shash(crct10dif_tfm);
+ cancel_work_sync(&crct10dif_rehash_work);
+ crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1));
}
module_init(crc_t10dif_mod_init);
@@ -106,11 +120,27 @@ module_exit(crc_t10dif_mod_fini);
static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
{
+ struct crypto_shash *tfm;
+ const char *name;
+ int len;
+
if (static_key_false(&crct10dif_fallback))
return sprintf(buffer, "fallback\n");
- return sprintf(buffer, "%s\n",
- crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
+ rcu_read_lock();
+ tfm = rcu_dereference(crct10dif_tfm);
+ if (!tfm) {
+ len = sprintf(buffer, "init\n");
+ goto unlock;
+ }
+
+ name = crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
+ len = sprintf(buffer, "%s\n", name);
+
+unlock:
+ rcu_read_unlock();
+
+ return len;
}
module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
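
As a point of reference for the hunk above, the exported interface is unchanged by this rework; crc_t10dif() remains the zero-seeded wrapper around crc_t10dif_update(). A minimal caller sketch (not part of the patch; buffer name hypothetical), using only the signatures visible in this file:

#include <linux/crc-t10dif.h>

/* Sketch: compute a T10 DIF CRC one-shot and incrementally. */
static __u16 example_pi_crc(const unsigned char *buf, size_t len)
{
	__u16 crc;

	crc = crc_t10dif(buf, len);		/* one-shot form */

	/* Equivalent incremental form, seeded with 0: */
	crc = crc_t10dif_update(0, buf, len / 2);
	crc = crc_t10dif_update(crc, buf + len / 2, len - len / 2);

	return crc;
}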
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
new file mode 100644
index 000000000000..88195c34932d
--- /dev/null
+++ b/lib/crypto/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
+libarc4-y := arc4.o
diff --git a/lib/crypto/arc4.c b/lib/crypto/arc4.c
new file mode 100644
index 000000000000..c2020f19c652
--- /dev/null
+++ b/lib/crypto/arc4.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API
+ *
+ * ARC4 Cipher Algorithm
+ *
+ * Jon Oberheide <jon@oberheide.org>
+ */
+
+#include <crypto/arc4.h>
+#include <linux/module.h>
+
+int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
+{
+ int i, j = 0, k = 0;
+
+ ctx->x = 1;
+ ctx->y = 0;
+
+ for (i = 0; i < 256; i++)
+ ctx->S[i] = i;
+
+ for (i = 0; i < 256; i++) {
+ u32 a = ctx->S[i];
+
+ j = (j + in_key[k] + a) & 0xff;
+ ctx->S[i] = ctx->S[j];
+ ctx->S[j] = a;
+ if (++k >= key_len)
+ k = 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(arc4_setkey);
+
+void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
+{
+ u32 *const S = ctx->S;
+ u32 x, y, a, b;
+ u32 ty, ta, tb;
+
+ if (len == 0)
+ return;
+
+ x = ctx->x;
+ y = ctx->y;
+
+ a = S[x];
+ y = (y + a) & 0xff;
+ b = S[y];
+
+ do {
+ S[y] = a;
+ a = (a + b) & 0xff;
+ S[x] = b;
+ x = (x + 1) & 0xff;
+ ta = S[x];
+ ty = (y + ta) & 0xff;
+ tb = S[ty];
+ *out++ = *in++ ^ S[a];
+ if (--len == 0)
+ break;
+ y = ty;
+ a = ta;
+ b = tb;
+ } while (true);
+
+ ctx->x = x;
+ ctx->y = y;
+}
+EXPORT_SYMBOL(arc4_crypt);
+
+MODULE_LICENSE("GPL");
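
A rough usage sketch for the new library cipher (not part of the patch; key bytes are made up, and ARC4 is kept only for legacy protocols such as WEP/TKIP, never for new designs):

#include <crypto/arc4.h>
#include <linux/string.h>

static void example_arc4_roundtrip(void)
{
	struct arc4_ctx ctx;
	const u8 key[5] = { 0x01, 0x02, 0x03, 0x04, 0x05 };
	u8 buf[] = "legacy payload";

	/* Encrypt in place. */
	arc4_setkey(&ctx, key, sizeof(key));
	arc4_crypt(&ctx, buf, buf, sizeof(buf));

	/* ARC4 is symmetric: a fresh context with the same key decrypts. */
	arc4_setkey(&ctx, key, sizeof(key));
	arc4_crypt(&ctx, buf, buf, sizeof(buf));
}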
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 55437fd5128b..6f6db6b15c64 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -376,7 +376,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (preempt_count() == 0 && !irqs_disabled())
+#endif
+ fill_pool();
db = get_bucket((unsigned long) addr);
diff --git a/lib/devres.c b/lib/devres.c
index 6a0e9bd6524a..77c80ca9e485 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -9,6 +9,7 @@
enum devm_ioremap_type {
DEVM_IOREMAP = 0,
DEVM_IOREMAP_NC,
+ DEVM_IOREMAP_UC,
DEVM_IOREMAP_WC,
};
@@ -39,6 +40,9 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
case DEVM_IOREMAP_NC:
addr = ioremap_nocache(offset, size);
break;
+ case DEVM_IOREMAP_UC:
+ addr = ioremap_uc(offset, size);
+ break;
case DEVM_IOREMAP_WC:
addr = ioremap_wc(offset, size);
break;
@@ -69,6 +73,21 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
EXPORT_SYMBOL(devm_ioremap);
/**
+ * devm_ioremap_uc - Managed ioremap_uc()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
+ resource_size_t size)
+{
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
+}
+EXPORT_SYMBOL_GPL(devm_ioremap_uc);
+
+/**
* devm_ioremap_nocache - Managed ioremap_nocache()
* @dev: Generic device to remap IO address for
* @offset: Resource address to map
@@ -136,6 +155,7 @@ void __iomem *devm_ioremap_resource(struct device *dev,
{
resource_size_t size;
void __iomem *dest_ptr;
+ char *pretty_name;
BUG_ON(!dev);
@@ -146,7 +166,15 @@ void __iomem *devm_ioremap_resource(struct device *dev,
size = resource_size(res);
- if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) {
+ if (res->name)
+ pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
+ dev_name(dev), res->name);
+ else
+ pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!pretty_name)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
dev_err(dev, "can't request region for resource %pR\n", res);
return IOMEM_ERR_PTR(-EBUSY);
}
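
A hedged sketch of a caller for the new managed helper (not part of the patch; the driver, resource index, and register offset are hypothetical). The mapping is released automatically on driver detach, which is the point of the devm_ variant:

#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Strongly uncached mapping; no explicit iounmap() needed. */
	regs = devm_ioremap_uc(&pdev->dev, res->start, resource_size(res));
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs);	/* hypothetical enable register */
	return 0;
}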
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 5cff72f18c4a..33ffbf308853 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -106,7 +106,12 @@ retry:
was_locked = 1;
} else {
local_irq_restore(flags);
- cpu_relax();
+ /*
+ * Wait for the lock to release before jumping to
+ * atomic_cmpxchg() in order to mitigate the thundering herd
+ * problem.
+ */
+ do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
goto retry;
}
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 8a16c2d498e9..af9aba06c59f 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -87,22 +87,22 @@ static struct { unsigned flag:8; char opt_char; } opt_array[] = {
{ _DPRINTK_FLAGS_NONE, '_' },
};
+struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; };
+
/* format a string into buf[] which describes the _ddebug's flags */
-static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
- size_t maxlen)
+static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb)
{
- char *p = buf;
+ char *p = fb->buf;
int i;
- BUG_ON(maxlen < 6);
for (i = 0; i < ARRAY_SIZE(opt_array); ++i)
- if (dp->flags & opt_array[i].flag)
+ if (flags & opt_array[i].flag)
*p++ = opt_array[i].opt_char;
- if (p == buf)
+ if (p == fb->buf)
*p++ = '_';
*p = '\0';
- return buf;
+ return fb->buf;
}
#define vpr_info(fmt, ...) \
@@ -144,7 +144,7 @@ static int ddebug_change(const struct ddebug_query *query,
struct ddebug_table *dt;
unsigned int newflags;
unsigned int nfound = 0;
- char flagbuf[10];
+ struct flagsbuf fbuf;
/* search for matching ddebugs */
mutex_lock(&ddebug_lock);
@@ -201,8 +201,7 @@ static int ddebug_change(const struct ddebug_query *query,
vpr_info("changed %s:%d [%s]%s =%s\n",
trim_prefix(dp->filename), dp->lineno,
dt->mod_name, dp->function,
- ddebug_describe_flags(dp, flagbuf,
- sizeof(flagbuf)));
+ ddebug_describe_flags(dp->flags, &fbuf));
}
}
mutex_unlock(&ddebug_lock);
@@ -816,7 +815,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
{
struct ddebug_iter *iter = m->private;
struct _ddebug *dp = p;
- char flagsbuf[10];
+ struct flagsbuf flags;
vpr_info("called m=%p p=%p\n", m, p);
@@ -829,7 +828,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
seq_printf(m, "%s:%u [%s]%s =%s \"",
trim_prefix(dp->filename), dp->lineno,
iter->table->mod_name, dp->function,
- ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
+ ddebug_describe_flags(dp->flags, &flags));
seq_escape(m, dp->format, "\t\r\n\"");
seq_puts(m, "\"\n");
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 5c51eb45178a..4e68490fa703 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -149,18 +149,6 @@ EXPORT_SYMBOL(find_last_bit);
#ifdef __BIG_ENDIAN
-/* include/linux/byteorder does not support "unsigned long" type */
-static inline unsigned long ext2_swab(const unsigned long y)
-{
-#if BITS_PER_LONG == 64
- return (unsigned long) __swab64((u64) y);
-#elif BITS_PER_LONG == 32
- return (unsigned long) __swab32((u32) y);
-#else
-#error BITS_PER_LONG not defined
-#endif
-}
-
#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
const unsigned long *addr2, unsigned long nbits,
@@ -177,7 +165,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
tmp ^= invert;
/* Handle 1st word. */
- tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
+ tmp &= swab(BITMAP_FIRST_WORD_MASK(start));
start = round_down(start, BITS_PER_LONG);
while (!tmp) {
@@ -191,7 +179,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
tmp ^= invert;
}
- return min(start + __ffs(ext2_swab(tmp)), nbits);
+ return min(start + __ffs(swab(tmp)), nbits);
}
#endif
diff --git a/lib/idr.c b/lib/idr.c
index 66a374892482..c2cf2c52bbde 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -215,7 +215,7 @@ int idr_for_each(const struct idr *idr,
EXPORT_SYMBOL(idr_for_each);
/**
- * idr_get_next() - Find next populated entry.
+ * idr_get_next_ul() - Find next populated entry.
* @idr: IDR handle.
* @nextid: Pointer to an ID.
*
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(idr_for_each);
* to the ID of the found value. To use in a loop, the value pointed to by
* nextid must be incremented by the user.
*/
-void *idr_get_next(struct idr *idr, int *nextid)
+void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
struct radix_tree_iter iter;
void __rcu **slot;
@@ -245,18 +245,14 @@ void *idr_get_next(struct idr *idr, int *nextid)
}
if (!slot)
return NULL;
- id = iter.index + base;
-
- if (WARN_ON_ONCE(id > INT_MAX))
- return NULL;
- *nextid = id;
+ *nextid = iter.index + base;
return entry;
}
-EXPORT_SYMBOL(idr_get_next);
+EXPORT_SYMBOL(idr_get_next_ul);
/**
- * idr_get_next_ul() - Find next populated entry.
+ * idr_get_next() - Find next populated entry.
* @idr: IDR handle.
* @nextid: Pointer to an ID.
*
@@ -265,22 +261,17 @@ EXPORT_SYMBOL(idr_get_next);
* to the ID of the found value. To use in a loop, the value pointed to by
* nextid must be incremented by the user.
*/
-void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
+void *idr_get_next(struct idr *idr, int *nextid)
{
- struct radix_tree_iter iter;
- void __rcu **slot;
- unsigned long base = idr->idr_base;
unsigned long id = *nextid;
+ void *entry = idr_get_next_ul(idr, &id);
- id = (id < base) ? 0 : id - base;
- slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
- if (!slot)
+ if (WARN_ON_ONCE(id > INT_MAX))
return NULL;
-
- *nextid = iter.index + base;
- return rcu_dereference_raw(*slot);
+ *nextid = id;
+ return entry;
}
-EXPORT_SYMBOL(idr_get_next_ul);
+EXPORT_SYMBOL(idr_get_next);
/**
* idr_replace() - replace pointer for given ID.
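
The swap above makes idr_get_next() a thin wrapper over idr_get_next_ul(); the caller-side loop pattern described in the kernel-doc stays the same. A minimal sketch (idr contents hypothetical):

#include <linux/idr.h>
#include <linux/printk.h>

static void example_idr_walk(struct idr *idr)
{
	unsigned long id = 0;
	void *entry;

	/* As the kernel-doc notes, the caller increments the cursor. */
	for (entry = idr_get_next_ul(idr, &id); entry;
	     id++, entry = idr_get_next_ul(idr, &id))
		pr_info("id %lu -> %p\n", id, entry);
}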
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 2f17b488d58e..7557bf7ecf1f 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop)
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
EXPORT_SYMBOL(irq_poll_sched);
@@ -72,6 +73,7 @@ void irq_poll_complete(struct irq_poll *iop)
local_irq_save(flags);
__irq_poll_complete(iop);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
EXPORT_SYMBOL(irq_poll_complete);
@@ -96,6 +98,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
}
local_irq_enable();
+ preempt_check_resched_rt();
/* Even though interrupts have been re-enabled, this
* access is safe because interrupts can only add new
@@ -133,6 +136,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();
}
/**
@@ -196,6 +200,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();
return 0;
}
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index a1705545e6ac..cc38c3ede842 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -742,6 +742,8 @@ GENERATE_TESTCASE(init_held_rtmutex);
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
+#ifndef CONFIG_PREEMPT_RT_FULL
+
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
@@ -757,9 +759,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
+#endif
+
#undef E1
#undef E2
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Enabling hardirqs with a softirq-safe lock held:
*/
@@ -792,6 +797,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
#undef E1
#undef E2
+#endif
+
/*
* Enabling irqs with an irq-safe lock held:
*/
@@ -815,6 +822,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
+#ifndef CONFIG_PREEMPT_RT_FULL
+
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
@@ -830,6 +839,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+#endif
+
#undef E1
#undef E2
@@ -861,6 +872,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
+#ifndef CONFIG_PREEMPT_RT_FULL
+
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
@@ -876,6 +889,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+#endif
+
#undef E1
#undef E2
#undef E3
@@ -909,6 +924,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
+#ifndef CONFIG_PREEMPT_RT_FULL
+
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
@@ -924,10 +941,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
+#endif
+
#undef E1
#undef E2
#undef E3
+#ifndef CONFIG_PREEMPT_RT_FULL
+
/*
* read-lock / write-lock irq inversion.
*
@@ -990,6 +1011,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
#undef E2
#undef E3
+#endif
+
+#ifndef CONFIG_PREEMPT_RT_FULL
+
/*
* read-lock / write-lock recursion that is actually safe.
*/
@@ -1028,6 +1053,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
#undef E2
#undef E3
+#endif
+
/*
* read-lock / write-lock recursion that is unsafe.
*/
@@ -2058,6 +2085,7 @@ void locking_selftest(void)
printk(" --------------------------------------------------------------------------\n");
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* irq-context testcases:
*/
@@ -2070,6 +2098,28 @@ void locking_selftest(void)
DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
+#else
+ /* On -rt, we only do hardirq context test for raw spinlock */
+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
+
+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
+
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
+
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
+#endif
ww_tests();
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 717c940112f9..8ad5ba2b86e2 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -268,6 +268,19 @@ m_len_done:
*op++ = (M4_MARKER | ((m_off >> 11) & 8)
| (m_len - 2));
else {
+ if (unlikely(((m_off & 0x403f) == 0x403f)
+ && (m_len >= 261)
+ && (m_len <= 264))
+ && likely(bitstream_version)) {
+ // Under lzo-rle, block copies
+ // for 261 <= length <= 264 and
+ // (distance & 0x80f3) == 0x80f3
+ // can result in ambiguous
+ // output. Adjust length
+ // to 260 to prevent ambiguity.
+ ip -= m_len - 260;
+ m_len = 260;
+ }
m_len -= M4_MAX_LEN;
*op++ = (M4_MARKER | ((m_off >> 11) & 8));
while (unlikely(m_len > 255)) {
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 08c60d10747f..6c5229f98c9e 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -671,7 +671,7 @@ do { \
************** MIPS/64 **************
***************************************/
#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC)
/*
* GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
* code below, so we special case MIPS64r6 until the compiler can do better.
@@ -756,22 +756,22 @@ do { \
do { \
if (__builtin_constant_p(bh) && (bh) == 0) \
__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"%r" ((USItype)(al)), \
"rI" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"%r" ((USItype)(al)), \
"rI" ((USItype)(bl))); \
else \
__asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"r" ((USItype)(bh)), \
"%r" ((USItype)(al)), \
@@ -781,36 +781,36 @@ do { \
do { \
if (__builtin_constant_p(ah) && (ah) == 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else \
__asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
@@ -821,7 +821,7 @@ do { \
do { \
USItype __m0 = (m0), __m1 = (m1); \
__asm__ ("mulhwu %0,%1,%2" \
- : "=r" ((USItype) ph) \
+ : "=r" (ph) \
: "%r" (__m0), \
"r" (__m1)); \
(pl) = __m0 * __m1; \
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 15ca78e1c7d4..77bf84987cda 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -75,12 +75,6 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
touch_softlockup_watchdog();
}
- /*
- * Force flush any remote buffers that might be stuck in IRQ context
- * and therefore could not run their irq_work.
- */
- printk_safe_flush();
-
clear_bit_unlock(0, &backtrace_flag);
put_cpu();
}
diff --git a/lib/printk_ringbuffer.c b/lib/printk_ringbuffer.c
new file mode 100644
index 000000000000..9a31d7dbdc00
--- /dev/null
+++ b/lib/printk_ringbuffer.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/printk_ringbuffer.h>
+
+#define PRB_SIZE(rb) (1 << rb->size_bits)
+#define PRB_SIZE_BITMASK(rb) (PRB_SIZE(rb) - 1)
+#define PRB_INDEX(rb, lpos) (lpos & PRB_SIZE_BITMASK(rb))
+#define PRB_WRAPS(rb, lpos) (lpos >> rb->size_bits)
+#define PRB_WRAP_LPOS(rb, lpos, xtra) \
+ ((PRB_WRAPS(rb, lpos) + xtra) << rb->size_bits)
+#define PRB_DATA_SIZE(e) (e->size - sizeof(struct prb_entry))
+#define PRB_DATA_ALIGN sizeof(long)
+
+static bool __prb_trylock(struct prb_cpulock *cpu_lock,
+ unsigned int *cpu_store)
+{
+ unsigned long *flags;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+
+ *cpu_store = atomic_read(&cpu_lock->owner);
+ /* memory barrier to ensure the current lock owner is visible */
+ smp_rmb();
+ if (*cpu_store == -1) {
+ flags = per_cpu_ptr(cpu_lock->irqflags, cpu);
+ local_irq_save(*flags);
+ if (atomic_try_cmpxchg_acquire(&cpu_lock->owner,
+ cpu_store, cpu)) {
+ return true;
+ }
+ local_irq_restore(*flags);
+ } else if (*cpu_store == cpu) {
+ return true;
+ }
+
+ put_cpu();
+ return false;
+}
+
+/*
+ * prb_lock: Perform a processor-reentrant spin lock.
+ * @cpu_lock: A pointer to the lock object.
+ * @cpu_store: A "flags" pointer to store lock status information.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+ * lock, this function succeeds immediately. If the lock is held by another
+ * processor, this function spins until the calling processor becomes the
+ * owner.
+ *
+ * It is safe to call this function from any context and state.
+ */
+void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store)
+{
+ for (;;) {
+ if (__prb_trylock(cpu_lock, cpu_store))
+ break;
+ cpu_relax();
+ }
+}
+
+/*
+ * prb_unlock: Perform a processor-reentrant spin unlock.
+ * @cpu_lock: A pointer to the lock object.
+ * @cpu_store: A "flags" object storing lock status information.
+ *
+ * Release the lock. The calling processor must be the owner of the lock.
+ *
+ * It is safe to call this function from any context and state.
+ */
+void prb_unlock(struct prb_cpulock *cpu_lock, unsigned int cpu_store)
+{
+ unsigned long *flags;
+ unsigned int cpu;
+
+ cpu = atomic_read(&cpu_lock->owner);
+ atomic_set_release(&cpu_lock->owner, cpu_store);
+
+ if (cpu_store == -1) {
+ flags = per_cpu_ptr(cpu_lock->irqflags, cpu);
+ local_irq_restore(*flags);
+ }
+
+ put_cpu();
+}
+
+static struct prb_entry *to_entry(struct printk_ringbuffer *rb,
+ unsigned long lpos)
+{
+ char *buffer = rb->buffer;
+ buffer += PRB_INDEX(rb, lpos);
+ return (struct prb_entry *)buffer;
+}
+
+static int calc_next(struct printk_ringbuffer *rb, unsigned long tail,
+ unsigned long lpos, int size, unsigned long *calced_next)
+{
+ unsigned long next_lpos;
+ int ret = 0;
+again:
+ next_lpos = lpos + size;
+ if (next_lpos - tail > PRB_SIZE(rb))
+ return -1;
+
+ if (PRB_WRAPS(rb, lpos) != PRB_WRAPS(rb, next_lpos)) {
+ lpos = PRB_WRAP_LPOS(rb, next_lpos, 0);
+ ret |= 1;
+ goto again;
+ }
+
+ *calced_next = next_lpos;
+ return ret;
+}
+
+static bool push_tail(struct printk_ringbuffer *rb, unsigned long tail)
+{
+ unsigned long new_tail;
+ struct prb_entry *e;
+ unsigned long head;
+
+ if (tail != atomic_long_read(&rb->tail))
+ return true;
+
+ e = to_entry(rb, tail);
+ if (e->size != -1)
+ new_tail = tail + e->size;
+ else
+ new_tail = PRB_WRAP_LPOS(rb, tail, 1);
+
+ /* make sure the new tail does not overtake the head */
+ head = atomic_long_read(&rb->head);
+ if (head - new_tail > PRB_SIZE(rb))
+ return false;
+
+ atomic_long_cmpxchg(&rb->tail, tail, new_tail);
+ return true;
+}
+
+/*
+ * prb_commit: Commit a reserved entry to the ring buffer.
+ * @h: An entry handle referencing the data entry to commit.
+ *
+ * Commit data that has been reserved using prb_reserve(). Once the data
+ * block has been committed, it can be invalidated at any time. If a writer
+ * is interested in using the data after committing, the writer should make
+ * its own copy first or use the prb_iter_ reader functions to access the
+ * data in the ring buffer.
+ *
+ * It is safe to call this function from any context and state.
+ */
+void prb_commit(struct prb_handle *h)
+{
+ struct printk_ringbuffer *rb = h->rb;
+ bool changed = false;
+ struct prb_entry *e;
+ unsigned long head;
+ unsigned long res;
+
+ for (;;) {
+ if (atomic_read(&rb->ctx) != 1) {
+ /* the interrupted context will fixup head */
+ atomic_dec(&rb->ctx);
+ break;
+ }
+ /* assign sequence numbers before moving head */
+ head = atomic_long_read(&rb->head);
+ res = atomic_long_read(&rb->reserve);
+ while (head != res) {
+ e = to_entry(rb, head);
+ if (e->size == -1) {
+ head = PRB_WRAP_LPOS(rb, head, 1);
+ continue;
+ }
+ while (atomic_long_read(&rb->lost)) {
+ atomic_long_dec(&rb->lost);
+ rb->seq++;
+ }
+ e->seq = ++rb->seq;
+ head += e->size;
+ changed = true;
+ }
+ atomic_long_set_release(&rb->head, res);
+
+ atomic_dec(&rb->ctx);
+
+ if (atomic_long_read(&rb->reserve) == res)
+ break;
+ atomic_inc(&rb->ctx);
+ }
+
+ prb_unlock(rb->cpulock, h->cpu);
+
+ if (changed) {
+ atomic_long_inc(&rb->wq_counter);
+ if (wq_has_sleeper(rb->wq)) {
+#ifdef CONFIG_IRQ_WORK
+ irq_work_queue(rb->wq_work);
+#else
+ if (!in_nmi())
+ wake_up_interruptible_all(rb->wq);
+#endif
+ }
+ }
+}
+
+/*
+ * prb_reserve: Reserve an entry within a ring buffer.
+ * @h: An entry handle to be setup and reference an entry.
+ * @rb: A ring buffer to reserve data within.
+ * @size: The number of bytes to reserve.
+ *
+ * Reserve an entry of at least @size bytes to be used by the caller. If
+ * successful, the data region of the entry belongs to the caller and cannot
+ * be invalidated by any other task/context. For this reason, the caller
+ * should call prb_commit() as quickly as possible in order to avoid preventing
+ * other tasks/contexts from reserving data in the case that the ring buffer
+ * has wrapped.
+ *
+ * It is safe to call this function from any context and state.
+ *
+ * Returns a pointer to the reserved entry (and @h is setup to reference that
+ * entry) or NULL if it was not possible to reserve data.
+ */
+char *prb_reserve(struct prb_handle *h, struct printk_ringbuffer *rb,
+ unsigned int size)
+{
+ unsigned long tail, res1, res2;
+ int ret;
+
+ if (size == 0)
+ return NULL;
+ size += sizeof(struct prb_entry);
+ size += PRB_DATA_ALIGN - 1;
+ size &= ~(PRB_DATA_ALIGN - 1);
+ if (size >= PRB_SIZE(rb))
+ return NULL;
+
+ h->rb = rb;
+ prb_lock(rb->cpulock, &h->cpu);
+
+ atomic_inc(&rb->ctx);
+
+ do {
+ for (;;) {
+ tail = atomic_long_read(&rb->tail);
+ res1 = atomic_long_read(&rb->reserve);
+ ret = calc_next(rb, tail, res1, size, &res2);
+ if (ret >= 0)
+ break;
+ if (!push_tail(rb, tail)) {
+ prb_commit(h);
+ return NULL;
+ }
+ }
+ } while (!atomic_long_try_cmpxchg_acquire(&rb->reserve, &res1, res2));
+
+ h->entry = to_entry(rb, res1);
+
+ if (ret) {
+ /* handle wrap */
+ h->entry->size = -1;
+ h->entry = to_entry(rb, PRB_WRAP_LPOS(rb, res2, 0));
+ }
+
+ h->entry->size = size;
+
+ return &h->entry->data[0];
+}
+
+/*
+ * prb_iter_copy: Copy an iterator.
+ * @dest: The iterator to copy to.
+ * @src: The iterator to copy from.
+ *
+ * Make a deep copy of an iterator. This is particularly useful for making
+ * backup copies of an iterator in case a form of rewinding is needed.
+ *
+ * It is safe to call this function from any context and state. But
+ * note that this function is not atomic. Callers should not make copies
+ * to/from iterators that can be accessed by other tasks/contexts.
+ */
+void prb_iter_copy(struct prb_iterator *dest, struct prb_iterator *src)
+{
+ memcpy(dest, src, sizeof(*dest));
+}
+
+/*
+ * prb_iter_init: Initialize an iterator for a ring buffer.
+ * @iter: The iterator to initialize.
+ * @rb: The ring buffer that @iter should iterate over.
+ * @seq: The sequence number of the position preceding the first record.
+ * May be NULL.
+ *
+ * Initialize an iterator to be used with a specified ring buffer. If @seq
+ * is non-NULL, it will be set such that prb_iter_next() will provide a
+ * sequence value of "@seq + 1" if no records were missed.
+ *
+ * It is safe to call this function from any context and state.
+ */
+void prb_iter_init(struct prb_iterator *iter, struct printk_ringbuffer *rb,
+ u64 *seq)
+{
+ memset(iter, 0, sizeof(*iter));
+ iter->rb = rb;
+ iter->lpos = PRB_INIT;
+
+ if (!seq)
+ return;
+
+ for (;;) {
+ struct prb_iterator tmp_iter;
+ int ret;
+
+ prb_iter_copy(&tmp_iter, iter);
+
+ ret = prb_iter_next(&tmp_iter, NULL, 0, seq);
+ if (ret < 0)
+ continue;
+
+ if (ret == 0)
+ *seq = 0;
+ else
+ (*seq)--;
+ break;
+ }
+}
+
+static bool is_valid(struct printk_ringbuffer *rb, unsigned long lpos)
+{
+ unsigned long head, tail;
+
+ tail = atomic_long_read(&rb->tail);
+ head = atomic_long_read(&rb->head);
+ head -= tail;
+ lpos -= tail;
+
+ if (lpos >= head)
+ return false;
+ return true;
+}
+
+/*
+ * prb_iter_data: Retrieve the record data at the current position.
+ * @iter: Iterator tracking the current position.
+ * @buf: A buffer to store the data of the record. May be NULL.
+ * @size: The size of @buf. (Ignored if @buf is NULL.)
+ * @seq: The sequence number of the record. May be NULL.
+ *
+ * If @iter is at a record, provide the data and/or sequence number of that
+ * record (if specified by the caller).
+ *
+ * It is safe to call this function from any context and state.
+ *
+ * Returns >=0 if the current record contains valid data (returns 0 if @buf
+ * is NULL or returns the size of the data block if @buf is non-NULL) or
+ * -EINVAL if @iter is now invalid.
+ */
+int prb_iter_data(struct prb_iterator *iter, char *buf, int size, u64 *seq)
+{
+ struct printk_ringbuffer *rb = iter->rb;
+ unsigned long lpos = iter->lpos;
+ unsigned int datsize = 0;
+ struct prb_entry *e;
+
+ if (buf || seq) {
+ e = to_entry(rb, lpos);
+ if (!is_valid(rb, lpos))
+ return -EINVAL;
+ /* memory barrier to ensure valid lpos */
+ smp_rmb();
+ if (buf) {
+ datsize = PRB_DATA_SIZE(e);
+ /* memory barrier to ensure load of datsize */
+ smp_rmb();
+ if (!is_valid(rb, lpos))
+ return -EINVAL;
+ if (PRB_INDEX(rb, lpos) + datsize >
+ PRB_SIZE(rb) - PRB_DATA_ALIGN) {
+ return -EINVAL;
+ }
+ if (size > datsize)
+ size = datsize;
+ memcpy(buf, &e->data[0], size);
+ }
+ if (seq)
+ *seq = e->seq;
+ /* memory barrier to ensure loads of entry data */
+ smp_rmb();
+ }
+
+ if (!is_valid(rb, lpos))
+ return -EINVAL;
+
+ return datsize;
+}
+
+/*
+ * prb_iter_next: Advance to the next record.
+ * @iter: Iterator tracking the current position.
+ * @buf: A buffer to store the data of the next record. May be NULL.
+ * @size: The size of @buf. (Ignored if @buf is NULL.)
+ * @seq: The sequence number of the next record. May be NULL.
+ *
+ * If a next record is available, @iter is advanced and (if specified)
+ * the data and/or sequence number of that record are provided.
+ *
+ * It is safe to call this function from any context and state.
+ *
+ * Returns 1 if @iter was advanced, 0 if @iter is at the end of the list, or
+ * -EINVAL if @iter is now invalid.
+ */
+int prb_iter_next(struct prb_iterator *iter, char *buf, int size, u64 *seq)
+{
+ struct printk_ringbuffer *rb = iter->rb;
+ unsigned long next_lpos;
+ struct prb_entry *e;
+ unsigned int esize;
+
+ if (iter->lpos == PRB_INIT) {
+ next_lpos = atomic_long_read(&rb->tail);
+ } else {
+ if (!is_valid(rb, iter->lpos))
+ return -EINVAL;
+ /* memory barrier to ensure valid lpos */
+ smp_rmb();
+ e = to_entry(rb, iter->lpos);
+ esize = e->size;
+ /* memory barrier to ensure load of size */
+ smp_rmb();
+ if (!is_valid(rb, iter->lpos))
+ return -EINVAL;
+ next_lpos = iter->lpos + esize;
+ }
+ if (next_lpos == atomic_long_read(&rb->head))
+ return 0;
+ if (!is_valid(rb, next_lpos))
+ return -EINVAL;
+ /* memory barrier to ensure valid lpos */
+ smp_rmb();
+
+ iter->lpos = next_lpos;
+ e = to_entry(rb, iter->lpos);
+ esize = e->size;
+ /* memory barrier to ensure load of size */
+ smp_rmb();
+ if (!is_valid(rb, iter->lpos))
+ return -EINVAL;
+ if (esize == -1)
+ iter->lpos = PRB_WRAP_LPOS(rb, iter->lpos, 1);
+
+ if (prb_iter_data(iter, buf, size, seq) < 0)
+ return -EINVAL;
+
+ return 1;
+}
+
+/*
+ * prb_iter_wait_next: Advance to the next record, blocking if none available.
+ * @iter: Iterator tracking the current position.
+ * @buf: A buffer to store the data of the next record. May be NULL.
+ * @size: The size of @buf. (Ignored if @buf is NULL.)
+ * @seq: The sequence number of the next record. May be NULL.
+ *
+ * If a next record is already available, this function works like
+ * prb_iter_next(). Otherwise block interruptible until a next record is
+ * available.
+ *
+ * When a next record is available, @iter is advanced and (if specified)
+ * the data and/or sequence number of that record are provided.
+ *
+ * This function might sleep.
+ *
+ * Returns 1 if @iter was advanced, -EINVAL if @iter is now invalid, or
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int prb_iter_wait_next(struct prb_iterator *iter, char *buf, int size, u64 *seq)
+{
+ unsigned long last_seen;
+ int ret;
+
+ for (;;) {
+ last_seen = atomic_long_read(&iter->rb->wq_counter);
+
+ ret = prb_iter_next(iter, buf, size, seq);
+ if (ret != 0)
+ break;
+
+ ret = wait_event_interruptible(*iter->rb->wq,
+ last_seen != atomic_long_read(&iter->rb->wq_counter));
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * prb_iter_seek: Seek forward to a specific record.
+ * @iter: Iterator to advance.
+ * @seq: Record number to advance to.
+ *
+ * Advance @iter such that a following call to prb_iter_data() will provide
+ * the contents of the specified record. If a record is specified that does
+ * not yet exist, advance @iter to the end of the record list.
+ *
+ * Note that iterators cannot be rewound. So if a record is requested that
+ * exists but is previous to @iter in position, @iter is considered invalid.
+ *
+ * It is safe to call this function from any context and state.
+ *
+ * Returns 1 on success, 0 if the specified record does not yet exist (@iter is
+ * now at the end of the list), or -EINVAL if @iter is now invalid.
+ */
+int prb_iter_seek(struct prb_iterator *iter, u64 seq)
+{
+ u64 cur_seq;
+ int ret;
+
+ /* first check if the iterator is already at the wanted seq */
+ if (seq == 0) {
+ if (iter->lpos == PRB_INIT)
+ return 1;
+ else
+ return -EINVAL;
+ }
+ if (iter->lpos != PRB_INIT) {
+ if (prb_iter_data(iter, NULL, 0, &cur_seq) >= 0) {
+ if (cur_seq == seq)
+ return 1;
+ if (cur_seq > seq)
+ return -EINVAL;
+ }
+ }
+
+ /* iterate to find the wanted seq */
+ for (;;) {
+ ret = prb_iter_next(iter, NULL, 0, &cur_seq);
+ if (ret <= 0)
+ break;
+
+ if (cur_seq == seq)
+ break;
+
+ if (cur_seq > seq) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * prb_buffer_size: Get the size of the ring buffer.
+ * @rb: The ring buffer to get the size of.
+ *
+ * Return the number of bytes used for the ring buffer entry storage area.
+ * Note that this area stores both entry header and entry data. Therefore
+ * this represents an upper bound to the amount of data that can be stored
+ * in the ring buffer.
+ *
+ * It is safe to call this function from any context and state.
+ *
+ * Returns the size in bytes of the entry storage area.
+ */
+int prb_buffer_size(struct printk_ringbuffer *rb)
+{
+ return PRB_SIZE(rb);
+}
+
+/*
+ * prb_inc_lost: Increment the seq counter to signal a lost record.
+ * @rb: The ring buffer to increment the seq of.
+ *
+ * Increment the seq counter so that a seq number is intentionally missing
+ * for the readers. This allows readers to identify that a record is
+ * missing. A writer will typically use this function if prb_reserve()
+ * fails.
+ *
+ * It is safe to call this function from any context and state.
+ */
+void prb_inc_lost(struct printk_ringbuffer *rb)
+{
+ atomic_long_inc(&rb->lost);
+}
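
Putting the new API together: writers reserve, fill, and commit; readers walk records by sequence number and restart when the writers lap them. A condensed sketch of both sides (not part of the patch), assuming the DECLARE_STATIC_PRINTKRB*() declaration macros from the printk_ringbuffer.h header added elsewhere in this series; the buffer name and size are made up:

#include <linux/printk.h>
#include <linux/printk_ringbuffer.h>
#include <linux/string.h>

/* Hypothetical 2^10-byte ring protected by its own CPU lock. */
DECLARE_STATIC_PRINTKRB_CPULOCK(example_cpulock);
DECLARE_STATIC_PRINTKRB(example_rb, 10, &example_cpulock);

static void example_write(const char *msg)
{
	struct prb_handle h;
	unsigned int len = strlen(msg) + 1;
	char *buf;

	buf = prb_reserve(&h, &example_rb, len);
	if (!buf) {
		prb_inc_lost(&example_rb);	/* let readers see the gap */
		return;
	}
	memcpy(buf, msg, len);
	prb_commit(&h);				/* data becomes visible here */
}

static void example_read_all(void)
{
	struct prb_iterator iter;
	char buf[128];
	u64 seq;
	int ret;

	prb_iter_init(&iter, &example_rb, NULL);
	for (;;) {
		ret = prb_iter_next(&iter, buf, sizeof(buf), &seq);
		if (ret == 0)
			break;		/* caught up with the head */
		if (ret < 0) {
			/* Overtaken by writers: restart at the oldest data. */
			prb_iter_init(&iter, &example_rb, NULL);
			continue;
		}
		pr_info("seq %llu: %s\n", seq, buf);
	}
}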
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 18c1dfbb1765..d28bb02a7d7c 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/xarray.h>
-
+#include <linux/locallock.h>
/*
* Radix tree node cache.
@@ -72,6 +72,7 @@ struct radix_tree_preload {
struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);
static inline struct radix_tree_node *entry_to_node(void *ptr)
{
@@ -269,12 +270,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
- rtp = this_cpu_ptr(&radix_tree_preloads);
+ rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes;
rtp->nodes = ret->parent;
rtp->nr--;
}
+ put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
/*
* Update the allocation stack trace as this is more useful
* for debugging.
@@ -340,14 +342,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
*/
gfp_mask &= ~__GFP_ACCOUNT;
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < nr) {
- preempt_enable();
+ local_unlock(radix_tree_preloads_lock);
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) {
node->parent = rtp->nodes;
@@ -389,11 +391,17 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
if (gfpflags_allow_blocking(gfp_mask))
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
/* Preloading doesn't help anything with this gfp mask, skip it */
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
+void radix_tree_preload_end(void)
+{
+ local_unlock(radix_tree_preloads_lock);
+}
+EXPORT_SYMBOL(radix_tree_preload_end);
+
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
@@ -1478,10 +1486,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
void idr_preload(gfp_t gfp_mask)
{
if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
}
EXPORT_SYMBOL(idr_preload);
+void idr_preload_end(void)
+{
+ local_unlock(radix_tree_preloads_lock);
+}
+EXPORT_SYMBOL(idr_preload_end);
+
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max)
@@ -1529,7 +1543,7 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
offset = radix_tree_find_next_bit(node, IDR_FREE,
offset + 1);
start = next_index(start, node, offset);
- if (start > max)
+ if (start > max || start == 0)
return ERR_PTR(-ENOSPC);
while (offset == RADIX_TREE_MAP_SIZE) {
offset = node->offset + 1;
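
The conversion preserves the preload API contract: a successful preload enters the (now local-lock-based) critical section, and the matching *_end() call leaves it. A minimal caller sketch using the newly out-of-lined idr_preload_end() (lock and idr names hypothetical):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static DEFINE_IDR(example_idr);

static int example_alloc_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; takes the local lock */
	spin_lock(&example_lock);
	id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();		/* drops the local lock */

	return id;
}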
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 3ab8720aa2f8..b9e6c3648be1 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -35,13 +35,13 @@ endif
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \
- gcc -c -x assembler - >&/dev/null && \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
- gcc -c -x assembler - >&/dev/null && \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
rm ./-.o && echo -DCONFIG_AS_AVX2=1)
CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \
- gcc -c -x assembler - >&/dev/null && \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk
index c6aa03631df8..0809805a7e23 100644
--- a/lib/raid6/unroll.awk
+++ b/lib/raid6/unroll.awk
@@ -13,7 +13,7 @@ BEGIN {
for (i = 0; i < rep; ++i) {
tmp = $0
gsub(/\$\$/, i, tmp)
- gsub(/\$\#/, n, tmp)
+ gsub(/\$#/, n, tmp)
gsub(/\$\*/, "$", tmp)
print tmp
}
diff --git a/lib/random32.c b/lib/random32.c
index 763b920a6206..3d749abb9e80 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
}
#endif
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
+DEFINE_PER_CPU(struct rnd_state, net_rand_state);
/**
* prandom_u32_state - seeded pseudo-random number generator.
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 54f57cd117c6..85ab49344a23 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -671,8 +671,8 @@ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
if (!sbq_wait->sbq) {
sbq_wait->sbq = sbq;
atomic_inc(&sbq->ws_active);
+ add_wait_queue(&ws->wait, &sbq_wait->wait);
}
- add_wait_queue(&ws->wait, &sbq_wait->wait);
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index eacb82468437..3d26ba629199 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -303,7 +303,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
if (prv)
table->nents = ++table->orig_nents;
- return -ENOMEM;
+ return -ENOMEM;
}
sg_init_table(sg, alloc_size);
@@ -801,7 +801,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
- WARN_ON_ONCE(preemptible());
+ WARN_ON_ONCE(!pagefault_disabled());
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 157d9e31f6c2..c2f5b0f8cacd 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -23,7 +23,12 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+#if defined(CONFIG_PREEMPT_RT_BASE) && (defined(CONFIG_SMP) || defined(CONFIG_SCHED_DEBUG))
+ if (current->migrate_disable)
+ goto out;
+#endif
+
+ if (current->nr_cpus_allowed == 1)
goto out;
/*
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 66cab785bea0..aae3577b0392 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -83,15 +83,19 @@ static bool init_stack_slab(void **prealloc)
return true;
if (stack_slabs[depot_index] == NULL) {
stack_slabs[depot_index] = *prealloc;
+ *prealloc = NULL;
} else {
- stack_slabs[depot_index + 1] = *prealloc;
+ /* If this is the last depot slab, do not touch the next one. */
+ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
+ stack_slabs[depot_index + 1] = *prealloc;
+ *prealloc = NULL;
+ }
/*
* This smp_store_release pairs with smp_load_acquire() from
* |next_slab_inited| above and in depot_save_stack().
*/
smp_store_release(&next_slab_inited, 1);
}
- *prealloc = NULL;
return true;
}
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 023ba9f3b99f..10af8be1cace 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -29,13 +29,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
unsigned long res = 0;
- /*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
if (IS_UNALIGNED(src, dst))
goto byte_at_a_time;
@@ -113,6 +106,13 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
unsigned long max = max_addr - src_addr;
long retval;
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
kasan_check_write(dst, count);
check_object_size(dst, count, false);
if (user_access_begin(src, max)) {
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 7f2db3fe311f..069fc8e463d0 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -32,13 +32,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
unsigned long c;
/*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
- /*
* Do everything aligned. But that means that we
* need to also expand the maximum..
*/
@@ -114,6 +107,13 @@ long strnlen_user(const char __user *str, long count)
unsigned long max = max_addr - src_addr;
long retval;
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
if (user_access_begin(str, max)) {
retval = do_strnlen_user(str, count, max);
user_access_end();
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index e3c593c38eff..e67c7749272c 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -7,16 +7,17 @@
#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
+#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/kasan.h>
#include <linux/kernel.h>
-#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/kasan.h>
/*
* Note: test functions are marked noinline so that their names appear in
@@ -153,6 +154,7 @@ static noinline void __init kmalloc_oob_krealloc_more(void)
if (!ptr1 || !ptr2) {
pr_err("Allocation failed\n");
kfree(ptr1);
+ kfree(ptr2);
return;
}
@@ -619,6 +621,79 @@ static noinline void __init kasan_strings(void)
strnlen(ptr, 1);
}
+static noinline void __init kasan_bitops(void)
+{
+ /*
+ * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
+ * this way we do not actually corrupt other memory.
+ */
+ long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
+ if (!bits)
+ return;
+
+ /*
+ * The calls below access a bit within the allocated memory, yet the
+ * accesses are still out-of-bounds, since bitops are defined to
+ * operate on the whole long the bit is in.
+ */
+ pr_info("out-of-bounds in set_bit\n");
+ set_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __set_bit\n");
+ __set_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in clear_bit\n");
+ clear_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __clear_bit\n");
+ __clear_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in clear_bit_unlock\n");
+ clear_bit_unlock(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __clear_bit_unlock\n");
+ __clear_bit_unlock(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in change_bit\n");
+ change_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __change_bit\n");
+ __change_bit(BITS_PER_LONG, bits);
+
+ /*
+ * The calls below access bits beyond the allocated memory.
+ */
+ pr_info("out-of-bounds in test_and_set_bit\n");
+ test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_set_bit\n");
+ __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_set_bit_lock\n");
+ test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_clear_bit\n");
+ test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_clear_bit\n");
+ __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_change_bit\n");
+ test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_change_bit\n");
+ __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_bit\n");
+ (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+#if defined(clear_bit_unlock_is_negative_byte)
+ pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
+ clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits);
+#endif
+ kfree(bits);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -660,6 +735,7 @@ static int __init kmalloc_tests_init(void)
kasan_memchr();
kasan_memcmp();
kasan_strings();
+ kasan_bitops();
kasan_restore_multi_shot(multishot);
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 9cf77628fc91..87a0cc750ea2 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
break;
case TEST_KMOD_FS_TYPE:
kfree_const(config->test_fs);
- config->test_driver = NULL;
+ config->test_fs = NULL;
copied = config_copy_test_fs(config, test_str,
strlen(test_str));
break;
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index 72c1abfa154d..da137939a410 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -979,10 +979,10 @@ err_check_expect_stats2:
err_world2_obj_get:
for (i--; i >= 0; i--)
world_obj_put(&world2, objagg, hints_case->key_ids[i]);
- objagg_hints_put(hints);
- objagg_destroy(objagg2);
i = hints_case->key_ids_count;
+ objagg_destroy(objagg2);
err_check_expect_hints_stats:
+ objagg_hints_put(hints);
err_hints_get:
err_check_expect_stats:
err_world_obj_get:
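
The reshuffled labels above restore the usual goto-unwind discipline:
each error label releases exactly what was acquired before the failing
step, in reverse order, so objagg_hints_put() belongs at the label that
failures after the hints lookup jump to. A generic sketch of the pattern
(stub names, not the test's real calls):

static int get_a(void) { return 0; }
static int get_b(void) { return 0; }
static int get_c(void) { return -1; }
static void put_a(void) { }
static void put_b(void) { }

int setup(void)
{
	int err;

	err = get_a();
	if (err)
		goto err_a;
	err = get_b();
	if (err)
		goto err_b;	/* undo a only */
	err = get_c();
	if (err)
		goto err_c;	/* undo b, then a */
	return 0;

err_c:
	put_b();
err_b:
	put_a();
err_a:
	return err;
}
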
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 944eb50f3862..a10a276e95c3 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -212,6 +212,7 @@ test_string(void)
#define PTR_STR "ffff0123456789ab"
#define PTR_VAL_NO_CRNG "(____ptrval____)"
#define ZEROS "00000000" /* hex 32 zero bits */
+#define ONES "ffffffff" /* hex 32 one bits */
static int __init
plain_format(void)
@@ -243,6 +244,7 @@ plain_format(void)
#define PTR_STR "456789ab"
#define PTR_VAL_NO_CRNG "(ptrval)"
#define ZEROS ""
+#define ONES ""
static int __init
plain_format(void)
@@ -328,14 +330,28 @@ test_hashed(const char *fmt, const void *p)
test(buf, fmt, p);
}
+/*
+ * NULL pointers aren't hashed.
+ */
static void __init
null_pointer(void)
{
- test_hashed("%p", NULL);
+ test(ZEROS "00000000", "%p", NULL);
test(ZEROS "00000000", "%px", NULL);
test("(null)", "%pE", NULL);
}
+/*
+ * Error pointers aren't hashed.
+ */
+static void __init
+error_pointer(void)
+{
+ test(ONES "fffffff5", "%p", ERR_PTR(-11));
+ test(ONES "fffffff5", "%px", ERR_PTR(-11));
+ test("(efault)", "%pE", ERR_PTR(-11));
+}
+
#define PTR_INVALID ((void *)0x000000ab)
static void __init
@@ -593,6 +609,7 @@ test_pointer(void)
{
plain();
null_pointer();
+ error_pointer();
invalid_pointer();
symbol_ptr();
kernel_ptr();
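
The expected strings follow directly from how the kernel encodes error
pointers: ERR_PTR() casts a negative errno to a pointer, so -11
(-EAGAIN) sign-extends to ...fffffff5. A quick user-space check of that
arithmetic (a sketch, not the kernel macro):

#include <stdio.h>

static void *err_ptr_sketch(long err)
{
	return (void *)err;	/* negative errno reinterpreted as a pointer */
}

int main(void)
{
	/* Prints 0xfffffffffffffff5 on a 64-bit build. */
	printf("%p\n", err_ptr_sketch(-11));
	return 0;
}
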
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 9d631a7b6a70..d4f97925dbd8 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -2,6 +2,7 @@
/*
* test_xarray.c: Test the XArray API
* Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2019-2020 Oracle
* Author: Matthew Wilcox <willy@infradead.org>
*/
@@ -11,6 +12,9 @@
static unsigned int tests_run;
static unsigned int tests_passed;
+static const unsigned int order_limit =
+ IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
+
#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
@@ -902,28 +906,34 @@ static noinline void check_store_iter(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
-static noinline void check_multi_find(struct xarray *xa)
+static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
+ unsigned long multi = 3 << order;
+ unsigned long next = 4 << order;
unsigned long index;
- xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
- XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
+ xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
index = 0;
XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(12));
- XA_BUG_ON(xa, index != 12);
- index = 13;
+ xa_mk_value(multi));
+ XA_BUG_ON(xa, index != multi);
+ index = multi + 1;
XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(12));
- XA_BUG_ON(xa, (index < 12) || (index >= 16));
+ xa_mk_value(multi));
+ XA_BUG_ON(xa, (index < multi) || (index >= next));
XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(16));
- XA_BUG_ON(xa, index != 16);
-
- xa_erase_index(xa, 12);
- xa_erase_index(xa, 16);
+ xa_mk_value(next));
+ XA_BUG_ON(xa, index != next);
+ XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
+ XA_BUG_ON(xa, index != next);
+
+ xa_erase_index(xa, multi);
+ xa_erase_index(xa, next);
+ xa_erase_index(xa, next + 1);
XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
@@ -952,6 +962,20 @@ static noinline void check_multi_find_2(struct xarray *xa)
}
}
+static noinline void check_multi_find_3(struct xarray *xa)
+{
+ unsigned int order;
+
+ for (order = 5; order < order_limit; order++) {
+ unsigned long index = 1UL << (order - 5);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
+ xa_erase_index(xa, 0);
+ }
+}
+
static noinline void check_find_1(struct xarray *xa)
{
unsigned long i, j, k;
@@ -1046,13 +1070,35 @@ static noinline void check_find_3(struct xarray *xa)
xa_destroy(xa);
}
+static noinline void check_find_4(struct xarray *xa)
+{
+ unsigned long index = 0;
+ void *entry;
+
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+ XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
+
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+ XA_BUG_ON(xa, entry);
+
+ xa_erase_index(xa, ULONG_MAX);
+}
+
static noinline void check_find(struct xarray *xa)
{
+ unsigned i;
+
check_find_1(xa);
check_find_2(xa);
check_find_3(xa);
- check_multi_find(xa);
+ check_find_4(xa);
+
+ for (i = 2; i < 10; i++)
+ check_multi_find_1(xa, i);
check_multi_find_2(xa);
+ check_multi_find_3(xa);
}
/* See find_swap_entry() in mm/shmem.c */
@@ -1110,6 +1156,85 @@ static noinline void check_find_entry(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_pause(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+ unsigned int order;
+ unsigned long index = 1;
+ unsigned int count = 0;
+
+ for (order = 0; order < order_limit; order++) {
+ XA_BUG_ON(xa, xa_store_order(xa, index, order,
+ xa_mk_index(index), GFP_KERNEL));
+ index += 1UL << order;
+ }
+
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ count = 0;
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ xa_destroy(xa);
+}
+
+static noinline void check_move_tiny(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ rcu_read_unlock();
+ xa_store_index(xa, 0, GFP_KERNEL);
+ rcu_read_lock();
+ xas_set(&xas, 0);
+ XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ xas_set(&xas, 0);
+ XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ rcu_read_unlock();
+ xa_erase_index(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_move_max(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+ rcu_read_unlock();
+
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+ xas_pause(&xas);
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+ rcu_read_unlock();
+
+ xa_erase_index(xa, ULONG_MAX);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
XA_STATE(xas, xa, 0);
@@ -1217,6 +1342,9 @@ static noinline void check_move(struct xarray *xa)
xa_destroy(xa);
+ check_move_tiny(xa);
+ check_move_max(xa);
+
for (i = 0; i < 16; i++)
check_move_small(xa, 1UL << i);
@@ -1572,6 +1700,7 @@ static int xarray_checks(void)
check_xa_alloc();
check_find(&array);
check_find_entry(&array);
+ check_pause(&array);
check_account(&array);
check_destroy(&array);
check_move(&array);
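
check_pause() above pins down the contract that makes xas_pause()
useful: a walk interrupted at every step must still visit each
(multi-order) entry exactly once. A condensed sketch of the
iterate/pause idiom the test exercises, in kernel context (process() is
a hypothetical stand-in for per-entry work):

#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static void process(void *entry) { }	/* hypothetical per-entry work */

void walk_with_pause(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		process(entry);
		xas_pause(&xas);	/* safe point: walk resumes after here */
		rcu_read_unlock();
		cond_resched();		/* lock may stay dropped while paused */
		rcu_read_lock();
	}
	rcu_read_unlock();
}
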
diff --git a/lib/ubsan.c b/lib/ubsan.c
index e7d31735950d..f007a406f89c 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -140,25 +140,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type,
}
}
-static DEFINE_SPINLOCK(report_lock);
-
-static void ubsan_prologue(struct source_location *location,
- unsigned long *flags)
+static void ubsan_prologue(struct source_location *location)
{
current->in_ubsan++;
- spin_lock_irqsave(&report_lock, *flags);
pr_err("========================================"
"========================================\n");
print_source_location("UBSAN: Undefined behaviour in", location);
}
-static void ubsan_epilogue(unsigned long *flags)
+static void ubsan_epilogue(void)
{
dump_stack();
pr_err("========================================"
"========================================\n");
- spin_unlock_irqrestore(&report_lock, *flags);
+
current->in_ubsan--;
}
@@ -167,14 +163,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
{
struct type_descriptor *type = data->type;
- unsigned long flags;
char lhs_val_str[VALUE_LENGTH];
char rhs_val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
@@ -186,7 +181,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
rhs_val_str,
type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
void __ubsan_handle_add_overflow(struct overflow_data *data,
@@ -214,20 +209,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
void __ubsan_handle_negate_overflow(struct overflow_data *data,
void *old_val)
{
- unsigned long flags;
char old_val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
pr_err("negation of %s cannot be represented in type %s:\n",
old_val_str, data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
@@ -235,13 +229,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
void __ubsan_handle_divrem_overflow(struct overflow_data *data,
void *lhs, void *rhs)
{
- unsigned long flags;
char rhs_val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);
@@ -251,58 +244,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data,
else
pr_err("division by zero\n");
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);
static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location);
pr_err("%s null pointer of type %s\n",
type_check_kinds[data->type_check_kind],
data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
static void handle_misaligned_access(struct type_mismatch_data_common *data,
unsigned long ptr)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location);
pr_err("%s misaligned address %p for type %s\n",
type_check_kinds[data->type_check_kind],
(void *)ptr, data->type->type_name);
pr_err("which requires %ld byte alignment\n", data->alignment);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
unsigned long ptr)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location);
pr_err("%s address %p with insufficient space\n",
type_check_kinds[data->type_check_kind],
(void *) ptr);
pr_err("for an object of type %s\n", data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
@@ -351,34 +338,33 @@ EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
{
- unsigned long flags;
char index_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(index_str, sizeof(index_str), data->index_type, index);
pr_err("index %s is out of range for type %s\n", index_str,
data->array_type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
void *lhs, void *rhs)
{
- unsigned long flags;
struct type_descriptor *rhs_type = data->rhs_type;
struct type_descriptor *lhs_type = data->lhs_type;
char rhs_str[VALUE_LENGTH];
char lhs_str[VALUE_LENGTH];
+ unsigned long ua_flags = user_access_save();
if (suppress_report(&data->location))
- return;
+ goto out;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
@@ -401,18 +387,18 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
lhs_str, rhs_str,
lhs_type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
+out:
+ user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
{
- unsigned long flags;
-
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
pr_err("calling __builtin_unreachable()\n");
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
panic("can't return from __builtin_unreachable()");
}
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
@@ -420,19 +406,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
void *val)
{
- unsigned long flags;
char val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(val_str, sizeof(val_str), data->type, val);
pr_err("load of value %s is not a valid value for type %s\n",
val_str, data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
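
After this change the handlers take no global lock around reports;
recursion within a task is still suppressed via current->in_ubsan, and
the shift handler additionally saves and restores the user access state,
since it can be instrumented into code that runs with user access
enabled. The resulting handler shape, as a condensed sketch using the
file's static helpers (some_data is a hypothetical stand-in for the
per-check structs, written as if inside lib/ubsan.c):

#include <linux/uaccess.h>
#include <linux/printk.h>

struct some_data {
	struct source_location location;
};

void handler_sketch(struct some_data *data)
{
	unsigned long ua_flags = user_access_save();

	if (suppress_report(&data->location))
		goto out;

	ubsan_prologue(&data->location);	/* bumps current->in_ubsan */
	pr_err("...report body...\n");
	ubsan_epilogue();			/* dump_stack(), drops in_ubsan */
out:
	user_access_restore(ua_flags);
}
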
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 63937044c57d..672b1817e639 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -746,6 +746,13 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
unsigned long hashval;
+ /*
+ * Print the real pointer value for NULL and error pointers,
+ * as they are not actual addresses.
+ */
+ if (IS_ERR_OR_NULL(ptr))
+ return pointer_string(buf, end, ptr, spec);
+
/* When debugging early boot use non-cryptographically secure hash. */
if (unlikely(debug_boot_weak_hash)) {
hashval = hash_long((unsigned long)ptr, 32);
@@ -869,6 +876,15 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp
return widen_string(buf, n, end, spec);
}
+static noinline_for_stack
+char *file_dentry_name(char *buf, char *end, const struct file *f,
+ struct printf_spec spec, const char *fmt)
+{
+ if (check_pointer(&buf, end, f, spec))
+ return buf;
+
+ return dentry_name(buf, end, f->f_path.dentry, spec, fmt);
+}
#ifdef CONFIG_BLOCK
static noinline_for_stack
char *bdev_name(char *buf, char *end, struct block_device *bdev,
@@ -1799,7 +1815,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
#ifdef CONFIG_COMMON_CLK
return string(buf, end, __clk_get_name(clk), spec);
#else
- return error_string(buf, end, "(%pC?)", spec);
+ return ptr_to_id(buf, end, clk, spec);
#endif
}
}
@@ -2166,9 +2182,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'C':
return clock(buf, end, ptr, spec, fmt);
case 'D':
- return dentry_name(buf, end,
- ((const struct file *)ptr)->f_path.dentry,
- spec, fmt);
+ return file_dentry_name(buf, end, ptr, spec, fmt);
#ifdef CONFIG_BLOCK
case 'g':
return bdev_name(buf, end, ptr, spec, fmt);
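
With the IS_ERR_OR_NULL() check in ptr_to_id(), %p prints NULL and error
pointers verbatim instead of hashing them, which is exactly what the new
test_printf cases assert: hashing values that are not real addresses
only destroyed information. What callers now see (a sketch):

#include <linux/printk.h>
#include <linux/err.h>

void demo(void)
{
	void *p = ERR_PTR(-ENOMEM);

	pr_info("%p\n", NULL);	/* all zeroes, not a hash */
	pr_info("%p\n", p);	/* "...fffffff4": the errno stays readable */
	pr_info("%p\n", &p);	/* a real address: still hashed */
}
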
diff --git a/lib/xarray.c b/lib/xarray.c
index 446b956c9188..08d71c7b7599 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* XArray implementation
- * Copyright (c) 2017 Microsoft Corporation
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2018-2020 Oracle
* Author: Matthew Wilcox <willy@infradead.org>
*/
@@ -967,17 +968,19 @@ void xas_pause(struct xa_state *xas)
if (xas_invalid(xas))
return;
+ xas->xa_node = XAS_RESTART;
if (node) {
- unsigned int offset = xas->xa_offset;
+ unsigned long offset = xas->xa_offset;
while (++offset < XA_CHUNK_SIZE) {
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
break;
}
xas->xa_index += (offset - xas->xa_offset) << node->shift;
+ if (xas->xa_index == 0)
+ xas->xa_node = XAS_BOUNDS;
} else {
xas->xa_index++;
}
- xas->xa_node = XAS_RESTART;
}
EXPORT_SYMBOL_GPL(xas_pause);
@@ -994,6 +997,8 @@ void *__xas_prev(struct xa_state *xas)
if (!xas_frozen(xas->xa_node))
xas->xa_index--;
+ if (!xas->xa_node)
+ return set_bounds(xas);
if (xas_not_node(xas->xa_node))
return xas_load(xas);
@@ -1031,6 +1036,8 @@ void *__xas_next(struct xa_state *xas)
if (!xas_frozen(xas->xa_node))
xas->xa_index++;
+ if (!xas->xa_node)
+ return set_bounds(xas);
if (xas_not_node(xas->xa_node))
return xas_load(xas);
@@ -1075,13 +1082,15 @@ void *xas_find(struct xa_state *xas, unsigned long max)
{
void *entry;
- if (xas_error(xas))
+ if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
return NULL;
+ if (xas->xa_index > max)
+ return set_bounds(xas);
if (!xas->xa_node) {
xas->xa_index = 1;
return set_bounds(xas);
- } else if (xas_top(xas->xa_node)) {
+ } else if (xas->xa_node == XAS_RESTART) {
entry = xas_load(xas);
if (entry || xas_not_node(xas->xa_node))
return entry;
@@ -1146,6 +1155,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
if (xas_error(xas))
return NULL;
+ if (xas->xa_index > max)
+ goto max;
if (!xas->xa_node) {
xas->xa_index = 1;
@@ -1197,6 +1208,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
}
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
+ continue;
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
@@ -1820,6 +1833,18 @@ void *xa_find(struct xarray *xa, unsigned long *indexp,
}
EXPORT_SYMBOL(xa_find);
+static bool xas_sibling(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned long mask;
+
+ if (!node)
+ return false;
+ mask = (XA_CHUNK_SIZE << node->shift) - 1;
+ return (xas->xa_index & mask) >
+ ((unsigned long)xas->xa_offset << node->shift);
+}
+
/**
* xa_find_after() - Search the XArray for a present entry.
* @xa: XArray.
@@ -1843,21 +1868,20 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
XA_STATE(xas, xa, *indexp + 1);
void *entry;
+ if (xas.xa_index == 0)
+ return NULL;
+
rcu_read_lock();
for (;;) {
if ((__force unsigned int)filter < XA_MAX_MARKS)
entry = xas_find_marked(&xas, max, filter);
else
entry = xas_find(&xas, max);
- if (xas.xa_node == XAS_BOUNDS)
+
+ if (xas_invalid(&xas))
break;
- if (xas.xa_shift) {
- if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
- continue;
- } else {
- if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
- continue;
- }
+ if (xas_sibling(&xas))
+ continue;
if (!xas_retry(&xas, entry))
break;
}
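
Two subtleties drive the xa_find_after() rewrite: starting the search at
*indexp + 1 wraps to 0 when the caller passes ULONG_MAX, which must
terminate rather than rescan from the start (the new xa_index == 0
check), and sibling slots of a multi-order entry must not surface as a
second match (the new xas_sibling() test). The wrap case, as exercised
by check_find_4() above (sketch):

#include <linux/xarray.h>
#include <linux/bug.h>

void find_after_max(struct xarray *xa)
{
	unsigned long index = ULONG_MAX;
	void *entry;

	/* Starts internally at index + 1 == 0; the new check returns
	 * NULL instead of walking the whole array again. */
	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	WARN_ON(entry != NULL);
}
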
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 2c13ecc5bb2c..ed1f3df27260 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -10,17 +10,6 @@
#ifndef ASMINF
-/* Allow machine dependent optimization for post-increment or pre-increment.
- Based on testing to date,
- Pre-increment preferred for:
- - PowerPC G3 (Adler)
- - MIPS R5000 (Randers-Pehrson)
- Post-increment preferred for:
- - none
- No measurable difference:
- - Pentium III (Anderson)
- - M68060 (Nikl)
- */
union uu {
unsigned short us;
unsigned char b[2];
@@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p)
return mm.us;
}
-#ifdef POSTINC
-# define OFF 0
-# define PUP(a) *(a)++
-# define UP_UNALIGNED(a) get_unaligned16((a)++)
-#else
-# define OFF 1
-# define PUP(a) *++(a)
-# define UP_UNALIGNED(a) get_unaligned16(++(a))
-#endif
-
/*
Decode literal, length, and distance codes and write out the resulting
literal and match bytes until either not enough input or output is
@@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start)
/* copy state to local variables */
state = (struct inflate_state *)strm->state;
- in = strm->next_in - OFF;
+ in = strm->next_in;
last = in + (strm->avail_in - 5);
- out = strm->next_out - OFF;
+ out = strm->next_out;
beg = out - (start - strm->avail_out);
end = out + (strm->avail_out - 257);
#ifdef INFLATE_STRICT
@@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start)
input data or output space */
do {
if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = lcode[hold & lmask];
@@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start)
bits -= op;
op = (unsigned)(this.op);
if (op == 0) { /* literal */
- PUP(out) = (unsigned char)(this.val);
+ *out++ = (unsigned char)(this.val);
}
else if (op & 16) { /* length base */
len = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
len += (unsigned)hold & ((1U << op) - 1);
@@ -165,9 +144,9 @@ void inflate_fast(z_streamp strm, unsigned start)
bits -= op;
}
if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = dcode[hold & dmask];
@@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start)
dist = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
}
@@ -205,13 +184,13 @@ void inflate_fast(z_streamp strm, unsigned start)
state->mode = BAD;
break;
}
- from = window - OFF;
+ from = window;
if (write == 0) { /* very common case */
from += wsize - op;
if (op < len) { /* some from window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
@@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start)
if (op < len) { /* some from end of window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
- from = window - OFF;
+ from = window;
if (write < len) { /* some from start of window */
op = write;
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
@@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start)
if (op < len) { /* some from window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
}
while (len > 2) {
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
+ *out++ = *from++;
+ *out++ = *from++;
+ *out++ = *from++;
len -= 3;
}
if (len) {
- PUP(out) = PUP(from);
+ *out++ = *from++;
if (len > 1)
- PUP(out) = PUP(from);
+ *out++ = *from++;
}
}
else {
@@ -264,29 +243,29 @@ void inflate_fast(z_streamp strm, unsigned start)
from = out - dist; /* copy direct from output */
/* minimum length is three */
/* Align out addr */
- if (!((long)(out - 1 + OFF) & 1)) {
- PUP(out) = PUP(from);
+ if (!((long)(out - 1) & 1)) {
+ *out++ = *from++;
len--;
}
- sout = (unsigned short *)(out - OFF);
+ sout = (unsigned short *)(out);
if (dist > 2) {
unsigned short *sfrom;
- sfrom = (unsigned short *)(from - OFF);
+ sfrom = (unsigned short *)(from);
loops = len >> 1;
do
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- PUP(sout) = PUP(sfrom);
+ *sout++ = *sfrom++;
#else
- PUP(sout) = UP_UNALIGNED(sfrom);
+ *sout++ = get_unaligned16(sfrom++);
#endif
while (--loops);
- out = (unsigned char *)sout + OFF;
- from = (unsigned char *)sfrom + OFF;
+ out = (unsigned char *)sout;
+ from = (unsigned char *)sfrom;
} else { /* dist == 1 or dist == 2 */
unsigned short pat16;
- pat16 = *(sout-1+OFF);
+ pat16 = *(sout-1);
if (dist == 1) {
union uu mm;
/* copy one char pattern to both bytes */
@@ -296,12 +275,12 @@ void inflate_fast(z_streamp strm, unsigned start)
}
loops = len >> 1;
do
- PUP(sout) = pat16;
+ *sout++ = pat16;
while (--loops);
- out = (unsigned char *)sout + OFF;
+ out = (unsigned char *)sout;
}
if (len & 1)
- PUP(out) = PUP(from);
+ *out++ = *from++;
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
@@ -336,8 +315,8 @@ void inflate_fast(z_streamp strm, unsigned start)
hold &= (1U << bits) - 1;
/* update state and return */
- strm->next_in = in + OFF;
- strm->next_out = out + OFF;
+ strm->next_in = in;
+ strm->next_out = out;
strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
strm->avail_out = (unsigned)(out < end ?
257 + (end - out) : 257 - (out - end));
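
With OFF/PUP/UP_UNALIGNED gone, every pointer in inflate_fast() is a
plain post-increment walk; the pre-increment variant no longer helps any
supported compiler and only obscured the hot loops. The core match copy
now reads as a straightforward byte copy. A user-space sketch of the
simplified idiom (not the full inflate loop):

#include <stddef.h>

/*
 * LZ77 match copy: must go byte by byte because source and destination
 * may overlap when the match distance is shorter than its length.
 */
static void match_copy(unsigned char *out, size_t dist, size_t len)
{
	unsigned char *from = out - dist;

	while (len--)
		*out++ = *from++;	/* plain post-increment, no PUP()/OFF */
}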