Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig | 10
-rw-r--r--  lib/Kconfig.debug | 22
-rw-r--r--  lib/Kconfig.kasan | 34
-rw-r--r--  lib/Kconfig.kfence | 2
-rw-r--r--  lib/Kconfig.kmsan | 2
-rw-r--r--  lib/checksum_kunit.c | 287
-rw-r--r--  lib/crc-ccitt.c | 55
-rw-r--r--  lib/crypto/aesgcm.c | 13
-rw-r--r--  lib/crypto/mpi/ec.c | 3
-rw-r--r--  lib/debugobjects.c | 200
-rw-r--r--  lib/fortify_kunit.c | 5
-rw-r--r--  lib/fw_table.c | 97
-rw-r--r--  lib/iov_iter.c | 36
-rw-r--r--  lib/kobject.c | 24
-rw-r--r--  lib/kunit/Makefile | 3
-rw-r--r--  lib/kunit/attributes.c | 60
-rw-r--r--  lib/kunit/debugfs.c | 102
-rw-r--r--  lib/kunit/device-impl.h | 19
-rw-r--r--  lib/kunit/device.c | 195
-rw-r--r--  lib/kunit/executor.c | 72
-rw-r--r--  lib/kunit/kunit-example-test.c | 87
-rw-r--r--  lib/kunit/kunit-test.c | 139
-rw-r--r--  lib/kunit/string-stream-test.c | 2
-rw-r--r--  lib/kunit/string-stream.c | 2
-rw-r--r--  lib/kunit/test.c | 65
-rw-r--r--  lib/maple_tree.c | 1088
-rw-r--r--  lib/nlattr.c | 6
-rw-r--r--  lib/overflow_kunit.c | 5
-rw-r--r--  lib/raid6/s390vx.uc | 2
-rw-r--r--  lib/sbitmap.c | 5
-rw-r--r--  lib/seq_buf.c | 49
-rw-r--r--  lib/stackdepot.c | 638
-rw-r--r--  lib/string.c | 15
-rw-r--r--  lib/test_bpf.c | 20
-rw-r--r--  lib/test_firmware.c | 1
-rw-r--r--  lib/test_fortify/write_overflow-strlcpy-src.c | 5
-rw-r--r--  lib/test_fortify/write_overflow-strlcpy.c | 5
-rw-r--r--  lib/test_ida.c | 2
-rw-r--r--  lib/test_maple_tree.c | 331
-rw-r--r--  lib/test_meminit.c | 2
-rw-r--r--  lib/test_rhashtable.c | 1
-rw-r--r--  lib/test_sysctl.c | 31
-rw-r--r--  lib/trace_readwrite.c | 2
-rw-r--r--  lib/ubsan.c | 7
44 files changed, 2648 insertions, 1103 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 3ea1c830efab..5ddda7c2ed9b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -713,10 +713,20 @@ config ARCH_STACKWALK
config STACKDEPOT
bool
select STACKTRACE
+ help
+ Stack depot: stack trace storage that avoids duplication
config STACKDEPOT_ALWAYS_INIT
bool
select STACKDEPOT
+ help
+ Always initialize stack depot during early boot
+
+config STACKDEPOT_MAX_FRAMES
+ int "Maximum number of frames in trace saved in stack depot"
+ range 1 256
+ default 64
+ depends on STACKDEPOT
config REF_TRACKER
bool
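
Note: the new STACKDEPOT_MAX_FRAMES option bounds how many frames a stored trace keeps. A rough sketch of how a consumer in lib/stackdepot.c might apply the limit (hypothetical helper name; the real code clamps inline while saving a trace):

static unsigned int depot_clamp_nr_frames(unsigned int nr_entries)
{
	/* Never store more frames per trace than the configured maximum. */
	if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES)
		nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES;
	return nr_entries;
}
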
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4405f81248fb..ef36b829ae1f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -231,9 +231,10 @@ config DEBUG_INFO
in the "Debug information" choice below, indicating that debug
information will be generated for build targets.
-# Clang is known to generate .{s,u}leb128 with symbol deltas with DWARF5, which
-# some targets may not support: https://sourceware.org/bugzilla/show_bug.cgi?id=27215
-config AS_HAS_NON_CONST_LEB128
+# Clang generates .uleb128 with label differences for DWARF v5, a feature that
+# older binutils ports do not support when utilizing RISC-V style linker
+# relaxation: https://sourceware.org/bugzilla/show_bug.cgi?id=27215
+config AS_HAS_NON_CONST_ULEB128
def_bool $(as-instr,.uleb128 .Lexpr_end4 - .Lexpr_start3\n.Lexpr_start3:\n.Lexpr_end4:)
choice
@@ -258,7 +259,7 @@ config DEBUG_INFO_NONE
config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
bool "Rely on the toolchain's implicit default DWARF version"
select DEBUG_INFO
- depends on !CC_IS_CLANG || AS_IS_LLVM || CLANG_VERSION < 140000 || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128)
+ depends on !CC_IS_CLANG || AS_IS_LLVM || CLANG_VERSION < 140000 || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_ULEB128)
help
The implicit default version of DWARF debug info produced by a
toolchain changes over time.
@@ -282,7 +283,8 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_DWARF5
bool "Generate DWARF Version 5 debuginfo"
select DEBUG_INFO
- depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128)
+ depends on !ARCH_HAS_BROKEN_DWARF5
+ depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_ULEB128)
help
Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
5.0+ accepts the -gdwarf-5 flag but only had partial support for some
@@ -378,6 +380,8 @@ config DEBUG_INFO_BTF
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
depends on BPF_SYSCALL
depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121
+ # pahole uses elfutils, which does not have support for Hexagon relocations
+ depends on !HEXAGON
help
Generate deduplicated BTF type information from DWARF debug info.
Turning this on expects presence of pahole tool, which will convert
@@ -763,6 +767,8 @@ config DEBUG_STACK_USAGE
help
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T and sysrq-P debug output.
+ Also emits a message to dmesg when a process exits if that process
+ used more stack space than previously exiting processes.
This option will slow down process creation somewhat.
@@ -1970,7 +1976,6 @@ config FAULT_INJECTION
config FAILSLAB
bool "Fault-injection capability for kmalloc"
depends on FAULT_INJECTION
- depends on SLAB || SLUB
help
Provide fault-injection capability for kmalloc.
@@ -2088,10 +2093,6 @@ config KCOV
KCOV exposes kernel code coverage information in a form suitable
for coverage-guided fuzzing (randomized testing).
- If RANDOMIZE_BASE is enabled, PC values will not be stable across
- different machines and across reboots. If you need stable PC values,
- disable RANDOMIZE_BASE.
-
For more details, see Documentation/dev-tools/kcov.rst.
config KCOV_ENABLE_COMPARISONS
@@ -2234,6 +2235,7 @@ config TEST_DIV64
config TEST_IOV_ITER
tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
depends on KUNIT
+ depends on MMU
default KUNIT_ALL_TESTS
help
Enable this to turn on testing of the operation of the I/O iterator
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index fdca89c05745..e6eda054ab27 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -37,7 +37,7 @@ menuconfig KASAN
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
HAVE_ARCH_KASAN_HW_TAGS
- depends on (SLUB && SYSFS && !SLUB_TINY) || (SLAB && !DEBUG_SLAB)
+ depends on SYSFS && !SLUB_TINY
select STACKDEPOT_ALWAYS_INIT
help
Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety
@@ -78,7 +78,7 @@ config KASAN_GENERIC
bool "Generic KASAN"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
- select SLUB_DEBUG if SLUB
+ select SLUB_DEBUG
select CONSTRUCTORS
help
Enables Generic KASAN.
@@ -89,13 +89,11 @@ config KASAN_GENERIC
overhead of ~50% for dynamic allocations.
The performance slowdown is ~x3.
- (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
-
config KASAN_SW_TAGS
bool "Software Tag-Based KASAN"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
- select SLUB_DEBUG if SLUB
+ select SLUB_DEBUG
select CONSTRUCTORS
help
Enables Software Tag-Based KASAN.
@@ -110,12 +108,9 @@ config KASAN_SW_TAGS
May potentially introduce problems related to pointer casting and
comparison, as it embeds a tag into the top byte of each pointer.
- (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
-
config KASAN_HW_TAGS
bool "Hardware Tag-Based KASAN"
depends on HAVE_ARCH_KASAN_HW_TAGS
- depends on SLUB
help
Enables Hardware Tag-Based KASAN.
@@ -134,7 +129,7 @@ endchoice
choice
prompt "Instrumentation type"
depends on KASAN_GENERIC || KASAN_SW_TAGS
- default KASAN_OUTLINE
+ default KASAN_INLINE if !ARCH_DISABLE_KASAN_INLINE
config KASAN_OUTLINE
bool "Outline instrumentation"
@@ -207,4 +202,25 @@ config KASAN_MODULE_TEST
A part of the KASAN test suite that is not integrated with KUnit.
Incompatible with Hardware Tag-Based KASAN.
+config KASAN_EXTRA_INFO
+ bool "Record and report more information"
+ depends on KASAN
+ help
+ Record and report more information to help us find the cause of the
+ bug and to help us correlate the error with other system events.
+
+ Currently, the CPU number and timestamp are additionally
+ recorded for each heap block at allocation and free time, and
+ 8 bytes will be added to each metadata structure that records
+ allocation or free information.
+
+ In Generic KASAN, each kmalloc-8 and kmalloc-16 object will add
+ 16 bytes of additional memory consumption, and each kmalloc-32
+ object will add 8 bytes of additional memory consumption, not
+ affecting other larger objects.
+
+ In SW_TAGS KASAN and HW_TAGS KASAN, depending on the stack_ring_size
+ boot parameter, it will add 8 * stack_ring_size bytes of additional
+ memory consumption.
+
endif # KASAN
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
index 459dda9ef619..6fbbebec683a 100644
--- a/lib/Kconfig.kfence
+++ b/lib/Kconfig.kfence
@@ -5,7 +5,7 @@ config HAVE_ARCH_KFENCE
menuconfig KFENCE
bool "KFENCE: low-overhead sampling-based memory safety error detector"
- depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
+ depends on HAVE_ARCH_KFENCE
select STACKTRACE
select IRQ_WORK
help
diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan
index ef2c8f256c57..0541d7b079cc 100644
--- a/lib/Kconfig.kmsan
+++ b/lib/Kconfig.kmsan
@@ -11,7 +11,7 @@ config HAVE_KMSAN_COMPILER
config KMSAN
bool "KMSAN: detector of uninitialized values use"
depends on HAVE_ARCH_KMSAN && HAVE_KMSAN_COMPILER
- depends on SLUB && DEBUG_KERNEL && !KASAN && !KCSAN
+ depends on DEBUG_KERNEL && !KASAN && !KCSAN
depends on !PREEMPT_RT
select STACKDEPOT
select STACKDEPOT_ALWAYS_INIT
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index 0eed92b77ba3..bf70850035c7 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -1,15 +1,21 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Test cases csum_partial and csum_fold
+ * Test cases csum_partial, csum_fold, ip_fast_csum, csum_ipv6_magic
*/
#include <kunit/test.h>
#include <asm/checksum.h>
+#include <net/ip6_checksum.h>
#define MAX_LEN 512
#define MAX_ALIGN 64
#define TEST_BUFLEN (MAX_LEN + MAX_ALIGN)
+#define IPv4_MIN_WORDS 5
+#define IPv4_MAX_WORDS 15
+#define NUM_IPv6_TESTS 200
+#define NUM_IP_FAST_CSUM_TESTS 181
+
/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
static const u32 random_init_sum = 0x2847aab;
static const u8 random_buf[] = {
@@ -209,6 +215,237 @@ static const u32 init_sums_no_overflow[] = {
0xffff0000, 0xfffffffb,
};
+static const u16 expected_csum_ipv6_magic[] = {
+ 0x18d4, 0x3085, 0x2e4b, 0xd9f4, 0xbdc8, 0x78f, 0x1034, 0x8422, 0x6fc0,
+ 0xd2f6, 0xbeb5, 0x9d3, 0x7e2a, 0x312e, 0x778e, 0xc1bb, 0x7cf2, 0x9d1e,
+ 0xca21, 0xf3ff, 0x7569, 0xb02e, 0xca86, 0x7e76, 0x4539, 0x45e3, 0xf28d,
+ 0xdf81, 0x8fd5, 0x3b5d, 0x8324, 0xf471, 0x83be, 0x1daf, 0x8c46, 0xe682,
+ 0xd1fb, 0x6b2e, 0xe687, 0x2a33, 0x4833, 0x2d67, 0x660f, 0x2e79, 0xd65e,
+ 0x6b62, 0x6672, 0x5dbd, 0x8680, 0xbaa5, 0x2229, 0x2125, 0x2d01, 0x1cc0,
+ 0x6d36, 0x33c0, 0xee36, 0xd832, 0x9820, 0x8a31, 0x53c5, 0x2e2, 0xdb0e,
+ 0x49ed, 0x17a7, 0x77a0, 0xd72e, 0x3d72, 0x7dc8, 0x5b17, 0xf55d, 0xa4d9,
+ 0x1446, 0x5d56, 0x6b2e, 0x69a5, 0xadb6, 0xff2a, 0x92e, 0xe044, 0x3402,
+ 0xbb60, 0xec7f, 0xe7e6, 0x1986, 0x32f4, 0x8f8, 0x5e00, 0x47c6, 0x3059,
+ 0x3969, 0xe957, 0x4388, 0x2854, 0x3334, 0xea71, 0xa6de, 0x33f9, 0x83fc,
+ 0x37b4, 0x5531, 0x3404, 0x1010, 0xed30, 0x610a, 0xc95, 0x9aed, 0x6ff,
+ 0x5136, 0x2741, 0x660e, 0x8b80, 0xf71, 0xa263, 0x88af, 0x7a73, 0x3c37,
+ 0x1908, 0x6db5, 0x2e92, 0x1cd2, 0x70c8, 0xee16, 0xe80, 0xcd55, 0x6e6,
+ 0x6434, 0x127, 0x655d, 0x2ea0, 0xb4f4, 0xdc20, 0x5671, 0xe462, 0xe52b,
+ 0xdb44, 0x3589, 0xc48f, 0xe60b, 0xd2d2, 0x66ad, 0x498, 0x436, 0xb917,
+ 0xf0ca, 0x1a6e, 0x1cb7, 0xbf61, 0x2870, 0xc7e8, 0x5b30, 0xe4a5, 0x168,
+ 0xadfc, 0xd035, 0xe690, 0xe283, 0xfb27, 0xe4ad, 0xb1a5, 0xf2d5, 0xc4b6,
+ 0x8a30, 0xd7d5, 0x7df9, 0x91d5, 0x63ed, 0x2d21, 0x312b, 0xab19, 0xa632,
+ 0x8d2e, 0xef06, 0x57b9, 0xc373, 0xbd1f, 0xa41f, 0x8444, 0x9975, 0x90cb,
+ 0xc49c, 0xe965, 0x4eff, 0x5a, 0xef6d, 0xe81a, 0xe260, 0x853a, 0xff7a,
+ 0x99aa, 0xb06b, 0xee19, 0xcc2c, 0xf34c, 0x7c49, 0xdac3, 0xa71e, 0xc988,
+ 0x3845, 0x1014
+};
+
+static const u16 expected_fast_csum[] = {
+ 0xda83, 0x45da, 0x4f46, 0x4e4f, 0x34e, 0xe902, 0xa5e9, 0x87a5, 0x7187,
+ 0x5671, 0xf556, 0x6df5, 0x816d, 0x8f81, 0xbb8f, 0xfbba, 0x5afb, 0xbe5a,
+ 0xedbe, 0xabee, 0x6aac, 0xe6b, 0xea0d, 0x67ea, 0x7e68, 0x8a7e, 0x6f8a,
+ 0x3a70, 0x9f3a, 0xe89e, 0x75e8, 0x7976, 0xfa79, 0x2cfa, 0x3c2c, 0x463c,
+ 0x7146, 0x7a71, 0x547a, 0xfd53, 0x99fc, 0xb699, 0x92b6, 0xdb91, 0xe8da,
+ 0x5fe9, 0x1e60, 0xae1d, 0x39ae, 0xf439, 0xa1f4, 0xdda1, 0xede, 0x790f,
+ 0x579, 0x1206, 0x9012, 0x2490, 0xd224, 0x5cd2, 0xa65d, 0xca7, 0x220d,
+ 0xf922, 0xbf9, 0x920b, 0x1b92, 0x361c, 0x2e36, 0x4d2e, 0x24d, 0x2,
+ 0xcfff, 0x90cf, 0xa591, 0x93a5, 0x7993, 0x9579, 0xc894, 0x50c8, 0x5f50,
+ 0xd55e, 0xcad5, 0xf3c9, 0x8f4, 0x4409, 0x5043, 0x5b50, 0x55b, 0x2205,
+ 0x1e22, 0x801e, 0x3780, 0xe137, 0x7ee0, 0xf67d, 0x3cf6, 0xa53c, 0x2ea5,
+ 0x472e, 0x5147, 0xcf51, 0x1bcf, 0x951c, 0x1e95, 0xc71e, 0xe4c7, 0xc3e4,
+ 0x3dc3, 0xee3d, 0xa4ed, 0xf9a4, 0xcbf8, 0x75cb, 0xb375, 0x50b4, 0x3551,
+ 0xf835, 0x19f8, 0x8c1a, 0x538c, 0xad52, 0xa3ac, 0xb0a3, 0x5cb0, 0x6c5c,
+ 0x5b6c, 0xc05a, 0x92c0, 0x4792, 0xbe47, 0x53be, 0x1554, 0x5715, 0x4b57,
+ 0xe54a, 0x20e5, 0x21, 0xd500, 0xa1d4, 0xa8a1, 0x57a9, 0xca57, 0x5ca,
+ 0x1c06, 0x4f1c, 0xe24e, 0xd9e2, 0xf0d9, 0x4af1, 0x474b, 0x8146, 0xe81,
+ 0xfd0e, 0x84fd, 0x7c85, 0xba7c, 0x17ba, 0x4a17, 0x964a, 0xf595, 0xff5,
+ 0x5310, 0x3253, 0x6432, 0x4263, 0x2242, 0xe121, 0x32e1, 0xf632, 0xc5f5,
+ 0x21c6, 0x7d22, 0x8e7c, 0x418e, 0x5641, 0x3156, 0x7c31, 0x737c, 0x373,
+ 0x2503, 0xc22a, 0x3c2, 0x4a04, 0x8549, 0x5285, 0xa352, 0xe8a3, 0x6fe8,
+ 0x1a6f, 0x211a, 0xe021, 0x38e0, 0x7638, 0xf575, 0x9df5, 0x169e, 0xf116,
+ 0x23f1, 0xcd23, 0xece, 0x660f, 0x4866, 0x6a48, 0x716a, 0xee71, 0xa2ee,
+ 0xb8a2, 0x61b9, 0xa361, 0xf7a2, 0x26f7, 0x1127, 0x6611, 0xe065, 0x36e0,
+ 0x1837, 0x3018, 0x1c30, 0x721b, 0x3e71, 0xe43d, 0x99e4, 0x9e9a, 0xb79d,
+ 0xa9b7, 0xcaa, 0xeb0c, 0x4eb, 0x1305, 0x8813, 0xb687, 0xa9b6, 0xfba9,
+ 0xd7fb, 0xccd8, 0x2ecd, 0x652f, 0xae65, 0x3fae, 0x3a40, 0x563a, 0x7556,
+ 0x2776, 0x1228, 0xef12, 0xf9ee, 0xcef9, 0x56cf, 0xa956, 0x24a9, 0xba24,
+ 0x5fba, 0x665f, 0xf465, 0x8ff4, 0x6d8f, 0x346d, 0x5f34, 0x385f, 0xd137,
+ 0xb8d0, 0xacb8, 0x55ac, 0x7455, 0xe874, 0x89e8, 0xd189, 0xa0d1, 0xb2a0,
+ 0xb8b2, 0x36b8, 0x5636, 0xd355, 0x8d3, 0x1908, 0x2118, 0xc21, 0x990c,
+ 0x8b99, 0x158c, 0x7815, 0x9e78, 0x6f9e, 0x4470, 0x1d44, 0x341d, 0x2634,
+ 0x3f26, 0x793e, 0xc79, 0xcc0b, 0x26cc, 0xd126, 0x1fd1, 0xb41f, 0xb6b4,
+ 0x22b7, 0xa122, 0xa1, 0x7f01, 0x837e, 0x3b83, 0xaf3b, 0x6fae, 0x916f,
+ 0xb490, 0xffb3, 0xceff, 0x50cf, 0x7550, 0x7275, 0x1272, 0x2613, 0xaa26,
+ 0xd5aa, 0x7d5, 0x9607, 0x96, 0xb100, 0xf8b0, 0x4bf8, 0xdd4c, 0xeddd,
+ 0x98ed, 0x2599, 0x9325, 0xeb92, 0x8feb, 0xcc8f, 0x2acd, 0x392b, 0x3b39,
+ 0xcb3b, 0x6acb, 0xd46a, 0xb8d4, 0x6ab8, 0x106a, 0x2f10, 0x892f, 0x789,
+ 0xc806, 0x45c8, 0x7445, 0x3c74, 0x3a3c, 0xcf39, 0xd7ce, 0x58d8, 0x6e58,
+ 0x336e, 0x1034, 0xee10, 0xe9ed, 0xc2e9, 0x3fc2, 0xd53e, 0xd2d4, 0xead2,
+ 0x8fea, 0x2190, 0x1162, 0xbe11, 0x8cbe, 0x6d8c, 0xfb6c, 0x6dfb, 0xd36e,
+ 0x3ad3, 0xf3a, 0x870e, 0xc287, 0x53c3, 0xc54, 0x5b0c, 0x7d5a, 0x797d,
+ 0xec79, 0x5dec, 0x4d5e, 0x184e, 0xd618, 0x60d6, 0xb360, 0x98b3, 0xf298,
+ 0xb1f2, 0x69b1, 0xf969, 0xef9, 0xab0e, 0x21ab, 0xe321, 0x24e3, 0x8224,
+ 0x5481, 0x5954, 0x7a59, 0xff7a, 0x7dff, 0x1a7d, 0xa51a, 0x46a5, 0x6b47,
+ 0xe6b, 0x830e, 0xa083, 0xff9f, 0xd0ff, 0xffd0, 0xe6ff, 0x7de7, 0xc67d,
+ 0xd0c6, 0x61d1, 0x3a62, 0xc3b, 0x150c, 0x1715, 0x4517, 0x5345, 0x3954,
+ 0xdd39, 0xdadd, 0x32db, 0x6a33, 0xd169, 0x86d1, 0xb687, 0x3fb6, 0x883f,
+ 0xa487, 0x39a4, 0x2139, 0xbe20, 0xffbe, 0xedfe, 0x8ded, 0x368e, 0xc335,
+ 0x51c3, 0x9851, 0xf297, 0xd6f2, 0xb9d6, 0x95ba, 0x2096, 0xea1f, 0x76e9,
+ 0x4e76, 0xe04d, 0xd0df, 0x80d0, 0xa280, 0xfca2, 0x75fc, 0xef75, 0x32ef,
+ 0x6833, 0xdf68, 0xc4df, 0x76c4, 0xb77, 0xb10a, 0xbfb1, 0x58bf, 0x5258,
+ 0x4d52, 0x6c4d, 0x7e6c, 0xb67e, 0xccb5, 0x8ccc, 0xbe8c, 0xc8bd, 0x9ac8,
+ 0xa99b, 0x52a9, 0x2f53, 0xc30, 0x3e0c, 0xb83d, 0x83b7, 0x5383, 0x7e53,
+ 0x4f7e, 0xe24e, 0xb3e1, 0x8db3, 0x618e, 0xc861, 0xfcc8, 0x34fc, 0x9b35,
+ 0xaa9b, 0xb1aa, 0x5eb1, 0x395e, 0x8639, 0xd486, 0x8bd4, 0x558b, 0x2156,
+ 0xf721, 0x4ef6, 0x14f, 0x7301, 0xdd72, 0x49de, 0x894a, 0x9889, 0x8898,
+ 0x7788, 0x7b77, 0x637b, 0xb963, 0xabb9, 0x7cab, 0xc87b, 0x21c8, 0xcb21,
+ 0xdfca, 0xbfdf, 0xf2bf, 0x6af2, 0x626b, 0xb261, 0x3cb2, 0xc63c, 0xc9c6,
+ 0xc9c9, 0xb4c9, 0xf9b4, 0x91f9, 0x4091, 0x3a40, 0xcc39, 0xd1cb, 0x7ed1,
+ 0x537f, 0x6753, 0xa167, 0xba49, 0x88ba, 0x7789, 0x3877, 0xf037, 0xd3ef,
+ 0xb5d4, 0x55b6, 0xa555, 0xeca4, 0xa1ec, 0xb6a2, 0x7b7, 0x9507, 0xfd94,
+ 0x82fd, 0x5c83, 0x765c, 0x9676, 0x3f97, 0xda3f, 0x6fda, 0x646f, 0x3064,
+ 0x5e30, 0x655e, 0x6465, 0xcb64, 0xcdca, 0x4ccd, 0x3f4c, 0x243f, 0x6f24,
+ 0x656f, 0x6065, 0x3560, 0x3b36, 0xac3b, 0x4aac, 0x714a, 0x7e71, 0xda7e,
+ 0x7fda, 0xda7f, 0x6fda, 0xff6f, 0xc6ff, 0xedc6, 0xd4ed, 0x70d5, 0xeb70,
+ 0xa3eb, 0x80a3, 0xca80, 0x3fcb, 0x2540, 0xf825, 0x7ef8, 0xf87e, 0x73f8,
+ 0xb474, 0xb4b4, 0x92b5, 0x9293, 0x93, 0x3500, 0x7134, 0x9071, 0xfa8f,
+ 0x51fa, 0x1452, 0xba13, 0x7ab9, 0x957a, 0x8a95, 0x6e8a, 0x6d6e, 0x7c6d,
+ 0x447c, 0x9744, 0x4597, 0x8945, 0xef88, 0x8fee, 0x3190, 0x4831, 0x8447,
+ 0xa183, 0x1da1, 0xd41d, 0x2dd4, 0x4f2e, 0xc94e, 0xcbc9, 0xc9cb, 0x9ec9,
+ 0x319e, 0xd531, 0x20d5, 0x4021, 0xb23f, 0x29b2, 0xd828, 0xecd8, 0x5ded,
+ 0xfc5d, 0x4dfc, 0xd24d, 0x6bd2, 0x5f6b, 0xb35e, 0x7fb3, 0xee7e, 0x56ee,
+ 0xa657, 0x68a6, 0x8768, 0x7787, 0xb077, 0x4cb1, 0x764c, 0xb175, 0x7b1,
+ 0x3d07, 0x603d, 0x3560, 0x3e35, 0xb03d, 0xd6b0, 0xc8d6, 0xd8c8, 0x8bd8,
+ 0x3e8c, 0x303f, 0xd530, 0xf1d4, 0x42f1, 0xca42, 0xddca, 0x41dd, 0x3141,
+ 0x132, 0xe901, 0x8e9, 0xbe09, 0xe0bd, 0x2ce0, 0x862d, 0x3986, 0x9139,
+ 0x6d91, 0x6a6d, 0x8d6a, 0x1b8d, 0xac1b, 0xedab, 0x54ed, 0xc054, 0xcebf,
+ 0xc1ce, 0x5c2, 0x3805, 0x6038, 0x5960, 0xd359, 0xdd3, 0xbe0d, 0xafbd,
+ 0x6daf, 0x206d, 0x2c20, 0x862c, 0x8e86, 0xec8d, 0xa2ec, 0xa3a2, 0x51a3,
+ 0x8051, 0xfd7f, 0x91fd, 0xa292, 0xaf14, 0xeeae, 0x59ef, 0x535a, 0x8653,
+ 0x3986, 0x9539, 0xb895, 0xa0b8, 0x26a0, 0x2227, 0xc022, 0x77c0, 0xad77,
+ 0x46ad, 0xaa46, 0x60aa, 0x8560, 0x4785, 0xd747, 0x45d7, 0x2346, 0x5f23,
+ 0x25f, 0x1d02, 0x71d, 0x8206, 0xc82, 0x180c, 0x3018, 0x4b30, 0x4b,
+ 0x3001, 0x1230, 0x2d12, 0x8c2d, 0x148d, 0x4015, 0x5f3f, 0x3d5f, 0x6b3d,
+ 0x396b, 0x473a, 0xf746, 0x44f7, 0x8945, 0x3489, 0xcb34, 0x84ca, 0xd984,
+ 0xf0d9, 0xbcf0, 0x63bd, 0x3264, 0xf332, 0x45f3, 0x7346, 0x5673, 0xb056,
+ 0xd3b0, 0x4ad4, 0x184b, 0x7d18, 0x6c7d, 0xbb6c, 0xfeba, 0xe0fe, 0x10e1,
+ 0x5410, 0x2954, 0x9f28, 0x3a9f, 0x5a3a, 0xdb59, 0xbdc, 0xb40b, 0x1ab4,
+ 0x131b, 0x5d12, 0x6d5c, 0xe16c, 0xb0e0, 0x89b0, 0xba88, 0xbb, 0x3c01,
+ 0xe13b, 0x6fe1, 0x446f, 0xa344, 0x81a3, 0xfe81, 0xc7fd, 0x38c8, 0xb38,
+ 0x1a0b, 0x6d19, 0xf36c, 0x47f3, 0x6d48, 0xb76d, 0xd3b7, 0xd8d2, 0x52d9,
+ 0x4b53, 0xa54a, 0x34a5, 0xc534, 0x9bc4, 0xed9b, 0xbeed, 0x3ebe, 0x233e,
+ 0x9f22, 0x4a9f, 0x774b, 0x4577, 0xa545, 0x64a5, 0xb65, 0x870b, 0x487,
+ 0x9204, 0x5f91, 0xd55f, 0x35d5, 0x1a35, 0x71a, 0x7a07, 0x4e7a, 0xfc4e,
+ 0x1efc, 0x481f, 0x7448, 0xde74, 0xa7dd, 0x1ea7, 0xaa1e, 0xcfaa, 0xfbcf,
+ 0xedfb, 0x6eee, 0x386f, 0x4538, 0x6e45, 0xd96d, 0x11d9, 0x7912, 0x4b79,
+ 0x494b, 0x6049, 0xac5f, 0x65ac, 0x1366, 0x5913, 0xe458, 0x7ae4, 0x387a,
+ 0x3c38, 0xb03c, 0x76b0, 0x9376, 0xe193, 0x42e1, 0x7742, 0x6476, 0x3564,
+ 0x3c35, 0x6a3c, 0xcc69, 0x94cc, 0x5d95, 0xe5e, 0xee0d, 0x4ced, 0xce4c,
+ 0x52ce, 0xaa52, 0xdaaa, 0xe4da, 0x1de5, 0x4530, 0x5445, 0x3954, 0xb639,
+ 0x81b6, 0x7381, 0x1574, 0xc215, 0x10c2, 0x3f10, 0x6b3f, 0xe76b, 0x7be7,
+ 0xbc7b, 0xf7bb, 0x41f7, 0xcc41, 0x38cc, 0x4239, 0xa942, 0x4a9, 0xc504,
+ 0x7cc4, 0x437c, 0x6743, 0xea67, 0x8dea, 0xe88d, 0xd8e8, 0xdcd8, 0x17dd,
+ 0x5718, 0x958, 0xa609, 0x41a5, 0x5842, 0x159, 0x9f01, 0x269f, 0x5a26,
+ 0x405a, 0xc340, 0xb4c3, 0xd4b4, 0xf4d3, 0xf1f4, 0x39f2, 0xe439, 0x67e4,
+ 0x4168, 0xa441, 0xdda3, 0xdedd, 0x9df, 0xab0a, 0xa5ab, 0x9a6, 0xba09,
+ 0x9ab9, 0xad9a, 0x5ae, 0xe205, 0xece2, 0xecec, 0x14ed, 0xd614, 0x6bd5,
+ 0x916c, 0x3391, 0x6f33, 0x206f, 0x8020, 0x780, 0x7207, 0x2472, 0x8a23,
+ 0xb689, 0x3ab6, 0xf739, 0x97f6, 0xb097, 0xa4b0, 0xe6a4, 0x88e6, 0x2789,
+ 0xb28, 0x350b, 0x1f35, 0x431e, 0x1043, 0xc30f, 0x79c3, 0x379, 0x5703,
+ 0x3256, 0x4732, 0x7247, 0x9d72, 0x489d, 0xd348, 0xa4d3, 0x7ca4, 0xbf7b,
+ 0x45c0, 0x7b45, 0x337b, 0x4034, 0x843f, 0xd083, 0x35d0, 0x6335, 0x4d63,
+ 0xe14c, 0xcce0, 0xfecc, 0x35ff, 0x5636, 0xf856, 0xeef8, 0x2def, 0xfc2d,
+ 0x4fc, 0x6e04, 0xb66d, 0x78b6, 0xbb78, 0x3dbb, 0x9a3d, 0x839a, 0x9283,
+ 0x593, 0xd504, 0x23d5, 0x5424, 0xd054, 0x61d0, 0xdb61, 0x17db, 0x1f18,
+ 0x381f, 0x9e37, 0x679e, 0x1d68, 0x381d, 0x8038, 0x917f, 0x491, 0xbb04,
+ 0x23bb, 0x4124, 0xd41, 0xa30c, 0x8ba3, 0x8b8b, 0xc68b, 0xd2c6, 0xebd2,
+ 0x93eb, 0xbd93, 0x99bd, 0x1a99, 0xea19, 0x58ea, 0xcf58, 0x73cf, 0x1073,
+ 0x9e10, 0x139e, 0xea13, 0xcde9, 0x3ecd, 0x883f, 0xf89, 0x180f, 0x2a18,
+ 0x212a, 0xce20, 0x73ce, 0xf373, 0x60f3, 0xad60, 0x4093, 0x8e40, 0xb98e,
+ 0xbfb9, 0xf1bf, 0x8bf1, 0x5e8c, 0xe95e, 0x14e9, 0x4e14, 0x1c4e, 0x7f1c,
+ 0xe77e, 0x6fe7, 0xf26f, 0x13f2, 0x8b13, 0xda8a, 0x5fda, 0xea5f, 0x4eea,
+ 0xa84f, 0x88a8, 0x1f88, 0x2820, 0x9728, 0x5a97, 0x3f5b, 0xb23f, 0x70b2,
+ 0x2c70, 0x232d, 0xf623, 0x4f6, 0x905, 0x7509, 0xd675, 0x28d7, 0x9428,
+ 0x3794, 0xf036, 0x2bf0, 0xba2c, 0xedb9, 0xd7ed, 0x59d8, 0xed59, 0x4ed,
+ 0xe304, 0x18e3, 0x5c19, 0x3d5c, 0x753d, 0x6d75, 0x956d, 0x7f95, 0xc47f,
+ 0x83c4, 0xa84, 0x2e0a, 0x5f2e, 0xb95f, 0x77b9, 0x6d78, 0xf46d, 0x1bf4,
+ 0xed1b, 0xd6ed, 0xe0d6, 0x5e1, 0x3905, 0x5638, 0xa355, 0x99a2, 0xbe99,
+ 0xb4bd, 0x85b4, 0x2e86, 0x542e, 0x6654, 0xd765, 0x73d7, 0x3a74, 0x383a,
+ 0x2638, 0x7826, 0x7677, 0x9a76, 0x7e99, 0x2e7e, 0xea2d, 0xa6ea, 0x8a7,
+ 0x109, 0x3300, 0xad32, 0x5fad, 0x465f, 0x2f46, 0xc62f, 0xd4c5, 0xad5,
+ 0xcb0a, 0x4cb, 0xb004, 0x7baf, 0xe47b, 0x92e4, 0x8e92, 0x638e, 0x1763,
+ 0xc17, 0xf20b, 0x1ff2, 0x8920, 0x5889, 0xcb58, 0xf8cb, 0xcaf8, 0x84cb,
+ 0x9f84, 0x8a9f, 0x918a, 0x4991, 0x8249, 0xff81, 0x46ff, 0x5046, 0x5f50,
+ 0x725f, 0xf772, 0x8ef7, 0xe08f, 0xc1e0, 0x1fc2, 0x9e1f, 0x8b9d, 0x108b,
+ 0x411, 0x2b04, 0xb02a, 0x1fb0, 0x1020, 0x7a0f, 0x587a, 0x8958, 0xb188,
+ 0xb1b1, 0x49b2, 0xb949, 0x7ab9, 0x917a, 0xfc91, 0xe6fc, 0x47e7, 0xbc47,
+ 0x8fbb, 0xea8e, 0x34ea, 0x2635, 0x1726, 0x9616, 0xc196, 0xa6c1, 0xf3a6,
+ 0x11f3, 0x4811, 0x3e48, 0xeb3e, 0xf7ea, 0x1bf8, 0xdb1c, 0x8adb, 0xe18a,
+ 0x42e1, 0x9d42, 0x5d9c, 0x6e5d, 0x286e, 0x4928, 0x9a49, 0xb09c, 0xa6b0,
+ 0x2a7, 0xe702, 0xf5e6, 0x9af5, 0xf9b, 0x810f, 0x8080, 0x180, 0x1702,
+ 0x5117, 0xa650, 0x11a6, 0x1011, 0x550f, 0xd554, 0xbdd5, 0x6bbe, 0xc66b,
+ 0xfc7, 0x5510, 0x5555, 0x7655, 0x177, 0x2b02, 0x6f2a, 0xb70, 0x9f0b,
+ 0xcf9e, 0xf3cf, 0x3ff4, 0xcb40, 0x8ecb, 0x768e, 0x5277, 0x8652, 0x9186,
+ 0x9991, 0x5099, 0xd350, 0x93d3, 0x6d94, 0xe6d, 0x530e, 0x3153, 0xa531,
+ 0x64a5, 0x7964, 0x7c79, 0x467c, 0x1746, 0x3017, 0x3730, 0x538, 0x5,
+ 0x1e00, 0x5b1e, 0x955a, 0xae95, 0x3eaf, 0xff3e, 0xf8ff, 0xb2f9, 0xa1b3,
+ 0xb2a1, 0x5b2, 0xad05, 0x7cac, 0x2d7c, 0xd32c, 0x80d2, 0x7280, 0x8d72,
+ 0x1b8e, 0x831b, 0xac82, 0xfdac, 0xa7fd, 0x15a8, 0xd614, 0xe0d5, 0x7be0,
+ 0xb37b, 0x61b3, 0x9661, 0x9d95, 0xc79d, 0x83c7, 0xd883, 0xead7, 0xceb,
+ 0xf60c, 0xa9f5, 0x19a9, 0xa019, 0x8f9f, 0xd48f, 0x3ad5, 0x853a, 0x985,
+ 0x5309, 0x6f52, 0x1370, 0x6e13, 0xa96d, 0x98a9, 0x5198, 0x9f51, 0xb69f,
+ 0xa1b6, 0x2ea1, 0x672e, 0x2067, 0x6520, 0xaf65, 0x6eaf, 0x7e6f, 0xee7e,
+ 0x17ef, 0xa917, 0xcea8, 0x9ace, 0xff99, 0x5dff, 0xdf5d, 0x38df, 0xa39,
+ 0x1c0b, 0xe01b, 0x46e0, 0xcb46, 0x90cb, 0xba90, 0x4bb, 0x9104, 0x9d90,
+ 0xc89c, 0xf6c8, 0x6cf6, 0x886c, 0x1789, 0xbd17, 0x70bc, 0x7e71, 0x17e,
+ 0x1f01, 0xa01f, 0xbaa0, 0x14bb, 0xfc14, 0x7afb, 0xa07a, 0x3da0, 0xbf3d,
+ 0x48bf, 0x8c48, 0x968b, 0x9d96, 0xfd9d, 0x96fd, 0x9796, 0x6b97, 0xd16b,
+ 0xf4d1, 0x3bf4, 0x253c, 0x9125, 0x6691, 0xc166, 0x34c1, 0x5735, 0x1a57,
+ 0xdc19, 0x77db, 0x8577, 0x4a85, 0x824a, 0x9182, 0x7f91, 0xfd7f, 0xb4c3,
+ 0xb5b4, 0xb3b5, 0x7eb3, 0x617e, 0x4e61, 0xa4f, 0x530a, 0x3f52, 0xa33e,
+ 0x34a3, 0x9234, 0xf091, 0xf4f0, 0x1bf5, 0x311b, 0x9631, 0x6a96, 0x386b,
+ 0x1d39, 0xe91d, 0xe8e9, 0x69e8, 0x426a, 0xee42, 0x89ee, 0x368a, 0x2837,
+ 0x7428, 0x5974, 0x6159, 0x1d62, 0x7b1d, 0xf77a, 0x7bf7, 0x6b7c, 0x696c,
+ 0xf969, 0x4cf9, 0x714c, 0x4e71, 0x6b4e, 0x256c, 0x6e25, 0xe96d, 0x94e9,
+ 0x8f94, 0x3e8f, 0x343e, 0x4634, 0xb646, 0x97b5, 0x8997, 0xe8a, 0x900e,
+ 0x8090, 0xfd80, 0xa0fd, 0x16a1, 0xf416, 0xebf4, 0x95ec, 0x1196, 0x8911,
+ 0x3d89, 0xda3c, 0x9fd9, 0xd79f, 0x4bd7, 0x214c, 0x3021, 0x4f30, 0x994e,
+ 0x5c99, 0x6f5d, 0x326f, 0xab31, 0x6aab, 0xe969, 0x90e9, 0x1190, 0xff10,
+ 0xa2fe, 0xe0a2, 0x66e1, 0x4067, 0x9e3f, 0x2d9e, 0x712d, 0x8170, 0xd180,
+ 0xffd1, 0x25ff, 0x3826, 0x2538, 0x5f24, 0xc45e, 0x1cc4, 0xdf1c, 0x93df,
+ 0xc793, 0x80c7, 0x2380, 0xd223, 0x7ed2, 0xfc7e, 0x22fd, 0x7422, 0x1474,
+ 0xb714, 0x7db6, 0x857d, 0xa85, 0xa60a, 0x88a6, 0x4289, 0x7842, 0xc278,
+ 0xf7c2, 0xcdf7, 0x84cd, 0xae84, 0x8cae, 0xb98c, 0x1aba, 0x4d1a, 0x884c,
+ 0x4688, 0xcc46, 0xd8cb, 0x2bd9, 0xbe2b, 0xa2be, 0x72a2, 0xf772, 0xd2f6,
+ 0x75d2, 0xc075, 0xa3c0, 0x63a3, 0xae63, 0x8fae, 0x2a90, 0x5f2a, 0xef5f,
+ 0x5cef, 0xa05c, 0x89a0, 0x5e89, 0x6b5e, 0x736b, 0x773, 0x9d07, 0xe99c,
+ 0x27ea, 0x2028, 0xc20, 0x980b, 0x4797, 0x2848, 0x9828, 0xc197, 0x48c2,
+ 0x2449, 0x7024, 0x570, 0x3e05, 0xd3e, 0xf60c, 0xbbf5, 0x69bb, 0x3f6a,
+ 0x740, 0xf006, 0xe0ef, 0xbbe0, 0xadbb, 0x56ad, 0xcf56, 0xbfce, 0xa9bf,
+ 0x205b, 0x6920, 0xae69, 0x50ae, 0x2050, 0xf01f, 0x27f0, 0x9427, 0x8993,
+ 0x8689, 0x4087, 0x6e40, 0xb16e, 0xa1b1, 0xe8a1, 0x87e8, 0x6f88, 0xfe6f,
+ 0x4cfe, 0xe94d, 0xd5e9, 0x47d6, 0x3148, 0x5f31, 0xc35f, 0x13c4, 0xa413,
+ 0x5a5, 0x2405, 0xc223, 0x66c2, 0x3667, 0x5e37, 0x5f5e, 0x2f5f, 0x8c2f,
+ 0xe48c, 0xd0e4, 0x4d1, 0xd104, 0xe4d0, 0xcee4, 0xfcf, 0x480f, 0xa447,
+ 0x5ea4, 0xff5e, 0xbefe, 0x8dbe, 0x1d8e, 0x411d, 0x1841, 0x6918, 0x5469,
+ 0x1155, 0xc611, 0xaac6, 0x37ab, 0x2f37, 0xca2e, 0x87ca, 0xbd87, 0xabbd,
+ 0xb3ab, 0xcb4, 0xce0c, 0xfccd, 0xa5fd, 0x72a5, 0xf072, 0x83f0, 0xfe83,
+ 0x97fd, 0xc997, 0xb0c9, 0xadb0, 0xe6ac, 0x88e6, 0x1088, 0xbe10, 0x16be,
+ 0xa916, 0xa3a8, 0x46a3, 0x5447, 0xe953, 0x84e8, 0x2085, 0xa11f, 0xfa1,
+ 0xdd0f, 0xbedc, 0x5abe, 0x805a, 0xc97f, 0x6dc9, 0x826d, 0x4a82, 0x934a,
+ 0x5293, 0xd852, 0xd3d8, 0xadd3, 0xf4ad, 0xf3f4, 0xfcf3, 0xfefc, 0xcafe,
+ 0xb7ca, 0x3cb8, 0xa13c, 0x18a1, 0x1418, 0xea13, 0x91ea, 0xf891, 0x53f8,
+ 0xa254, 0xe9a2, 0x87ea, 0x4188, 0x1c41, 0xdc1b, 0xf5db, 0xcaf5, 0x45ca,
+ 0x6d45, 0x396d, 0xde39, 0x90dd, 0x1e91, 0x1e, 0x7b00, 0x6a7b, 0xa46a,
+ 0xc9a3, 0x9bc9, 0x389b, 0x1139, 0x5211, 0x1f52, 0xeb1f, 0xabeb, 0x48ab,
+ 0x9348, 0xb392, 0x17b3, 0x1618, 0x5b16, 0x175b, 0xdc17, 0xdedb, 0x1cdf,
+ 0xeb1c, 0xd1ea, 0x4ad2, 0xd4b, 0xc20c, 0x24c2, 0x7b25, 0x137b, 0x8b13,
+ 0x618b, 0xa061, 0xff9f, 0xfffe, 0x72ff, 0xf572, 0xe2f5, 0xcfe2, 0xd2cf,
+ 0x75d3, 0x6a76, 0xc469, 0x1ec4, 0xfc1d, 0x59fb, 0x455a, 0x7a45, 0xa479,
+ 0xb7a4
+};
+
static u8 tmp_buf[TEST_BUFLEN];
#define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum))
@@ -338,10 +575,58 @@ static void test_csum_no_carry_inputs(struct kunit *test)
}
}
+static void test_ip_fast_csum(struct kunit *test)
+{
+ __sum16 csum_result;
+ u16 expected;
+
+ for (int len = IPv4_MIN_WORDS; len < IPv4_MAX_WORDS; len++) {
+ for (int index = 0; index < NUM_IP_FAST_CSUM_TESTS; index++) {
+ csum_result = ip_fast_csum(random_buf + index, len);
+ expected =
+ expected_fast_csum[(len - IPv4_MIN_WORDS) *
+ NUM_IP_FAST_CSUM_TESTS +
+ index];
+ CHECK_EQ(to_sum16(expected), csum_result);
+ }
+ }
+}
+
+static void test_csum_ipv6_magic(struct kunit *test)
+{
+#if defined(CONFIG_NET)
+ const struct in6_addr *saddr;
+ const struct in6_addr *daddr;
+ unsigned int len;
+ unsigned char proto;
+ __wsum csum;
+
+ const int daddr_offset = sizeof(struct in6_addr);
+ const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr);
+ const int proto_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
+ sizeof(int);
+ const int csum_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
+ sizeof(int) + sizeof(char);
+
+ for (int i = 0; i < NUM_IPv6_TESTS; i++) {
+ saddr = (const struct in6_addr *)(random_buf + i);
+ daddr = (const struct in6_addr *)(random_buf + i +
+ daddr_offset);
+ len = le32_to_cpu(*(__le32 *)(random_buf + i + len_offset));
+ proto = *(random_buf + i + proto_offset);
+ csum = *(__wsum *)(random_buf + i + csum_offset);
+ CHECK_EQ(to_sum16(expected_csum_ipv6_magic[i]),
+ csum_ipv6_magic(saddr, daddr, len, proto, csum));
+ }
+#endif /* !CONFIG_NET */
+}
+
static struct kunit_case __refdata checksum_test_cases[] = {
KUNIT_CASE(test_csum_fixed_random_inputs),
KUNIT_CASE(test_csum_all_carry_inputs),
KUNIT_CASE(test_csum_no_carry_inputs),
+ KUNIT_CASE(test_ip_fast_csum),
+ KUNIT_CASE(test_csum_ipv6_magic),
{}
};
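
Note: for reference, the two interfaces exercised by the new test cases are conventionally declared as below, per the asm/checksum.h and net/ip6_checksum.h headers included above (a sketch; each architecture supplies its own optimized implementation):

/* 16-bit one's complement checksum of an IPv4 header; ihl is in 32-bit words. */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl);

/* Fold the IPv6 pseudo-header into a running payload checksum. */
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum csum);
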
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index d1a7d29d2ac9..9cddf35d3b66 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -49,46 +49,6 @@ u16 const crc_ccitt_table[256] = {
};
EXPORT_SYMBOL(crc_ccitt_table);
-/*
- * Similar table to calculate CRC16 variant known as CRC-CCITT-FALSE
- * Reflected bits order, does not augment final value.
- */
-u16 const crc_ccitt_false_table[256] = {
- 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
- 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
- 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
- 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
- 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
- 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
- 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
- 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
- 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
- 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
- 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
- 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
- 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
- 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
- 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
- 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
- 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
- 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
- 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
- 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
- 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
- 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
- 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
- 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
- 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
- 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
- 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
- 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
- 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
- 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
- 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
- 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
-};
-EXPORT_SYMBOL(crc_ccitt_false_table);
-
/**
* crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
* buffer
@@ -104,20 +64,5 @@ u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
}
EXPORT_SYMBOL(crc_ccitt);
-/**
- * crc_ccitt_false - recompute the CRC (CRC-CCITT-FALSE variant)
- * for the data buffer
- * @crc: previous CRC value
- * @buffer: data pointer
- * @len: number of bytes in the buffer
- */
-u16 crc_ccitt_false(u16 crc, u8 const *buffer, size_t len)
-{
- while (len--)
- crc = crc_ccitt_false_byte(crc, *buffer++);
- return crc;
-}
-EXPORT_SYMBOL(crc_ccitt_false);
-
MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");
diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c
index c632d6e17af8..6bba6473fdf3 100644
--- a/lib/crypto/aesgcm.c
+++ b/lib/crypto/aesgcm.c
@@ -73,6 +73,19 @@ static void aesgcm_ghash(be128 *ghash, const be128 *key, const void *src,
}
}
+/**
+ * aesgcm_mac - Generates the authentication tag using AES-GCM algorithm.
+ * @ctx: The data structure that will hold the AES-GCM key schedule
+ * @src: The input source data.
+ * @src_len: Length of the source data.
+ * @assoc: Points to the associated data.
+ * @assoc_len: Length of the associated data values.
+ * @ctr: Points to the counter value.
+ * @authtag: The output buffer for the authentication tag.
+ *
+ * It takes in the AES-GCM context, source data, associated data, counter value,
+ * and an output buffer for the authentication tag.
+ */
static void aesgcm_mac(const struct aesgcm_ctx *ctx, const u8 *src, int src_len,
const u8 *assoc, int assoc_len, __be32 *ctr, u8 *authtag)
{
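
Note: aesgcm_mac() stays static; the tag it computes reaches users through the library's public entry points. A minimal usage sketch, assuming the aesgcm_expandkey()/aesgcm_encrypt() interfaces exposed by <crypto/aesgcm.h> (treat the exact prototypes as illustrative):

#include <crypto/aesgcm.h>

/* Encrypt 'len' bytes of 'buf' in place and emit a 16-byte authentication tag. */
static int example_seal(struct aesgcm_ctx *ctx, const u8 *key, unsigned int keylen,
			const u8 iv[GCM_AES_IV_SIZE],
			const u8 *assoc, int assoc_len,
			u8 *buf, int len, u8 tag[16])
{
	/* Expand the AES key schedule and fix the tag size up front. */
	if (aesgcm_expandkey(ctx, key, keylen, 16))
		return -EINVAL;

	/* The tag covers assoc_len bytes of AAD plus the ciphertext. */
	aesgcm_encrypt(ctx, buf, buf, len, assoc, assoc_len, iv, tag);
	return 0;
}
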
diff --git a/lib/crypto/mpi/ec.c b/lib/crypto/mpi/ec.c
index 40f5908e57a4..e16dca1e23d5 100644
--- a/lib/crypto/mpi/ec.c
+++ b/lib/crypto/mpi/ec.c
@@ -584,6 +584,9 @@ void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
ctx->a = mpi_copy(a);
ctx->b = mpi_copy(b);
+ ctx->d = NULL;
+ ctx->t.two_inv_p = NULL;
+
ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL;
mpi_ec_get_reset(ctx);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2a8e9d63fbe3..fb12a9bacd2f 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -620,9 +620,8 @@ static void debug_objects_fill_pool(void)
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
debug_objects_fill_pool();
@@ -643,24 +642,18 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
obj->state = ODEBUG_STATE_INIT;
- break;
-
- case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "init");
- debug_object_fixup(descr->fixup_init, addr, state);
- return;
-
- case ODEBUG_STATE_DESTROYED:
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "init");
return;
default:
break;
}
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "init");
+
+ if (o.state == ODEBUG_STATE_ACTIVE)
+ debug_object_fixup(descr->fixup_init, addr, o.state);
}
/**
@@ -701,11 +694,9 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
- enum debug_obj_state state;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- int ret;
if (!debug_objects_enabled)
return 0;
@@ -717,49 +708,38 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object_or_alloc(addr, db, descr, false, true);
- if (likely(!IS_ERR_OR_NULL(obj))) {
- bool print_object = false;
-
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return 0;
+ } else if (likely(!IS_ERR(obj))) {
switch (obj->state) {
- case ODEBUG_STATE_INIT:
- case ODEBUG_STATE_INACTIVE:
- obj->state = ODEBUG_STATE_ACTIVE;
- ret = 0;
- break;
-
case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr, state);
- return ret ? 0 : -EINVAL;
-
case ODEBUG_STATE_DESTROYED:
- print_object = true;
- ret = -EINVAL;
+ o = *obj;
break;
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_ACTIVE;
+ fallthrough;
default:
- ret = 0;
- break;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return 0;
}
- raw_spin_unlock_irqrestore(&db->lock, flags);
- if (print_object)
- debug_print_object(obj, "activate");
- return ret;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "activate");
- /* If NULL the allocation has hit OOM */
- if (!obj) {
- debug_objects_oom();
- return 0;
+ switch (o.state) {
+ case ODEBUG_STATE_ACTIVE:
+ case ODEBUG_STATE_NOTAVAILABLE:
+ if (debug_object_fixup(descr->fixup_activate, addr, o.state))
+ return 0;
+ fallthrough;
+ default:
+ return -EINVAL;
}
-
- /* Object is neither static nor tracked. It's not initialized */
- debug_print_object(&o, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
- return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
@@ -770,10 +750,10 @@ EXPORT_SYMBOL_GPL(debug_object_activate);
*/
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -785,33 +765,24 @@ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
obj = lookup_object(addr, db);
if (obj) {
switch (obj->state) {
+ case ODEBUG_STATE_DESTROYED:
+ break;
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
case ODEBUG_STATE_ACTIVE:
- if (!obj->astate)
- obj->state = ODEBUG_STATE_INACTIVE;
- else
- print_object = true;
- break;
-
- case ODEBUG_STATE_DESTROYED:
- print_object = true;
- break;
+ if (obj->astate)
+ break;
+ obj->state = ODEBUG_STATE_INACTIVE;
+ fallthrough;
default:
- break;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
}
+ o = *obj;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- debug_print_object(&o, "deactivate");
- } else if (print_object) {
- debug_print_object(obj, "deactivate");
- }
+ debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
@@ -822,11 +793,9 @@ EXPORT_SYMBOL_GPL(debug_object_deactivate);
*/
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -836,32 +805,31 @@ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
- if (!obj)
- goto out_unlock;
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ case ODEBUG_STATE_DESTROYED:
+ break;
case ODEBUG_STATE_NONE:
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
obj->state = ODEBUG_STATE_DESTROYED;
- break;
- case ODEBUG_STATE_ACTIVE:
- state = obj->state;
+ fallthrough;
+ default:
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "destroy");
- debug_object_fixup(descr->fixup_destroy, addr, state);
return;
-
- case ODEBUG_STATE_DESTROYED:
- print_object = true;
- break;
- default:
- break;
}
-out_unlock:
+
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (print_object)
- debug_print_object(obj, "destroy");
+ debug_print_object(&o, "destroy");
+
+ if (o.state == ODEBUG_STATE_ACTIVE)
+ debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
@@ -872,9 +840,8 @@ EXPORT_SYMBOL_GPL(debug_object_destroy);
*/
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
if (!debug_objects_enabled)
@@ -885,24 +852,26 @@ void debug_object_free(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
- if (!obj)
- goto out_unlock;
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "free");
- debug_object_fixup(descr->fixup_free, addr, state);
- return;
+ break;
default:
hlist_del(&obj->node);
raw_spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
return;
}
-out_unlock:
+
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "free");
+
+ debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);
@@ -954,10 +923,10 @@ void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
unsigned int expect, unsigned int next)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -970,28 +939,19 @@ debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
if (obj) {
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- if (obj->astate == expect)
- obj->astate = next;
- else
- print_object = true;
- break;
-
+ if (obj->astate != expect)
+ break;
+ obj->astate = next;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
default:
- print_object = true;
break;
}
+ o = *obj;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- debug_print_object(&o, "active_state");
- } else if (print_object) {
- debug_print_object(obj, "active_state");
- }
+ debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
@@ -999,12 +959,10 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
- const struct debug_obj_descr *descr;
- enum debug_obj_state state;
+ int cnt, objs_checked = 0;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
struct hlist_node *tmp;
- struct debug_obj *obj;
- int cnt, objs_checked = 0;
saddr = (unsigned long) address;
eaddr = saddr + size;
@@ -1026,12 +984,10 @@ repeat:
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- descr = obj->descr;
- state = obj->state;
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "free");
- debug_object_fixup(descr->fixup_free,
- (void *) oaddr, state);
+ debug_print_object(&o, "free");
+ debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
goto repeat;
default:
hlist_del(&obj->node);
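
Note: the common thread of this rework is to snapshot the tracked object while the hash-bucket lock is held, drop the lock, and then report and run fixups from the stack copy, so the live object cannot be freed or recycled underneath the printout. A condensed sketch of the pattern (hypothetical helper; the real call sites open-code it):

static void report_tracked_object(struct debug_bucket *db, void *addr,
				  const char *msg)
{
	struct debug_obj *obj, o;
	unsigned long flags;

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}
	o = *obj;				/* snapshot under the lock */
	raw_spin_unlock_irqrestore(&db->lock, flags);

	debug_print_object(&o, msg);		/* report from the copy */
}
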
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index c8c33cbaae9e..2e4fedc81621 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -15,6 +15,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <kunit/device.h>
#include <kunit/test.h>
#include <linux/device.h>
#include <linux/slab.h>
@@ -269,7 +270,7 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
size_t len; \
\
/* Create dummy device for devm_kmalloc()-family tests. */ \
- dev = root_device_register(dev_name); \
+ dev = kunit_device_register(test, dev_name); \
KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
"Cannot register test device\n"); \
\
@@ -303,7 +304,7 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
devm_kfree(dev, p)); \
\
- device_unregister(dev); \
+ kunit_device_unregister(test, dev); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
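
Note: with the switch to kunit_device_register(), the dummy device is owned by the test and torn down automatically when the test ends. A standalone sketch of the same pattern (hypothetical test name; assertions abbreviated):

#include <kunit/device.h>
#include <kunit/test.h>
#include <linux/device.h>
#include <linux/slab.h>

static void devm_alloc_example(struct kunit *test)
{
	struct device *dev;
	void *p;

	/* Device is bound to the test; no root_device_register() needed. */
	dev = kunit_device_register(test, "fortify-example");
	KUNIT_ASSERT_FALSE(test, IS_ERR(dev));

	p = devm_kmalloc(dev, 32, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, p);

	/* Explicit unregister is optional; cleanup also runs at test exit. */
	kunit_device_unregister(test, dev);
}
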
diff --git a/lib/fw_table.c b/lib/fw_table.c
index 294df54e33b6..c3569d2ba503 100644
--- a/lib/fw_table.c
+++ b/lib/fw_table.c
@@ -12,12 +12,14 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/fw_table.h>
enum acpi_subtable_type {
ACPI_SUBTABLE_COMMON,
ACPI_SUBTABLE_HMAT,
ACPI_SUBTABLE_PRMT,
ACPI_SUBTABLE_CEDT,
+ CDAT_SUBTABLE,
};
struct acpi_subtable_entry {
@@ -25,7 +27,7 @@ struct acpi_subtable_entry {
enum acpi_subtable_type type;
};
-static unsigned long __init_or_acpilib
+static unsigned long __init_or_fwtbl_lib
acpi_get_entry_type(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@@ -37,11 +39,13 @@ acpi_get_entry_type(struct acpi_subtable_entry *entry)
return 0;
case ACPI_SUBTABLE_CEDT:
return entry->hdr->cedt.type;
+ case CDAT_SUBTABLE:
+ return entry->hdr->cdat.type;
}
return 0;
}
-static unsigned long __init_or_acpilib
+static unsigned long __init_or_fwtbl_lib
acpi_get_entry_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@@ -53,11 +57,16 @@ acpi_get_entry_length(struct acpi_subtable_entry *entry)
return entry->hdr->prmt.length;
case ACPI_SUBTABLE_CEDT:
return entry->hdr->cedt.length;
+ case CDAT_SUBTABLE: {
+ __le16 length = (__force __le16)entry->hdr->cdat.length;
+
+ return le16_to_cpu(length);
+ }
}
return 0;
}
-static unsigned long __init_or_acpilib
+static unsigned long __init_or_fwtbl_lib
acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@@ -69,11 +78,13 @@ acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
return sizeof(entry->hdr->prmt);
case ACPI_SUBTABLE_CEDT:
return sizeof(entry->hdr->cedt);
+ case CDAT_SUBTABLE:
+ return sizeof(entry->hdr->cdat);
}
return 0;
}
-static enum acpi_subtable_type __init_or_acpilib
+static enum acpi_subtable_type __init_or_fwtbl_lib
acpi_get_subtable_type(char *id)
{
if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
@@ -82,17 +93,27 @@ acpi_get_subtable_type(char *id)
return ACPI_SUBTABLE_PRMT;
if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
return ACPI_SUBTABLE_CEDT;
+ if (strncmp(id, ACPI_SIG_CDAT, 4) == 0)
+ return CDAT_SUBTABLE;
return ACPI_SUBTABLE_COMMON;
}
-static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
+static unsigned long __init_or_fwtbl_lib
+acpi_table_get_length(enum acpi_subtable_type type,
+ union fw_table_header *header)
{
- return proc->handler || proc->handler_arg;
+ if (type == CDAT_SUBTABLE) {
+ __le32 length = (__force __le32)header->cdat.length;
+
+ return le32_to_cpu(length);
+ }
+
+ return header->acpi.length;
}
-static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
- union acpi_subtable_headers *hdr,
- unsigned long end)
+static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
+ union acpi_subtable_headers *hdr,
+ unsigned long end)
{
if (proc->handler)
return proc->handler(hdr, end);
@@ -124,46 +145,42 @@ static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
* On success returns sum of all matching entries for all proc handlers.
* Otherwise, -ENODEV or -EINVAL is returned.
*/
-int __init_or_acpilib
+int __init_or_fwtbl_lib
acpi_parse_entries_array(char *id, unsigned long table_size,
- struct acpi_table_header *table_header,
+ union fw_table_header *table_header,
struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries)
{
unsigned long table_end, subtable_len, entry_len;
struct acpi_subtable_entry entry;
+ enum acpi_subtable_type type;
int count = 0;
- int errs = 0;
int i;
- table_end = (unsigned long)table_header + table_header->length;
+ type = acpi_get_subtable_type(id);
+ table_end = (unsigned long)table_header +
+ acpi_table_get_length(type, table_header);
/* Parse all entries looking for a match. */
- entry.type = acpi_get_subtable_type(id);
+ entry.type = type;
entry.hdr = (union acpi_subtable_headers *)
((unsigned long)table_header + table_size);
subtable_len = acpi_get_subtable_header_length(&entry);
- while (((unsigned long)entry.hdr) + subtable_len < table_end) {
- if (max_entries && count >= max_entries)
- break;
-
+ while (((unsigned long)entry.hdr) + subtable_len < table_end) {
for (i = 0; i < proc_num; i++) {
if (acpi_get_entry_type(&entry) != proc[i].id)
continue;
- if (!has_handler(&proc[i]) ||
- (!errs &&
- call_handler(&proc[i], entry.hdr, table_end))) {
- errs++;
- continue;
- }
+
+ if (!max_entries || count < max_entries)
+ if (call_handler(&proc[i], entry.hdr, table_end))
+ return -EINVAL;
proc[i].count++;
+ count++;
break;
}
- if (i != proc_num)
- count++;
/*
* If entry->length is 0, break from this loop to avoid
@@ -180,9 +197,31 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
}
if (max_entries && count > max_entries) {
- pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n",
- id, proc->id, count);
+ pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
+ id, proc->id, count - max_entries, count);
}
- return errs ? -EINVAL : count;
+ return count;
+}
+
+int __init_or_fwtbl_lib
+cdat_table_parse(enum acpi_cdat_type type,
+ acpi_tbl_entry_handler_arg handler_arg,
+ void *arg,
+ struct acpi_table_cdat *table_header)
+{
+ struct acpi_subtable_proc proc = {
+ .id = type,
+ .handler_arg = handler_arg,
+ .arg = arg,
+ };
+
+ if (!table_header)
+ return -EINVAL;
+
+ return acpi_parse_entries_array(ACPI_SIG_CDAT,
+ sizeof(struct acpi_table_cdat),
+ (union fw_table_header *)table_header,
+ &proc, 1, 0);
}
+EXPORT_SYMBOL_FWTBL_LIB(cdat_table_parse);
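
Note: a sketch of a cdat_table_parse() caller with a hypothetical handler. The handler is assumed to follow the acpi_tbl_entry_handler_arg signature, and the CDAT type constant comes from the ACPICA headers:

static int count_dsmas_handler(union acpi_subtable_headers *header, void *arg,
			       const unsigned long end)
{
	int *count = arg;

	(*count)++;		/* one DSMAS structure seen */
	return 0;
}

static int count_dsmas(struct acpi_table_cdat *cdat)
{
	int count = 0;
	int rc;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, count_dsmas_handler,
			      &count, cdat);
	return rc < 0 ? rc : count;
}
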
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 8ff6824a1005..cf2eb2b2f983 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -166,7 +166,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter) {
.iter_type = ITER_IOVEC,
- .copy_mc = false,
.nofault = false,
.data_source = direction,
.__iov = iov,
@@ -245,26 +244,8 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
static __always_inline
-size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
- size_t len, void *to, void *priv2)
-{
- return copy_mc_to_kernel(to + progress, iter_from, len);
-}
-
-static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i)
-{
- if (unlikely(i->count < bytes))
- bytes = i->count;
- if (unlikely(!bytes))
- return 0;
- return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
-}
-
-static __always_inline
size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(iov_iter_is_copy_mc(i)))
- return __copy_from_iter_mc(addr, bytes, i);
return iterate_and_advance(i, bytes, addr,
copy_from_user_iter, memcpy_from_iter);
}
@@ -633,7 +614,6 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter){
.iter_type = ITER_KVEC,
- .copy_mc = false,
.data_source = direction,
.kvec = kvec,
.nr_segs = nr_segs,
@@ -650,7 +630,6 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter){
.iter_type = ITER_BVEC,
- .copy_mc = false,
.data_source = direction,
.bvec = bvec,
.nr_segs = nr_segs,
@@ -679,7 +658,6 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
BUG_ON(direction & ~1);
*i = (struct iov_iter) {
.iter_type = ITER_XARRAY,
- .copy_mc = false,
.data_source = direction,
.xarray = xarray,
.xarray_start = start,
@@ -703,7 +681,6 @@ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
BUG_ON(direction != READ);
*i = (struct iov_iter){
.iter_type = ITER_DISCARD,
- .copy_mc = false,
.data_source = false,
.count = count,
.iov_offset = 0
@@ -1369,19 +1346,6 @@ ssize_t import_iovec(int type, const struct iovec __user *uvec,
}
EXPORT_SYMBOL(import_iovec);
-int import_single_range(int rw, void __user *buf, size_t len,
- struct iovec *iov, struct iov_iter *i)
-{
- if (len > MAX_RW_COUNT)
- len = MAX_RW_COUNT;
- if (unlikely(!access_ok(buf, len)))
- return -EFAULT;
-
- iov_iter_ubuf(i, rw, buf, len);
- return 0;
-}
-EXPORT_SYMBOL(import_single_range);
-
int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
if (len > MAX_RW_COUNT)
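
Note: import_single_range() is removed in favour of import_ubuf(), which performs the same clamp and iterator setup without the throwaway struct iovec. A migration sketch for a former caller (illustrative function and variable names):

static ssize_t example_read_user(void __user *buf, size_t len)
{
	struct iov_iter iter;
	int ret;

	/*
	 * Before this change:
	 *	struct iovec iov;
	 *	ret = import_single_range(READ, buf, len, &iov, &iter);
	 */
	ret = import_ubuf(READ, buf, len, &iter);
	if (ret)
		return ret;

	return iov_iter_count(&iter);
}
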
diff --git a/lib/kobject.c b/lib/kobject.c
index 59dbcbdb1c91..72fa20f405f1 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -74,10 +74,12 @@ static int create_dir(struct kobject *kobj)
if (error)
return error;
- error = sysfs_create_groups(kobj, ktype->default_groups);
- if (error) {
- sysfs_remove_dir(kobj);
- return error;
+ if (ktype) {
+ error = sysfs_create_groups(kobj, ktype->default_groups);
+ if (error) {
+ sysfs_remove_dir(kobj);
+ return error;
+ }
}
/*
@@ -589,7 +591,8 @@ static void __kobject_del(struct kobject *kobj)
sd = kobj->sd;
ktype = get_ktype(kobj);
- sysfs_remove_groups(kobj, ktype->default_groups);
+ if (ktype)
+ sysfs_remove_groups(kobj, ktype->default_groups);
/* send "remove" if the caller did not do it but sent "add" */
if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
@@ -666,6 +669,10 @@ static void kobject_cleanup(struct kobject *kobj)
pr_debug("'%s' (%p): %s, parent %p\n",
kobject_name(kobj), kobj, __func__, kobj->parent);
+ if (t && !t->release)
+ pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
+ kobject_name(kobj), kobj);
+
/* remove from sysfs if the caller did not do it */
if (kobj->state_in_sysfs) {
pr_debug("'%s' (%p): auto cleanup kobject_del\n",
@@ -676,13 +683,10 @@ static void kobject_cleanup(struct kobject *kobj)
parent = NULL;
}
- if (t->release) {
+ if (t && t->release) {
pr_debug("'%s' (%p): calling ktype release\n",
kobject_name(kobj), kobj);
t->release(kobj);
- } else {
- pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
- kobject_name(kobj), kobj);
}
/* free name if we allocated it */
@@ -1056,7 +1060,7 @@ const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *pa
{
const struct kobj_ns_type_operations *ops = NULL;
- if (parent && parent->ktype->child_ns_type)
+ if (parent && parent->ktype && parent->ktype->child_ns_type)
ops = parent->ktype->child_ns_type(parent);
return ops;
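
Note: these kobject changes tolerate a NULL ktype throughout (default-group creation and removal, namespace lookup) and emit the missing-release() warning before the sysfs teardown. For contrast, a dynamically allocated kobject is still expected to carry a ktype with a release() callback, roughly as in this sketch (hypothetical type names):

#include <linux/kobject.h>
#include <linux/slab.h>

struct example_obj {
	struct kobject kobj;
	int value;
};

static void example_release(struct kobject *kobj)
{
	/* Free the containing object once the last reference is dropped. */
	kfree(container_of(kobj, struct example_obj, kobj));
}

static const struct kobj_type example_ktype = {
	.release	= example_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};
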
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 46f75f23dfe4..309659a32a78 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -7,7 +7,8 @@ kunit-objs += test.o \
assert.o \
try-catch.o \
executor.o \
- attributes.o
+ attributes.o \
+ device.o
ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
kunit-objs += debugfs.o
diff --git a/lib/kunit/attributes.c b/lib/kunit/attributes.c
index 1b512f7e1838..2cf04cc09372 100644
--- a/lib/kunit/attributes.c
+++ b/lib/kunit/attributes.c
@@ -58,6 +58,16 @@ static const char *attr_enum_to_string(void *attr, const char * const str_list[]
return str_list[val];
}
+static const char *attr_bool_to_string(void *attr, bool *to_free)
+{
+ bool val = (bool)attr;
+
+ *to_free = false;
+ if (val)
+ return "true";
+ return "false";
+}
+
static const char *attr_speed_to_string(void *attr, bool *to_free)
{
return attr_enum_to_string(attr, speed_str_list, to_free);
@@ -166,6 +176,37 @@ static int attr_string_filter(void *attr, const char *input, int *err)
return false;
}
+static int attr_bool_filter(void *attr, const char *input, int *err)
+{
+ int i, input_int = -1;
+ long val = (long)attr;
+ const char *input_str = NULL;
+
+ for (i = 0; input[i]; i++) {
+ if (!strchr(op_list, input[i])) {
+ input_str = input + i;
+ break;
+ }
+ }
+
+ if (!input_str) {
+ *err = -EINVAL;
+ pr_err("kunit executor: filter value not found: %s\n", input);
+ return false;
+ }
+
+ if (!strcmp(input_str, "true"))
+ input_int = (int)true;
+ else if (!strcmp(input_str, "false"))
+ input_int = (int)false;
+ else {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ }
+
+ return int_filter(val, input, input_int, err);
+}
/* Get Attribute Methods */
@@ -194,6 +235,17 @@ static void *attr_module_get(void *test_or_suite, bool is_test)
return (void *) "";
}
+static void *attr_is_init_get(void *test_or_suite, bool is_test)
+{
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ if (test)
+ return ((void *) NULL);
+ else
+ return ((void *) suite->is_init);
+}
+
/* List of all Test Attributes */
static struct kunit_attr kunit_attr_list[] = {
@@ -212,6 +264,14 @@ static struct kunit_attr kunit_attr_list[] = {
.filter = attr_string_filter,
.attr_default = (void *)"",
.print = PRINT_SUITE,
+ },
+ {
+ .name = "is_init",
+ .get_attr = attr_is_init_get,
+ .to_string = attr_bool_to_string,
+ .filter = attr_bool_filter,
+ .attr_default = (void *)false,
+ .print = PRINT_SUITE,
}
};
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index 270d185737e6..d548750a325a 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -8,12 +8,14 @@
#include <linux/module.h>
#include <kunit/test.h>
+#include <kunit/test-bug.h>
#include "string-stream.h"
#include "debugfs.h"
#define KUNIT_DEBUGFS_ROOT "kunit"
#define KUNIT_DEBUGFS_RESULTS "results"
+#define KUNIT_DEBUGFS_RUN "run"
/*
* Create a debugfs representation of test suites:
@@ -21,6 +23,8 @@
* Path Semantics
* /sys/kernel/debug/kunit/<testsuite>/results Show results of last run for
* testsuite
+ * /sys/kernel/debug/kunit/<testsuite>/run Write to this file to trigger
+ * testsuite to run
*
*/
@@ -60,12 +64,14 @@ static void debugfs_print_result(struct seq_file *seq, struct string_stream *log
static int debugfs_print_results(struct seq_file *seq, void *v)
{
struct kunit_suite *suite = (struct kunit_suite *)seq->private;
- enum kunit_status success = kunit_suite_has_succeeded(suite);
+ enum kunit_status success;
struct kunit_case *test_case;
if (!suite)
return 0;
+ success = kunit_suite_has_succeeded(suite);
+
/* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
seq_puts(seq, "KTAP version 1\n");
seq_puts(seq, "1..1\n");
@@ -99,6 +105,51 @@ static int debugfs_results_open(struct inode *inode, struct file *file)
return single_open(file, debugfs_print_results, suite);
}
+/*
+ * Print a usage message to the debugfs "run" file
+ * (/sys/kernel/debug/kunit/<testsuite>/run) if opened.
+ */
+static int debugfs_print_run(struct seq_file *seq, void *v)
+{
+ struct kunit_suite *suite = (struct kunit_suite *)seq->private;
+
+ seq_puts(seq, "Write to this file to trigger the test suite to run.\n");
+ seq_printf(seq, "usage: echo \"any string\" > /sys/kernel/debug/kunit/%s/run\n",
+ suite->name);
+ return 0;
+}
+
+/*
+ * The debugfs "run" file (/sys/kernel/debug/kunit/<testsuite>/run)
+ * contains no information. Write to the file to trigger the test suite
+ * to run.
+ */
+static int debugfs_run_open(struct inode *inode, struct file *file)
+{
+ struct kunit_suite *suite;
+
+ suite = (struct kunit_suite *)inode->i_private;
+
+ return single_open(file, debugfs_print_run, suite);
+}
+
+/*
+ * Trigger a test suite to run by writing to the suite's "run" debugfs
+ * file found at: /sys/kernel/debug/kunit/<testsuite>/run
+ *
+ * Note: what is written to this file will not be saved.
+ */
+static ssize_t debugfs_run(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct inode *f_inode = file->f_inode;
+ struct kunit_suite *suite = (struct kunit_suite *) f_inode->i_private;
+
+ __kunit_test_suites_init(&suite, 1);
+
+ return count;
+}
+
static const struct file_operations debugfs_results_fops = {
.open = debugfs_results_open,
.read = seq_read,
@@ -106,17 +157,43 @@ static const struct file_operations debugfs_results_fops = {
.release = debugfs_release,
};
+static const struct file_operations debugfs_run_fops = {
+ .open = debugfs_run_open,
+ .read = seq_read,
+ .write = debugfs_run,
+ .llseek = seq_lseek,
+ .release = debugfs_release,
+};
+
void kunit_debugfs_create_suite(struct kunit_suite *suite)
{
struct kunit_case *test_case;
+ struct string_stream *stream;
+
+ /* If suite log already allocated, do not create new debugfs files. */
+ if (suite->log)
+ return;
- /* Allocate logs before creating debugfs representation. */
- suite->log = alloc_string_stream(GFP_KERNEL);
- string_stream_set_append_newlines(suite->log, true);
+ /*
+ * Allocate logs before creating debugfs representation.
+ * The suite->log and test_case->log pointers are expected to be NULL
+ * if there isn't a log, so only set it if the log stream was created
+ * successfully.
+ */
+ stream = alloc_string_stream(GFP_KERNEL);
+ if (IS_ERR_OR_NULL(stream))
+ return;
+
+ string_stream_set_append_newlines(stream, true);
+ suite->log = stream;
kunit_suite_for_each_test_case(suite, test_case) {
- test_case->log = alloc_string_stream(GFP_KERNEL);
- string_stream_set_append_newlines(test_case->log, true);
+ stream = alloc_string_stream(GFP_KERNEL);
+ if (IS_ERR_OR_NULL(stream))
+ goto err;
+
+ string_stream_set_append_newlines(stream, true);
+ test_case->log = stream;
}
suite->debugfs = debugfs_create_dir(suite->name, debugfs_rootdir);
@@ -124,6 +201,19 @@ void kunit_debugfs_create_suite(struct kunit_suite *suite)
debugfs_create_file(KUNIT_DEBUGFS_RESULTS, S_IFREG | 0444,
suite->debugfs,
suite, &debugfs_results_fops);
+
+ /* Do not create file to re-run test if test runs on init */
+ if (!suite->is_init) {
+ debugfs_create_file(KUNIT_DEBUGFS_RUN, S_IFREG | 0644,
+ suite->debugfs,
+ suite, &debugfs_run_fops);
+ }
+ return;
+
+err:
+ string_stream_destroy(suite->log);
+ kunit_suite_for_each_test_case(suite, test_case)
+ string_stream_destroy(test_case->log);
}
void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
diff --git a/lib/kunit/device-impl.h b/lib/kunit/device-impl.h
new file mode 100644
index 000000000000..5fcd48ff0f36
--- /dev/null
+++ b/lib/kunit/device-impl.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit internal header for device helpers
+ *
+ * Header for KUnit-internal driver / bus management.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#ifndef _KUNIT_DEVICE_IMPL_H
+#define _KUNIT_DEVICE_IMPL_H
+
+// For internal use only -- registers the kunit_bus.
+int kunit_bus_init(void);
+// For internal use only -- unregisters the kunit_bus.
+void kunit_bus_shutdown(void);
+
+#endif //_KUNIT_DEVICE_IMPL_H
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
new file mode 100644
index 000000000000..644a38a1f5b1
--- /dev/null
+++ b/lib/kunit/device.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit-managed device implementation
+ *
+ * Implementation of struct kunit_device helpers for fake devices whose
+ * lifecycle is managed by KUnit.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#include <linux/device.h>
+
+#include <kunit/test.h>
+#include <kunit/device.h>
+#include <kunit/resource.h>
+
+#include "device-impl.h"
+
+/* Wrappers for use with kunit_add_action() */
+KUNIT_DEFINE_ACTION_WRAPPER(device_unregister_wrapper, device_unregister, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(driver_unregister_wrapper, driver_unregister, struct device_driver *);
+
+/* The root device for the KUnit bus, parent of all kunit_devices. */
+static struct device *kunit_bus_device;
+
+/* A device owned by a KUnit test. */
+struct kunit_device {
+ struct device dev;
+ /* The KUnit test which owns this device. */
+ struct kunit *owner;
+ /* If the driver is managed by KUnit and unique to this device. */
+ const struct device_driver *driver;
+};
+
+#define to_kunit_device(d) container_of_const(d, struct kunit_device, dev)
+
+static struct bus_type kunit_bus_type = {
+ .name = "kunit",
+};
+
+/* Register the 'kunit_bus' used for fake devices. */
+int kunit_bus_init(void)
+{
+ int error;
+
+ kunit_bus_device = root_device_register("kunit");
+ if (IS_ERR(kunit_bus_device))
+ return PTR_ERR(kunit_bus_device);
+
+ error = bus_register(&kunit_bus_type);
+ if (error)
+ root_device_unregister(kunit_bus_device);
+ return error;
+}
+
+/* Unregister the 'kunit_bus' in case the KUnit module is unloaded. */
+void kunit_bus_shutdown(void)
+{
+ /* Make sure the bus exists before we unregister it. */
+ if (IS_ERR_OR_NULL(kunit_bus_device))
+ return;
+
+ bus_unregister(&kunit_bus_type);
+
+ root_device_unregister(kunit_bus_device);
+
+ kunit_bus_device = NULL;
+}
+
+/* Release a 'fake' KUnit device. */
+static void kunit_device_release(struct device *d)
+{
+ kfree(to_kunit_device(d));
+}
+
+/*
+ * Create and register a KUnit-managed struct device_driver on the kunit_bus.
+ * Returns an error pointer on failure.
+ */
+struct device_driver *kunit_driver_create(struct kunit *test, const char *name)
+{
+ struct device_driver *driver;
+ int err = -ENOMEM;
+
+ driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
+
+ if (!driver)
+ return ERR_PTR(err);
+
+ driver->name = name;
+ driver->bus = &kunit_bus_type;
+ driver->owner = THIS_MODULE;
+
+ err = driver_register(driver);
+ if (err) {
+ kunit_kfree(test, driver);
+ return ERR_PTR(err);
+ }
+
+ kunit_add_action(test, driver_unregister_wrapper, driver);
+ return driver;
+}
+EXPORT_SYMBOL_GPL(kunit_driver_create);
+
+/* Helper which creates a kunit_device and attaches it to the kunit_bus. */
+static struct kunit_device *kunit_device_register_internal(struct kunit *test,
+ const char *name,
+ const struct device_driver *drv)
+{
+ struct kunit_device *kunit_dev;
+ int err = -ENOMEM;
+
+ kunit_dev = kzalloc(sizeof(*kunit_dev), GFP_KERNEL);
+ if (!kunit_dev)
+ return ERR_PTR(err);
+
+ kunit_dev->owner = test;
+
+ err = dev_set_name(&kunit_dev->dev, "%s.%s", test->name, name);
+ if (err) {
+ kfree(kunit_dev);
+ return ERR_PTR(err);
+ }
+
+ kunit_dev->dev.release = kunit_device_release;
+ kunit_dev->dev.bus = &kunit_bus_type;
+ kunit_dev->dev.parent = kunit_bus_device;
+
+ err = device_register(&kunit_dev->dev);
+ if (err) {
+ put_device(&kunit_dev->dev);
+ return ERR_PTR(err);
+ }
+
+ kunit_add_action(test, device_unregister_wrapper, &kunit_dev->dev);
+
+ return kunit_dev;
+}
+
+/*
+ * Create and register a new KUnit-managed device, using the user-supplied device_driver.
+ * On failure, returns an error pointer.
+ */
+struct device *kunit_device_register_with_driver(struct kunit *test,
+ const char *name,
+ const struct device_driver *drv)
+{
+ struct kunit_device *kunit_dev = kunit_device_register_internal(test, name, drv);
+
+ if (IS_ERR_OR_NULL(kunit_dev))
+ return ERR_CAST(kunit_dev);
+
+ return &kunit_dev->dev;
+}
+EXPORT_SYMBOL_GPL(kunit_device_register_with_driver);
+
+/*
+ * Create and register a new KUnit-managed device, including a matching device_driver.
+ * On failure, returns an error pointer.
+ */
+struct device *kunit_device_register(struct kunit *test, const char *name)
+{
+ struct device_driver *drv;
+ struct kunit_device *dev;
+
+ drv = kunit_driver_create(test, name);
+ if (IS_ERR(drv))
+ return ERR_CAST(drv);
+
+ dev = kunit_device_register_internal(test, name, drv);
+ if (IS_ERR(dev)) {
+ kunit_release_action(test, driver_unregister_wrapper, (void *)drv);
+ return ERR_CAST(dev);
+ }
+
+ /* Request the driver be freed. */
+ dev->driver = drv;
+
+ return &dev->dev;
+}
+EXPORT_SYMBOL_GPL(kunit_device_register);
+
+/* Unregisters a KUnit-managed device early (including the driver, if automatically created). */
+void kunit_device_unregister(struct kunit *test, struct device *dev)
+{
+ const struct device_driver *driver = to_kunit_device(dev)->driver;
+
+ kunit_release_action(test, device_unregister_wrapper, dev);
+ if (driver)
+ kunit_release_action(test, driver_unregister_wrapper, (void *)driver);
+}
+EXPORT_SYMBOL_GPL(kunit_device_unregister);
+
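For orientation, a sketch of how a test might consume these helpers (hypothetical test, not taken from this patch): kunit_device_register() hands back a device on the kunit_bus that is cleaned up automatically when the test finishes, or earlier via kunit_device_unregister().

	#include <kunit/device.h>
	#include <kunit/test.h>
	#include <linux/device.h>

	static void fake_device_example(struct kunit *test)
	{
		struct device *dev;

		/* Parented to the kunit root device, bound to an auto-created driver. */
		dev = kunit_device_register(test, "fake-dev");
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

		/* The device can now be used with devm_* APIs, drvdata, etc. */
		dev_set_drvdata(dev, test);
		KUNIT_EXPECT_PTR_EQ(test, (void *)test, dev_get_drvdata(dev));

		/* Optional: tear it down before the end of the test. */
		kunit_device_unregister(test, dev);
	}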
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 1236b3cd2fbb..689fff2b2b10 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -12,6 +12,8 @@
*/
extern struct kunit_suite * const __kunit_suites_start[];
extern struct kunit_suite * const __kunit_suites_end[];
+extern struct kunit_suite * const __kunit_init_suites_start[];
+extern struct kunit_suite * const __kunit_init_suites_end[];
static char *action_param;
@@ -144,6 +146,10 @@ void kunit_free_suite_set(struct kunit_suite_set suite_set)
kfree(suite_set.start);
}
+/*
+ * Filter and reallocate test suites. Must return the filtered test suites set
+ * allocated at a valid virtual address or NULL in case of error.
+ */
struct kunit_suite_set
kunit_filter_suites(const struct kunit_suite_set *suite_set,
const char *filter_glob,
@@ -292,6 +298,37 @@ void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr)
}
}
+struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set,
+ struct kunit_suite_set suite_set)
+{
+ struct kunit_suite_set total_suite_set = {NULL, NULL};
+ struct kunit_suite **total_suite_start = NULL;
+ size_t init_num_suites, num_suites, suite_size;
+ int i = 0;
+
+ init_num_suites = init_suite_set.end - init_suite_set.start;
+ num_suites = suite_set.end - suite_set.start;
+ suite_size = sizeof(suite_set.start);
+
+ /* Allocate memory for array of all kunit suites */
+ total_suite_start = kmalloc_array(init_num_suites + num_suites, suite_size, GFP_KERNEL);
+ if (!total_suite_start)
+ return total_suite_set;
+
+ /* Append and mark init suites and then append all other kunit suites */
+ memcpy(total_suite_start, init_suite_set.start, init_num_suites * suite_size);
+ for (i = 0; i < init_num_suites; i++)
+ total_suite_start[i]->is_init = true;
+
+ memcpy(total_suite_start + init_num_suites, suite_set.start, num_suites * suite_size);
+
+ /* Set kunit suite set start and end */
+ total_suite_set.start = total_suite_start;
+ total_suite_set.end = total_suite_start + (init_num_suites + num_suites);
+
+ return total_suite_set;
+}
+
#if IS_BUILTIN(CONFIG_KUNIT)
static char *kunit_shutdown;
@@ -313,21 +350,41 @@ static void kunit_handle_shutdown(void)
int kunit_run_all_tests(void)
{
- struct kunit_suite_set suite_set = {
+ struct kunit_suite_set suite_set = {NULL, NULL};
+ struct kunit_suite_set filtered_suite_set = {NULL, NULL};
+ struct kunit_suite_set init_suite_set = {
+ __kunit_init_suites_start, __kunit_init_suites_end,
+ };
+ struct kunit_suite_set normal_suite_set = {
__kunit_suites_start, __kunit_suites_end,
};
+ size_t init_num_suites = init_suite_set.end - init_suite_set.start;
int err = 0;
+
+ if (init_num_suites > 0) {
+ suite_set = kunit_merge_suite_sets(init_suite_set, normal_suite_set);
+ if (!suite_set.start)
+ goto out;
+ } else
+ suite_set = normal_suite_set;
+
if (!kunit_enabled()) {
pr_info("kunit: disabled\n");
- goto out;
+ goto free_out;
}
if (filter_glob_param || filter_param) {
- suite_set = kunit_filter_suites(&suite_set, filter_glob_param,
+ filtered_suite_set = kunit_filter_suites(&suite_set, filter_glob_param,
filter_param, filter_action_param, &err);
+
+ /* Free original suite set before using filtered suite set */
+ if (init_num_suites > 0)
+ kfree(suite_set.start);
+ suite_set = filtered_suite_set;
+
if (err) {
pr_err("kunit executor: error filtering suites: %d\n", err);
- goto out;
+ goto free_out;
}
}
@@ -340,9 +397,12 @@ int kunit_run_all_tests(void)
else
pr_err("kunit executor: unknown action '%s'\n", action_param);
- if (filter_glob_param || filter_param) { /* a copy was made of each suite */
+free_out:
+ if (filter_glob_param || filter_param)
kunit_free_suite_set(suite_set);
- }
+ else if (init_num_suites > 0)
+ /* Don't use kunit_free_suite_set because suites aren't individually allocated */
+ kfree(suite_set.start);
out:
kunit_handle_shutdown();
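A minimal sketch of the calling pattern kunit_merge_suite_sets() expects (the caller below is hypothetical): the merged set only copies the suite pointers, so on the unfiltered path it is released with a plain kfree() of the start pointer rather than kunit_free_suite_set().

	static int run_merged_example(struct kunit_suite_set init_set,
				      struct kunit_suite_set normal_set)
	{
		struct kunit_suite_set merged;

		/* Init suites come first and are flagged ->is_init = true. */
		merged = kunit_merge_suite_sets(init_set, normal_set);
		if (!merged.start)
			return -ENOMEM;

		kunit_exec_run_tests(&merged, false);

		/* Only the pointer array was allocated; the suites were not copied. */
		kfree(merged.start);
		return 0;
	}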
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index 6bb5c2ef6696..798924f7cc86 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -169,6 +169,16 @@ static int subtract_one(int i)
}
/*
+ * If the function to be replaced is static within a module, it is
+ * useful to export a pointer to that function instead of having
+ * to change the static function to a non-static exported function.
+ *
+ * This pointer simulates a module exporting a pointer to a static
+ * function.
+ */
+static int (* const add_one_fn_ptr)(int i) = add_one;
+
+/*
* This test shows the use of static stubs.
*/
static void example_static_stub_test(struct kunit *test)
@@ -187,6 +197,30 @@ static void example_static_stub_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, add_one(1), 2);
}
+/*
+ * This test shows the use of static stubs when the function being
+ * replaced is provided as a pointer-to-function instead of the
+ * actual function. This is useful for providing access to static
+ * functions in a module by exporting a pointer to that function
+ * instead of having to change the static function to a non-static
+ * exported function.
+ */
+static void example_static_stub_using_fn_ptr_test(struct kunit *test)
+{
+ /* By default, function is not stubbed. */
+ KUNIT_EXPECT_EQ(test, add_one(1), 2);
+
+ /* Replace add_one() with subtract_one(). */
+ kunit_activate_static_stub(test, add_one_fn_ptr, subtract_one);
+
+ /* add_one() is now replaced. */
+ KUNIT_EXPECT_EQ(test, add_one(1), 0);
+
+ /* Return add_one() to normal. */
+ kunit_deactivate_static_stub(test, add_one_fn_ptr);
+ KUNIT_EXPECT_EQ(test, add_one(1), 2);
+}
+
static const struct example_param {
int value;
} example_params_array[] = {
@@ -222,6 +256,20 @@ static void example_params_test(struct kunit *test)
}
/*
+ * This test shows the use of test->priv.
+ */
+static void example_priv_test(struct kunit *test)
+{
+ /* unless set up in suite->init(), test->priv is NULL */
+ KUNIT_ASSERT_NULL(test, test->priv);
+
+ /* but can be used to pass arbitrary data to other functions */
+ test->priv = kunit_kzalloc(test, 1, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, test->priv);
+ KUNIT_ASSERT_PTR_EQ(test, test->priv, kunit_get_current_test()->priv);
+}
+
+/*
* This test should always pass. Can be used to practice filtering attributes.
*/
static void example_slow_test(struct kunit *test)
@@ -245,6 +293,8 @@ static struct kunit_case example_test_cases[] = {
KUNIT_CASE(example_mark_skipped_test),
KUNIT_CASE(example_all_expect_macros_test),
KUNIT_CASE(example_static_stub_test),
+ KUNIT_CASE(example_static_stub_using_fn_ptr_test),
+ KUNIT_CASE(example_priv_test),
KUNIT_CASE_PARAM(example_params_test, example_gen_params),
KUNIT_CASE_SLOW(example_slow_test),
{}
@@ -287,4 +337,41 @@ static struct kunit_suite example_test_suite = {
*/
kunit_test_suites(&example_test_suite);
+static int __init init_add(int x, int y)
+{
+ return (x + y);
+}
+
+/*
+ * This test should always pass. Can be used to test init suites.
+ */
+static void __init example_init_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, init_add(1, 1), 2);
+}
+
+/*
+ * The kunit_case struct cannot be marked as __initdata as this will be
+ * used in debugfs to retrieve results after the test has run
+ */
+static struct kunit_case __refdata example_init_test_cases[] = {
+ KUNIT_CASE(example_init_test),
+ {}
+};
+
+/*
+ * The kunit_suite struct cannot be marked as __initdata as this will be
+ * used in debugfs to retrieve results after the test has run
+ */
+static struct kunit_suite example_init_test_suite = {
+ .name = "example_init",
+ .test_cases = example_init_test_cases,
+};
+
+/*
+ * This registers the test suite and marks the suite as using init data
+ * and/or functions.
+ */
+kunit_test_init_section_suites(&example_init_test_suite);
+
MODULE_LICENSE("GPL v2");
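On the module-under-test side, the exported-pointer pattern exercised by example_static_stub_using_fn_ptr_test() looks roughly like this (illustrative names only, assuming the usual KUNIT_STATIC_STUB_REDIRECT() hook inside the static function):

	#include <kunit/static_stub.h>
	#include <linux/export.h>

	/* Stays static; tests never need the symbol itself. */
	static int do_interesting_thing(int val)
	{
		/* Redirects to the replacement while a KUnit stub is active. */
		KUNIT_STATIC_STUB_REDIRECT(do_interesting_thing, val);
		return val + 1;
	}

	/*
	 * Export a pointer instead of the function, so a separate test module
	 * can pass it to kunit_activate_static_stub() and
	 * kunit_deactivate_static_stub() without making the function global.
	 */
	int (* const do_interesting_thing_ptr)(int) = do_interesting_thing;
	EXPORT_SYMBOL_GPL(do_interesting_thing_ptr);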
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index de2113a58fa0..f7980ef236a3 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -5,9 +5,13 @@
* Copyright (C) 2019, Google LLC.
* Author: Brendan Higgins <brendanhiggins@google.com>
*/
+#include "linux/gfp_types.h"
#include <kunit/test.h>
#include <kunit/test-bug.h>
+#include <linux/device.h>
+#include <kunit/device.h>
+
#include "string-stream.h"
#include "try-catch-impl.h"
@@ -538,10 +542,7 @@ static struct kunit_suite kunit_resource_test_suite = {
#if IS_BUILTIN(CONFIG_KUNIT_TEST)
/* This avoids a cast warning if kfree() is passed direct to kunit_add_action(). */
-static void kfree_wrapper(void *p)
-{
- kfree(p);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
static void kunit_log_test(struct kunit *test)
{
@@ -690,6 +691,134 @@ static struct kunit_case kunit_current_test_cases[] = {
{}
};
+static void test_dev_action(void *priv)
+{
+ *(void **)priv = (void *)1;
+}
+
+static void kunit_device_test(struct kunit *test)
+{
+ struct device *test_device;
+ long action_was_run = 0;
+
+ test_device = kunit_device_register(test, "my_device");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
+
+ // Add an action to verify cleanup.
+ devm_add_action(test_device, test_dev_action, &action_was_run);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 0);
+
+ kunit_device_unregister(test, test_device);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 1);
+}
+
+static void kunit_device_cleanup_test(struct kunit *test)
+{
+ struct device *test_device;
+ long action_was_run = 0;
+
+ test_device = kunit_device_register(test, "my_device");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
+
+ /* Add an action to verify cleanup. */
+ devm_add_action(test_device, test_dev_action, &action_was_run);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 0);
+
+ /* Force KUnit to run cleanup early. */
+ kunit_cleanup(test);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 1);
+}
+
+struct driver_test_state {
+ bool driver_device_probed;
+ bool driver_device_removed;
+ long action_was_run;
+};
+
+static int driver_probe_hook(struct device *dev)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct driver_test_state *state = (struct driver_test_state *)test->priv;
+
+ state->driver_device_probed = true;
+ return 0;
+}
+
+static int driver_remove_hook(struct device *dev)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct driver_test_state *state = (struct driver_test_state *)test->priv;
+
+ state->driver_device_removed = true;
+ return 0;
+}
+
+static void kunit_device_driver_test(struct kunit *test)
+{
+ struct device_driver *test_driver;
+ struct device *test_device;
+ struct driver_test_state *test_state = kunit_kzalloc(test, sizeof(*test_state), GFP_KERNEL);
+
+ test->priv = test_state;
+ test_driver = kunit_driver_create(test, "my_driver");
+
+ // This can fail with an error pointer.
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_driver);
+
+ test_driver->probe = driver_probe_hook;
+ test_driver->remove = driver_remove_hook;
+
+ test_device = kunit_device_register_with_driver(test, "my_device", test_driver);
+
+ // This can fail with an error pointer.
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
+
+ // Make sure the probe function was called.
+ KUNIT_ASSERT_TRUE(test, test_state->driver_device_probed);
+
+ // Add an action to verify cleanup.
+ devm_add_action(test_device, test_dev_action, &test_state->action_was_run);
+
+ KUNIT_EXPECT_EQ(test, test_state->action_was_run, 0);
+
+ kunit_device_unregister(test, test_device);
+ test_device = NULL;
+
+ // Make sure the remove hook was called.
+ KUNIT_ASSERT_TRUE(test, test_state->driver_device_removed);
+
+ // We're going to test this again.
+ test_state->driver_device_probed = false;
+
+ // The driver should not automatically be destroyed by
+ // kunit_device_unregister, so we can re-use it.
+ test_device = kunit_device_register_with_driver(test, "my_device", test_driver);
+
+ // This can fail with an error pointer.
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
+
+ // Probe was called again.
+ KUNIT_ASSERT_TRUE(test, test_state->driver_device_probed);
+
+ // Everything is automatically freed here.
+}
+
+static struct kunit_case kunit_device_test_cases[] = {
+ KUNIT_CASE(kunit_device_test),
+ KUNIT_CASE(kunit_device_cleanup_test),
+ KUNIT_CASE(kunit_device_driver_test),
+ {}
+};
+
+static struct kunit_suite kunit_device_test_suite = {
+ .name = "kunit_device",
+ .test_cases = kunit_device_test_cases,
+};
+
static struct kunit_suite kunit_current_test_suite = {
.name = "kunit_current",
.test_cases = kunit_current_test_cases,
@@ -697,6 +826,6 @@ static struct kunit_suite kunit_current_test_suite = {
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
&kunit_log_test_suite, &kunit_status_test_suite,
- &kunit_current_test_suite);
+ &kunit_current_test_suite, &kunit_device_test_suite);
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
index 06822766f29a..03fb511826f7 100644
--- a/lib/kunit/string-stream-test.c
+++ b/lib/kunit/string-stream-test.c
@@ -72,7 +72,7 @@ static void string_stream_unmanaged_init_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, stream->length, 0);
KUNIT_EXPECT_TRUE(test, list_empty(&stream->fragments));
- KUNIT_EXPECT_EQ(test, stream->gfp, GFP_KERNEL);
+ KUNIT_EXPECT_TRUE(test, (stream->gfp == GFP_KERNEL));
KUNIT_EXPECT_FALSE(test, stream->append_newlines);
KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream));
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
index a6f3616c2048..54f4fdcbfac8 100644
--- a/lib/kunit/string-stream.c
+++ b/lib/kunit/string-stream.c
@@ -173,7 +173,7 @@ void string_stream_destroy(struct string_stream *stream)
{
KUNIT_STATIC_STUB_REDIRECT(string_stream_destroy, stream);
- if (!stream)
+ if (IS_ERR_OR_NULL(stream))
return;
string_stream_clear(stream);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 7aceb07a1af9..1d1475578515 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -13,15 +13,20 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mutex.h>
#include <linux/panic.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
+#include <linux/mm.h>
#include "debugfs.h"
+#include "device-impl.h"
#include "hooks-impl.h"
#include "string-stream.h"
#include "try-catch-impl.h"
+static DEFINE_MUTEX(kunit_run_lock);
+
/*
* Hook to fail the current test and print an error message to the log.
*/
@@ -660,6 +665,7 @@ int kunit_run_tests(struct kunit_suite *suite)
test.param_index++;
test.status = KUNIT_SUCCESS;
test.status_comment[0] = '\0';
+ test.priv = NULL;
}
}
@@ -692,6 +698,9 @@ static void kunit_init_suite(struct kunit_suite *suite)
kunit_debugfs_create_suite(suite);
suite->status_comment[0] = '\0';
suite->suite_init_err = 0;
+
+ if (suite->log)
+ string_stream_clear(suite->log);
}
bool kunit_enabled(void)
@@ -710,6 +719,11 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
kunit_suite_counter = 1;
+ /* Use mutex lock to guard against running tests concurrently. */
+ if (mutex_lock_interruptible(&kunit_run_lock)) {
+ pr_err("kunit: test interrupted\n");
+ return -EINTR;
+ }
static_branch_inc(&kunit_running);
for (i = 0; i < num_suites; i++) {
@@ -718,6 +732,7 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
}
static_branch_dec(&kunit_running);
+ mutex_unlock(&kunit_run_lock);
return 0;
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_init);
@@ -742,28 +757,40 @@ EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
#ifdef CONFIG_MODULES
static void kunit_module_init(struct module *mod)
{
- struct kunit_suite_set suite_set = {
+ struct kunit_suite_set suite_set, filtered_set;
+ struct kunit_suite_set normal_suite_set = {
mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
};
+ struct kunit_suite_set init_suite_set = {
+ mod->kunit_init_suites, mod->kunit_init_suites + mod->num_kunit_init_suites,
+ };
const char *action = kunit_action();
int err = 0;
- suite_set = kunit_filter_suites(&suite_set,
+ if (mod->num_kunit_init_suites > 0)
+ suite_set = kunit_merge_suite_sets(init_suite_set, normal_suite_set);
+ else
+ suite_set = normal_suite_set;
+
+ filtered_set = kunit_filter_suites(&suite_set,
kunit_filter_glob() ?: "*.*",
kunit_filter(), kunit_filter_action(),
&err);
if (err)
pr_err("kunit module: error filtering suites: %d\n", err);
- mod->kunit_suites = (struct kunit_suite **)suite_set.start;
- mod->num_kunit_suites = suite_set.end - suite_set.start;
+ mod->kunit_suites = (struct kunit_suite **)filtered_set.start;
+ mod->num_kunit_suites = filtered_set.end - filtered_set.start;
+
+ if (mod->num_kunit_init_suites > 0)
+ kfree(suite_set.start);
if (!action)
- kunit_exec_run_tests(&suite_set, false);
+ kunit_exec_run_tests(&filtered_set, false);
else if (!strcmp(action, "list"))
- kunit_exec_list_tests(&suite_set, false);
+ kunit_exec_list_tests(&filtered_set, false);
else if (!strcmp(action, "list_attr"))
- kunit_exec_list_tests(&suite_set, true);
+ kunit_exec_list_tests(&filtered_set, true);
else
pr_err("kunit: unknown action '%s'\n", action);
}
@@ -775,12 +802,19 @@ static void kunit_module_exit(struct module *mod)
};
const char *action = kunit_action();
+ /*
+ * Check if the start address is a valid virtual address to detect
+ * if the module load sequence has failed and the suite set has not
+ * been initialized and filtered.
+ */
+ if (!suite_set.start || !virt_addr_valid(suite_set.start))
+ return;
+
if (!action)
__kunit_test_suites_exit(mod->kunit_suites,
mod->num_kunit_suites);
- if (suite_set.start)
- kunit_free_suite_set(suite_set);
+ kunit_free_suite_set(suite_set);
}
static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
@@ -790,12 +824,12 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
switch (val) {
case MODULE_STATE_LIVE:
+ kunit_module_init(mod);
break;
case MODULE_STATE_GOING:
kunit_module_exit(mod);
break;
case MODULE_STATE_COMING:
- kunit_module_init(mod);
break;
case MODULE_STATE_UNFORMED:
break;
@@ -810,6 +844,8 @@ static struct notifier_block kunit_mod_nb = {
};
#endif
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_action_wrapper, kfree, const void *)
+
void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
void *data;
@@ -819,7 +855,7 @@ void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
if (!data)
return NULL;
- if (kunit_add_action_or_reset(test, (kunit_action_t *)kfree, data) != 0)
+ if (kunit_add_action_or_reset(test, kfree_action_wrapper, data) != 0)
return NULL;
return data;
@@ -831,7 +867,7 @@ void kunit_kfree(struct kunit *test, const void *ptr)
if (!ptr)
return;
- kunit_release_action(test, (kunit_action_t *)kfree, (void *)ptr);
+ kunit_release_action(test, kfree_action_wrapper, (void *)ptr);
}
EXPORT_SYMBOL_GPL(kunit_kfree);
@@ -876,6 +912,8 @@ static int __init kunit_init(void)
kunit_install_hooks();
kunit_debugfs_init();
+
+ kunit_bus_init();
#ifdef CONFIG_MODULES
return register_module_notifier(&kunit_mod_nb);
#else
@@ -890,6 +928,9 @@ static void __exit kunit_exit(void)
#ifdef CONFIG_MODULES
unregister_module_notifier(&kunit_mod_nb);
#endif
+
+ kunit_bus_shutdown();
+
kunit_debugfs_cleanup();
}
module_exit(kunit_exit);
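The kfree_action_wrapper change above follows the pattern KUnit uses for deferred cleanup: wrap the destructor with KUNIT_DEFINE_ACTION_WRAPPER() instead of casting it to kunit_action_t, which avoids the function-pointer cast (and the CFI/cast warnings that come with it). A small usage sketch with hypothetical names:

	#include <kunit/resource.h>
	#include <kunit/test.h>
	#include <linux/slab.h>

	/* Generates a void (*)(void *) wrapper that calls kfree(). */
	KUNIT_DEFINE_ACTION_WRAPPER(free_buf_action, kfree, const void *);

	static void deferred_cleanup_example(struct kunit *test)
	{
		char *buf = kmalloc(32, GFP_KERNEL);

		KUNIT_ASSERT_NOT_NULL(test, buf);

		/* buf is freed automatically when the test exits, even on failure. */
		KUNIT_ASSERT_EQ(test, 0,
				kunit_add_action_or_reset(test, free_buf_action, buf));
	}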
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 684689457d77..6f241bb38799 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4,6 +4,8 @@
* Copyright (c) 2018-2022 Oracle Corporation
* Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
* Matthew Wilcox <willy@infradead.org>
+ * Copyright (c) 2023 ByteDance
+ * Author: Peng Zhang <zhangpeng.00@bytedance.com>
*/
/*
@@ -14,8 +16,8 @@
* and are simply the slot index + the minimum of the node.
*
* In regular B-Tree terms, pivots are called keys. The term pivot is used to
- * indicate that the tree is specifying ranges, Pivots may appear in the
- * subtree with an entry attached to the value where as keys are unique to a
+ * indicate that the tree is specifying ranges. Pivots may appear in the
+ * subtree with an entry attached to the value whereas keys are unique to a
* specific position of a B-tree. Pivot values are inclusive of the slot with
* the same index.
*
@@ -165,6 +167,11 @@ static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}
+static inline void mt_free_one(struct maple_node *node)
+{
+ kmem_cache_free(maple_node_cache, node);
+}
+
static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
@@ -205,23 +212,29 @@ static unsigned int mas_mt_height(struct ma_state *mas)
return mt_height(mas->tree);
}
-static inline enum maple_type mte_node_type(const struct maple_enode *entry)
+static inline unsigned int mt_attr(struct maple_tree *mt)
+{
+ return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
+}
+
+static __always_inline enum maple_type mte_node_type(
+ const struct maple_enode *entry)
{
return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
MAPLE_NODE_TYPE_MASK;
}
-static inline bool ma_is_dense(const enum maple_type type)
+static __always_inline bool ma_is_dense(const enum maple_type type)
{
return type < maple_leaf_64;
}
-static inline bool ma_is_leaf(const enum maple_type type)
+static __always_inline bool ma_is_leaf(const enum maple_type type)
{
return type < maple_range_64;
}
-static inline bool mte_is_leaf(const struct maple_enode *entry)
+static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
{
return ma_is_leaf(mte_node_type(entry));
}
@@ -230,60 +243,50 @@ static inline bool mte_is_leaf(const struct maple_enode *entry)
* We also reserve values with the bottom two bits set to '10' which are
* below 4096
*/
-static inline bool mt_is_reserved(const void *entry)
+static __always_inline bool mt_is_reserved(const void *entry)
{
return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
xa_is_internal(entry);
}
-static inline void mas_set_err(struct ma_state *mas, long err)
+static __always_inline void mas_set_err(struct ma_state *mas, long err)
{
mas->node = MA_ERROR(err);
+ mas->status = ma_error;
}
-static inline bool mas_is_ptr(const struct ma_state *mas)
+static __always_inline bool mas_is_ptr(const struct ma_state *mas)
{
- return mas->node == MAS_ROOT;
+ return mas->status == ma_root;
}
-static inline bool mas_is_start(const struct ma_state *mas)
+static __always_inline bool mas_is_start(const struct ma_state *mas)
{
- return mas->node == MAS_START;
+ return mas->status == ma_start;
}
-bool mas_is_err(struct ma_state *mas)
+static __always_inline bool mas_is_none(const struct ma_state *mas)
{
- return xa_is_err(mas->node);
+ return mas->status == ma_none;
}
-static __always_inline bool mas_is_overflow(struct ma_state *mas)
+static __always_inline bool mas_is_paused(const struct ma_state *mas)
{
- if (unlikely(mas->node == MAS_OVERFLOW))
- return true;
-
- return false;
+ return mas->status == ma_pause;
}
-static __always_inline bool mas_is_underflow(struct ma_state *mas)
+static __always_inline bool mas_is_overflow(struct ma_state *mas)
{
- if (unlikely(mas->node == MAS_UNDERFLOW))
- return true;
-
- return false;
+ return mas->status == ma_overflow;
}
-static inline bool mas_searchable(struct ma_state *mas)
+static inline bool mas_is_underflow(struct ma_state *mas)
{
- if (mas_is_none(mas))
- return false;
-
- if (mas_is_ptr(mas))
- return false;
-
- return true;
+ return mas->status == ma_underflow;
}
-static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
+static __always_inline struct maple_node *mte_to_node(
+ const struct maple_enode *entry)
{
return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}
@@ -360,12 +363,12 @@ static inline bool mte_has_null(const struct maple_enode *node)
return (unsigned long)node & MAPLE_ENODE_NULL;
}
-static inline bool ma_is_root(struct maple_node *node)
+static __always_inline bool ma_is_root(struct maple_node *node)
{
return ((unsigned long)node->parent & MA_ROOT_PARENT);
}
-static inline bool mte_is_root(const struct maple_enode *node)
+static __always_inline bool mte_is_root(const struct maple_enode *node)
{
return ma_is_root(mte_to_node(node));
}
@@ -375,7 +378,7 @@ static inline bool mas_is_root_limits(const struct ma_state *mas)
return !mas->min && mas->max == ULONG_MAX;
}
-static inline bool mt_is_alloc(struct maple_tree *mt)
+static __always_inline bool mt_is_alloc(struct maple_tree *mt)
{
return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}
@@ -514,11 +517,12 @@ void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
*
* Return: The slot in the parent node where @enode resides.
*/
-static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
+static __always_inline
+unsigned int mte_parent_slot(const struct maple_enode *enode)
{
unsigned long val = (unsigned long)mte_to_node(enode)->parent;
- if (val & MA_ROOT_PARENT)
+ if (unlikely(val & MA_ROOT_PARENT))
return 0;
/*
@@ -534,7 +538,8 @@ static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
*
* Return: The parent maple node.
*/
-static inline struct maple_node *mte_parent(const struct maple_enode *enode)
+static __always_inline
+struct maple_node *mte_parent(const struct maple_enode *enode)
{
return (void *)((unsigned long)
(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
@@ -546,7 +551,7 @@ static inline struct maple_node *mte_parent(const struct maple_enode *enode)
*
* Return: true if dead, false otherwise.
*/
-static inline bool ma_dead_node(const struct maple_node *node)
+static __always_inline bool ma_dead_node(const struct maple_node *node)
{
struct maple_node *parent;
@@ -562,7 +567,7 @@ static inline bool ma_dead_node(const struct maple_node *node)
*
* Return: true if dead, false otherwise.
*/
-static inline bool mte_dead_node(const struct maple_enode *enode)
+static __always_inline bool mte_dead_node(const struct maple_enode *enode)
{
struct maple_node *parent, *node;
@@ -680,35 +685,6 @@ static inline unsigned long *ma_gaps(struct maple_node *node,
}
/*
- * mas_pivot() - Get the pivot at @piv of the maple encoded node.
- * @mas: The maple state.
- * @piv: The pivot.
- *
- * Return: the pivot at @piv of @mn.
- */
-static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
-{
- struct maple_node *node = mas_mn(mas);
- enum maple_type type = mte_node_type(mas->node);
-
- if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
- mas_set_err(mas, -EIO);
- return 0;
- }
-
- switch (type) {
- case maple_arange_64:
- return node->ma64.pivot[piv];
- case maple_range_64:
- case maple_leaf_64:
- return node->mr64.pivot[piv];
- case maple_dense:
- return 0;
- }
- return 0;
-}
-
-/*
* mas_safe_pivot() - get the pivot at @piv or mas->max.
* @mas: The maple state
* @pivots: The pointer to the maple node pivots
@@ -718,7 +694,7 @@ static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
* Return: The pivot at @piv within the limit of the @pivots array, @mas->max
* otherwise.
*/
-static inline unsigned long
+static __always_inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
unsigned char piv, enum maple_type type)
{
@@ -759,7 +735,6 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
BUG_ON(piv >= mt_pivots[type]);
switch (type) {
- default:
case maple_range_64:
case maple_leaf_64:
node->mr64.pivot[piv] = val;
@@ -783,7 +758,6 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
switch (mt) {
- default:
case maple_arange_64:
return mn->ma64.slot;
case maple_range_64:
@@ -792,6 +766,8 @@ static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
case maple_dense:
return mn->slot;
}
+
+ return NULL;
}
static inline bool mt_write_locked(const struct maple_tree *mt)
@@ -800,20 +776,20 @@ static inline bool mt_write_locked(const struct maple_tree *mt)
lockdep_is_held(&mt->ma_lock);
}
-static inline bool mt_locked(const struct maple_tree *mt)
+static __always_inline bool mt_locked(const struct maple_tree *mt)
{
return mt_external_lock(mt) ? mt_lock_is_held(mt) :
lockdep_is_held(&mt->ma_lock);
}
-static inline void *mt_slot(const struct maple_tree *mt,
+static __always_inline void *mt_slot(const struct maple_tree *mt,
void __rcu **slots, unsigned char offset)
{
return rcu_dereference_check(slots[offset], mt_locked(mt));
}
-static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
- unsigned char offset)
+static __always_inline void *mt_slot_locked(struct maple_tree *mt,
+ void __rcu **slots, unsigned char offset)
{
return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}
@@ -825,8 +801,8 @@ static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
*
* Return: The entry stored in @slots at the @offset.
*/
-static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
- unsigned char offset)
+static __always_inline void *mas_slot_locked(struct ma_state *mas,
+ void __rcu **slots, unsigned char offset)
{
return mt_slot_locked(mas->tree, slots, offset);
}
@@ -839,8 +815,8 @@ static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
*
* Return: The entry stored in @slots at the @offset
*/
-static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
- unsigned char offset)
+static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
+ unsigned char offset)
{
return mt_slot(mas->tree, slots, offset);
}
@@ -851,7 +827,7 @@ static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
*
* Return: The pointer to the root of the tree
*/
-static inline void *mas_root(struct ma_state *mas)
+static __always_inline void *mas_root(struct ma_state *mas)
{
return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}
@@ -954,10 +930,8 @@ static inline unsigned char ma_meta_end(struct maple_node *mn,
/*
* ma_meta_gap() - Get the largest gap location of a node from the metadata
* @mn: The maple node
- * @mt: The maple node type
*/
-static inline unsigned char ma_meta_gap(struct maple_node *mn,
- enum maple_type mt)
+static inline unsigned char ma_meta_gap(struct maple_node *mn)
{
return mn->ma64.meta.gap;
}
@@ -1112,14 +1086,16 @@ static int mas_ascend(struct ma_state *mas)
return 0;
}
- if (!mas->min)
+ min = 0;
+ max = ULONG_MAX;
+ if (!mas->offset) {
+ min = mas->min;
set_min = true;
+ }
if (mas->max == ULONG_MAX)
set_max = true;
- min = 0;
- max = ULONG_MAX;
do {
p_enode = a_enode;
a_type = mas_parent_type(mas, p_enode);
@@ -1258,6 +1234,7 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
if (mas->mas_flags & MA_STATE_PREALLOC) {
if (allocated)
return;
+ BUG_ON(!allocated);
WARN_ON(!allocated);
}
@@ -1363,14 +1340,14 @@ static void mas_node_count(struct ma_state *mas, int count)
* mas_start() - Sets up maple state for operations.
* @mas: The maple state.
*
- * If mas->node == MAS_START, then set the min, max and depth to
+ * If mas->status == ma_start, then set the min, max and depth to
* defaults.
*
* Return:
- * - If mas->node is an error or not MAS_START, return NULL.
- * - If it's an empty tree: NULL & mas->node == MAS_NONE
- * - If it's a single entry: The entry & mas->node == MAS_ROOT
- * - If it's a tree: NULL & mas->node == safe root node.
+ * - If mas->node is an error or not ma_start, return NULL.
+ * - If it's an empty tree: NULL & mas->status == ma_none
+ * - If it's a single entry: The entry & mas->status == ma_root
+ * - If it's a tree: NULL & mas->node == safe root node, mas->status == ma_active.
*/
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
@@ -1386,6 +1363,7 @@ retry:
/* Tree with nodes */
if (likely(xa_is_node(root))) {
mas->depth = 1;
+ mas->status = ma_active;
mas->node = mte_safe_root(root);
mas->offset = 0;
if (mte_dead_node(mas->node))
@@ -1396,13 +1374,14 @@ retry:
/* empty tree */
if (unlikely(!root)) {
- mas->node = MAS_NONE;
+ mas->node = NULL;
+ mas->status = ma_none;
mas->offset = MAPLE_NODE_SLOTS;
return NULL;
}
/* Single entry tree */
- mas->node = MAS_ROOT;
+ mas->status = ma_root;
mas->offset = MAPLE_NODE_SLOTS;
/* Single entry tree. */
@@ -1425,10 +1404,8 @@ retry:
* Uses metadata to find the end of the data when possible.
* Return: The zero indexed last slot with data (may be null).
*/
-static inline unsigned char ma_data_end(struct maple_node *node,
- enum maple_type type,
- unsigned long *pivots,
- unsigned long max)
+static __always_inline unsigned char ma_data_end(struct maple_node *node,
+ enum maple_type type, unsigned long *pivots, unsigned long max)
{
unsigned char offset;
@@ -1541,6 +1518,9 @@ static unsigned long mas_leaf_max_gap(struct ma_state *mas)
gap = ULONG_MAX - pivots[max_piv];
if (gap > max_gap)
max_gap = gap;
+
+ if (max_gap > pivots[max_piv] - mas->min)
+ return max_gap;
}
for (; i <= max_piv; i++) {
@@ -1608,7 +1588,7 @@ static inline unsigned long mas_max_gap(struct ma_state *mas)
node = mas_mn(mas);
MAS_BUG_ON(mas, mt != maple_arange_64);
- offset = ma_meta_gap(node, mt);
+ offset = ma_meta_gap(node);
gaps = ma_gaps(node, mt);
return gaps[offset];
}
@@ -1639,7 +1619,7 @@ static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
ascend:
MAS_BUG_ON(mas, pmt != maple_arange_64);
- meta_offset = ma_meta_gap(pnode, pmt);
+ meta_offset = ma_meta_gap(pnode);
meta_gap = pgaps[meta_offset];
pgaps[offset] = new;
@@ -1987,27 +1967,13 @@ complete:
/*
* mas_leaf_set_meta() - Set the metadata of a leaf if possible.
- * @mas: The maple state
* @node: The maple node
- * @pivots: pointer to the maple node pivots
* @mt: The maple type
- * @end: The assumed end
- *
- * Note, end may be incremented within this function but not modified at the
- * source. This is fine since the metadata is the last thing to be stored in a
- * node during a write.
+ * @end: The node end
*/
-static inline void mas_leaf_set_meta(struct ma_state *mas,
- struct maple_node *node, unsigned long *pivots,
+static inline void mas_leaf_set_meta(struct maple_node *node,
enum maple_type mt, unsigned char end)
{
- /* There is no room for metadata already */
- if (mt_pivots[mt] <= end)
- return;
-
- if (pivots[end] && pivots[end] < mas->max)
- end++;
-
if (end < mt_slots[mt] - 1)
ma_set_meta(node, mt, 0, end);
}
@@ -2064,7 +2030,7 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
ma_set_meta(node, mt, offset, end);
} else {
- mas_leaf_set_meta(mas, node, pivots, mt, end);
+ mas_leaf_set_meta(node, mt, end);
}
}
@@ -2152,11 +2118,11 @@ static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
}
slot = offset_end + 1;
- if (slot > wr_mas->node_end)
+ if (slot > mas->end)
goto b_end;
/* Copy end data to the end of the node. */
- mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
+ mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
b_node->b_end--;
return;
@@ -2211,19 +2177,21 @@ static inline bool mas_next_sibling(struct ma_state *mas)
}
/*
- * mte_node_or_node() - Return the encoded node or MAS_NONE.
+ * mas_node_or_none() - Set the enode and state.
* @enode: The encoded maple node.
*
- * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
- *
- * Return: @enode or MAS_NONE
+ * Set the node to the enode and the status.
*/
-static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
+static inline void mas_node_or_none(struct ma_state *mas,
+ struct maple_enode *enode)
{
- if (enode)
- return enode;
-
- return ma_enode_ptr(MAS_NONE);
+ if (enode) {
+ mas->node = enode;
+ mas->status = ma_active;
+ } else {
+ mas->node = NULL;
+ mas->status = ma_none;
+ }
}
/*
@@ -2245,8 +2213,8 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
wr_mas->node = mas_mn(wr_mas->mas);
wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
- count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
- wr_mas->pivots, mas->max);
+ count = mas->end = ma_data_end(wr_mas->node, wr_mas->type,
+ wr_mas->pivots, mas->max);
offset = mas->offset;
while (offset < count && mas->index > wr_mas->pivots[offset])
@@ -2535,7 +2503,7 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast,
}
/*
- * mas_topiary_node() - Dispose of a singe node
+ * mas_topiary_node() - Dispose of a single node
* @mas: The maple state for pushing nodes
* @enode: The encoded maple node
* @in_rcu: If the tree is in rcu mode
@@ -2543,13 +2511,15 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast,
* The node will either be RCU freed or pushed back on the maple state.
*/
static inline void mas_topiary_node(struct ma_state *mas,
- struct maple_enode *enode, bool in_rcu)
+ struct ma_state *tmp_mas, bool in_rcu)
{
struct maple_node *tmp;
+ struct maple_enode *enode;
- if (enode == MAS_NONE)
+ if (mas_is_none(tmp_mas))
return;
+ enode = tmp_mas->node;
tmp = mte_to_node(enode);
mte_set_node_dead(enode);
if (in_rcu)
@@ -2589,8 +2559,8 @@ static inline void mas_topiary_replace(struct ma_state *mas,
/* Update the parent pointers in the tree */
tmp[0] = *mas;
tmp[0].offset = 0;
- tmp[1].node = MAS_NONE;
- tmp[2].node = MAS_NONE;
+ tmp[1].status = ma_none;
+ tmp[2].status = ma_none;
while (!mte_is_leaf(tmp[0].node)) {
n = 0;
for (i = 0; i < 3; i++) {
@@ -2610,7 +2580,7 @@ static inline void mas_topiary_replace(struct ma_state *mas,
break;
while (n < 3)
- tmp_next[n++].node = MAS_NONE;
+ tmp_next[n++].status = ma_none;
for (i = 0; i < 3; i++)
tmp[i] = tmp_next[i];
@@ -2623,8 +2593,8 @@ static inline void mas_topiary_replace(struct ma_state *mas,
tmp[0] = *mas;
tmp[0].offset = 0;
tmp[0].node = old_enode;
- tmp[1].node = MAS_NONE;
- tmp[2].node = MAS_NONE;
+ tmp[1].status = ma_none;
+ tmp[2].status = ma_none;
in_rcu = mt_in_rcu(mas->tree);
do {
n = 0;
@@ -2639,7 +2609,7 @@ static inline void mas_topiary_replace(struct ma_state *mas,
if ((tmp_next[n].min >= tmp_next->index) &&
(tmp_next[n].max <= tmp_next->last)) {
mat_add(&subtrees, tmp_next[n].node);
- tmp_next[n].node = MAS_NONE;
+ tmp_next[n].status = ma_none;
} else {
n++;
}
@@ -2650,16 +2620,16 @@ static inline void mas_topiary_replace(struct ma_state *mas,
break;
while (n < 3)
- tmp_next[n++].node = MAS_NONE;
+ tmp_next[n++].status = ma_none;
for (i = 0; i < 3; i++) {
- mas_topiary_node(mas, tmp[i].node, in_rcu);
+ mas_topiary_node(mas, &tmp[i], in_rcu);
tmp[i] = tmp_next[i];
}
} while (!mte_is_leaf(tmp[0].node));
for (i = 0; i < 3; i++)
- mas_topiary_node(mas, tmp[i].node, in_rcu);
+ mas_topiary_node(mas, &tmp[i], in_rcu);
mas_mat_destroy(mas, &subtrees);
}
@@ -2698,9 +2668,9 @@ static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
{
bool new_lmax = true;
- mast->l->node = mte_node_or_none(left);
- mast->m->node = mte_node_or_none(middle);
- mast->r->node = mte_node_or_none(right);
+ mas_node_or_none(mast->l, left);
+ mas_node_or_none(mast->m, middle);
+ mas_node_or_none(mast->r, right);
mast->l->min = mast->orig_l->min;
if (split == mast->bn->b_end) {
@@ -2796,32 +2766,29 @@ static inline void *mtree_range_walk(struct ma_state *mas)
min = mas->min;
max = mas->max;
do {
- offset = 0;
last = next;
node = mte_to_node(next);
type = mte_node_type(next);
pivots = ma_pivots(node, type);
end = ma_data_end(node, type, pivots, max);
- if (unlikely(ma_dead_node(node)))
- goto dead_node;
-
- if (pivots[offset] >= mas->index) {
- prev_max = max;
- prev_min = min;
- max = pivots[offset];
+ prev_min = min;
+ prev_max = max;
+ if (pivots[0] >= mas->index) {
+ offset = 0;
+ max = pivots[0];
goto next;
}
- do {
+ offset = 1;
+ while (offset < end) {
+ if (pivots[offset] >= mas->index) {
+ max = pivots[offset];
+ break;
+ }
offset++;
- } while ((offset < end) && (pivots[offset] < mas->index));
+ }
- prev_min = min;
min = pivots[offset - 1] + 1;
- prev_max = max;
- if (likely(offset < end && pivots[offset]))
- max = pivots[offset];
-
next:
slots = ma_slots(node, type);
next = mt_slot(mas->tree, slots, offset);
@@ -2829,6 +2796,7 @@ next:
goto dead_node;
} while (!ma_is_leaf(type));
+ mas->end = end;
mas->offset = offset;
mas->index = min;
mas->last = max;
@@ -2879,7 +2847,7 @@ static int mas_spanning_rebalance(struct ma_state *mas,
mast->l = &l_mas;
mast->m = &m_mas;
mast->r = &r_mas;
- l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
+ l_mas.status = r_mas.status = m_mas.status = ma_none;
/* Check if this is not root and has sufficient data. */
if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
@@ -3167,7 +3135,7 @@ done:
* @mas: The maple state
* @height: The height of the tree in case it's a new root.
*/
-static inline bool mas_split_final_node(struct maple_subtree_state *mast,
+static inline void mas_split_final_node(struct maple_subtree_state *mast,
struct ma_state *mas, int height)
{
struct maple_enode *ancestor;
@@ -3191,7 +3159,6 @@ static inline bool mas_split_final_node(struct maple_subtree_state *mast,
mast->l->node = ancestor;
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
mas->offset = mast->bn->b_end - 1;
- return true;
}
/*
@@ -3406,7 +3373,6 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
/* Try to push left. */
if (mas_push_data(mas, height, &mast, true))
break;
-
/* Try to push right. */
if (mas_push_data(mas, height, &mast, false))
break;
@@ -3495,6 +3461,7 @@ static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
mas_replace_node(wr_mas->mas, old_enode);
reuse_node:
mas_update_gap(wr_mas->mas);
+ wr_mas->mas->end = b_end;
return 1;
}
@@ -3521,6 +3488,7 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
+ mas->status = ma_active;
if (mas->index) {
if (contents) {
@@ -3553,7 +3521,7 @@ static inline void mas_store_root(struct ma_state *mas, void *entry)
mas_root_expand(mas, entry);
else {
rcu_assign_pointer(mas->tree->ma_root, entry);
- mas->node = MAS_START;
+ mas->status = ma_start;
}
}
@@ -3730,23 +3698,17 @@ static inline void *mtree_lookup_walk(struct ma_state *mas)
enum maple_type type;
void __rcu **slots;
unsigned char end;
- unsigned long max;
next = mas->node;
- max = ULONG_MAX;
do {
- offset = 0;
node = mte_to_node(next);
type = mte_node_type(next);
pivots = ma_pivots(node, type);
- end = ma_data_end(node, type, pivots, max);
- if (unlikely(ma_dead_node(node)))
- goto dead_node;
+ end = mt_pivots[type];
+ offset = 0;
do {
- if (pivots[offset] >= mas->index) {
- max = pivots[offset];
+ if (pivots[offset] >= mas->index)
break;
- }
} while (++offset < end);
slots = ma_slots(node, type);
@@ -3785,7 +3747,7 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
mas->depth = 0;
mas_set_height(mas);
rcu_assign_pointer(mas->tree->ma_root, entry);
- mas->node = MAS_START;
+ mas->status = ma_start;
goto done;
}
@@ -3798,6 +3760,7 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
+ mas->status = ma_active;
rcu_assign_pointer(slots[0], entry);
pivots[0] = mas->last;
mas->depth = 1;
@@ -3891,10 +3854,10 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
memset(&b_node, 0, sizeof(struct maple_big_node));
/* Copy l_mas and store the value in b_node. */
- mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
+ mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
/* Copy r_mas into b_node. */
- if (r_mas.offset <= r_wr_mas.node_end)
- mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
+ if (r_mas.offset <= r_mas.end)
+ mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
&b_node, b_node.b_end + 1);
else
b_node.b_end++;
@@ -3936,7 +3899,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
if (mas->last == wr_mas->end_piv)
offset_end++; /* don't copy this offset */
else if (unlikely(wr_mas->r_max == ULONG_MAX))
- mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
+ mas_bulk_rebalance(mas, mas->end, wr_mas->type);
/* set up node. */
if (in_rcu) {
@@ -3972,12 +3935,12 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
* this range wrote to the end of the node or it overwrote the rest of
* the data
*/
- if (offset_end > wr_mas->node_end)
+ if (offset_end > mas->end)
goto done;
dst_offset = mas->offset + 1;
/* Copy to the end of node if necessary. */
- copy_size = wr_mas->node_end - offset_end + 1;
+ copy_size = mas->end - offset_end + 1;
memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
sizeof(void *) * copy_size);
memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
@@ -3987,7 +3950,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
dst_pivots[new_end] = mas->max;
done:
- mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
+ mas_leaf_set_meta(newnode, maple_leaf_64, new_end);
if (in_rcu) {
struct maple_enode *old_enode = mas->node;
@@ -3998,6 +3961,7 @@ done:
}
trace_ma_write(__func__, mas, 0, wr_mas->entry);
mas_update_gap(mas);
+ mas->end = new_end;
return true;
}
@@ -4063,10 +4027,10 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
} else {
/* Check next slot(s) if we are overwriting the end */
if ((mas->last == wr_mas->end_piv) &&
- (wr_mas->node_end != wr_mas->offset_end) &&
+ (mas->end != wr_mas->offset_end) &&
!wr_mas->slots[wr_mas->offset_end + 1]) {
wr_mas->offset_end++;
- if (wr_mas->offset_end == wr_mas->node_end)
+ if (wr_mas->offset_end == mas->end)
mas->last = mas->max;
else
mas->last = wr_mas->pivots[wr_mas->offset_end];
@@ -4091,11 +4055,11 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
- while ((wr_mas->offset_end < wr_mas->node_end) &&
+ while ((wr_mas->offset_end < wr_mas->mas->end) &&
(wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
wr_mas->offset_end++;
- if (wr_mas->offset_end < wr_mas->node_end)
+ if (wr_mas->offset_end < wr_mas->mas->end)
wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
else
wr_mas->end_piv = wr_mas->mas->max;
@@ -4107,7 +4071,7 @@ static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
- unsigned char new_end = wr_mas->node_end + 2;
+ unsigned char new_end = mas->end + 2;
new_end -= wr_mas->offset_end - mas->offset;
if (wr_mas->r_min == mas->index)
@@ -4141,10 +4105,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
if (mt_in_rcu(mas->tree))
return false;
- if (mas->offset != wr_mas->node_end)
- return false;
-
- end = wr_mas->node_end;
+ end = mas->end;
if (mas->offset != end)
return false;
@@ -4178,6 +4139,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
if (!wr_mas->content || !wr_mas->entry)
mas_update_gap(mas);
+ mas->end = new_end;
trace_ma_write(__func__, mas, new_end, wr_mas->entry);
return true;
}
@@ -4195,7 +4157,7 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas)
trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
memset(&b_node, 0, sizeof(struct maple_big_node));
mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
- mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
+ mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
}
static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
@@ -4223,7 +4185,7 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
if (mas_wr_append(wr_mas, new_end))
return;
- if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
+ if (new_end == mas->end && mas_wr_slot_store(wr_mas))
return;
if (mas_wr_node_store(wr_mas, new_end))
@@ -4328,7 +4290,7 @@ exists:
}
-static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
+static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
{
retry:
mas_set(mas, index);
@@ -4337,7 +4299,7 @@ retry:
goto retry;
}
-static inline bool mas_rewalk_if_dead(struct ma_state *mas,
+static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas,
struct maple_node *node, const unsigned long index)
{
if (unlikely(ma_dead_node(node))) {
@@ -4349,14 +4311,16 @@ static inline bool mas_rewalk_if_dead(struct ma_state *mas,
/*
* mas_prev_node() - Find the prev non-null entry at the same level in the
- * tree. The prev value will be mas->node[mas->offset] or MAS_NONE.
+ * tree. The prev value will be mas->node[mas->offset] or the status will be
+ * ma_underflow.
* @mas: The maple state
* @min: The lower limit to search
*
- * The prev node value will be mas->node[mas->offset] or MAS_NONE.
+ * The prev node value will be mas->node[mas->offset] or the status will be
+ * ma_underflow.
* Return: 1 if the node is dead, 0 otherwise.
*/
-static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
+static int mas_prev_node(struct ma_state *mas, unsigned long min)
{
enum maple_type mt;
int offset, level;
@@ -4416,13 +4380,14 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
if (unlikely(mte_dead_node(mas->node)))
return 1;
+ mas->end = mas->offset;
return 0;
no_entry:
if (unlikely(ma_dead_node(node)))
return 1;
- mas->node = MAS_NONE;
+ mas->status = ma_underflow;
return 0;
}
@@ -4436,8 +4401,7 @@ no_entry:
*
* Return: The entry in the previous slot which is possibly NULL
*/
-static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty,
- bool set_underflow)
+static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
{
void *entry;
void __rcu **slots;
@@ -4470,13 +4434,16 @@ again:
mas->last = mas->index - 1;
mas->index = mas_safe_min(mas, pivots, mas->offset);
} else {
+ if (mas->index <= min)
+ goto underflow;
+
if (mas_prev_node(mas, min)) {
mas_rewalk(mas, save_point);
goto retry;
}
- if (mas_is_none(mas))
- goto underflow;
+ if (WARN_ON_ONCE(mas_is_underflow(mas)))
+ return NULL;
mas->last = mas->max;
node = mas_mn(mas);
@@ -4490,12 +4457,15 @@ again:
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
+
if (likely(entry))
return entry;
if (!empty) {
- if (mas->index <= min)
- goto underflow;
+ if (mas->index <= min) {
+ mas->status = ma_underflow;
+ return NULL;
+ }
goto again;
}
@@ -4503,8 +4473,7 @@ again:
return entry;
underflow:
- if (set_underflow)
- mas->node = MAS_UNDERFLOW;
+ mas->status = ma_underflow;
return NULL;
}
@@ -4513,28 +4482,30 @@ underflow:
* @mas: The maple state
* @max: The maximum pivot value to check.
*
- * The next value will be mas->node[mas->offset] or MAS_NONE.
+ * The next value will be mas->node[mas->offset] or the status will have
+ * overflowed.
* Return: 1 on dead node, 0 otherwise.
*/
-static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
- unsigned long max)
+static int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ unsigned long max)
{
unsigned long min;
unsigned long *pivots;
struct maple_enode *enode;
+ struct maple_node *tmp;
int level = 0;
unsigned char node_end;
enum maple_type mt;
void __rcu **slots;
if (mas->max >= max)
- goto no_entry;
+ goto overflow;
min = mas->max + 1;
level = 0;
do {
if (ma_is_root(node))
- goto no_entry;
+ goto overflow;
/* Walk up. */
if (unlikely(mas_ascend(mas)))
@@ -4574,6 +4545,10 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
pivots = ma_pivots(node, mt);
mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
+ tmp = mte_to_node(enode);
+ mt = mte_node_type(enode);
+ pivots = ma_pivots(tmp, mt);
+ mas->end = ma_data_end(tmp, mt, pivots, mas->max);
if (unlikely(ma_dead_node(node)))
return 1;
@@ -4581,11 +4556,11 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
mas->min = min;
return 0;
-no_entry:
+overflow:
if (unlikely(ma_dead_node(node)))
return 1;
- mas->node = MAS_NONE;
+ mas->status = ma_overflow;
return 0;
}
@@ -4600,15 +4575,13 @@ no_entry:
*
* Return: The entry in the next slot which is possibly NULL
*/
-static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty,
- bool set_overflow)
+static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
{
void __rcu **slots;
unsigned long *pivots;
unsigned long pivot;
enum maple_type type;
struct maple_node *node;
- unsigned char data_end;
unsigned long save_point = mas->last;
void *entry;
@@ -4616,42 +4589,45 @@ retry:
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
- data_end = ma_data_end(node, type, pivots, mas->max);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (mas->max >= max) {
- if (likely(mas->offset < data_end))
+ if (likely(mas->offset < mas->end))
pivot = pivots[mas->offset];
else
- goto overflow;
+ pivot = mas->max;
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
- if (pivot >= max)
- goto overflow;
+ if (pivot >= max) { /* Was at the limit, next will extend beyond */
+ mas->status = ma_overflow;
+ return NULL;
+ }
}
- if (likely(mas->offset < data_end)) {
+ if (likely(mas->offset < mas->end)) {
mas->index = pivots[mas->offset] + 1;
again:
mas->offset++;
- if (likely(mas->offset < data_end))
+ if (likely(mas->offset < mas->end))
mas->last = pivots[mas->offset];
else
mas->last = mas->max;
} else {
+ if (mas->last >= max) {
+ mas->status = ma_overflow;
+ return NULL;
+ }
+
if (mas_next_node(mas, node, max)) {
mas_rewalk(mas, save_point);
goto retry;
}
- if (WARN_ON_ONCE(mas_is_none(mas))) {
- mas->node = MAS_OVERFLOW;
+ if (WARN_ON_ONCE(mas_is_overflow(mas)))
return NULL;
- goto overflow;
- }
mas->offset = 0;
mas->index = mas->min;
@@ -4669,21 +4645,18 @@ again:
if (entry)
return entry;
+
if (!empty) {
- if (mas->last >= max)
- goto overflow;
+ if (mas->last >= max) {
+ mas->status = ma_overflow;
+ return NULL;
+ }
mas->index = mas->last + 1;
- /* Node cannot end on NULL, so it's safe to short-cut here */
goto again;
}
return entry;
-
-overflow:
- if (set_overflow)
- mas->node = MAS_OVERFLOW;
- return NULL;
}
/*
@@ -4702,11 +4675,11 @@ overflow:
static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
{
if (mas->last >= limit) {
- mas->node = MAS_OVERFLOW;
+ mas->status = ma_overflow;
return NULL;
}
- return mas_next_slot(mas, limit, false, true);
+ return mas_next_slot(mas, limit, false);
}
/*
@@ -4874,7 +4847,7 @@ done:
* @mas: The maple state.
*
* mas->index and mas->last will be set to the range if there is a value. If
- * mas->node is MAS_NONE, reset to MAS_START.
+ * mas->status is ma_none, reset to ma_start.
*
* Return: the entry at the location or %NULL.
*/
@@ -4883,7 +4856,7 @@ void *mas_walk(struct ma_state *mas)
void *entry;
if (!mas_is_active(mas) || !mas_is_start(mas))
- mas->node = MAS_START;
+ mas->status = ma_start;
retry:
entry = mas_state_walk(mas);
if (mas_is_start(mas)) {
@@ -4899,7 +4872,7 @@ retry:
mas->index = 1;
mas->last = ULONG_MAX;
- mas->node = MAS_NONE;
+ mas->status = ma_none;
return NULL;
}
@@ -5026,6 +4999,7 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
unsigned char offset;
unsigned long *pivots;
enum maple_type mt;
+ struct maple_node *node;
if (min > max)
return -EINVAL;
@@ -5056,12 +5030,14 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
if (unlikely(offset == MAPLE_NODE_SLOTS))
return -EBUSY;
+ node = mas_mn(mas);
mt = mte_node_type(mas->node);
- pivots = ma_pivots(mas_mn(mas), mt);
+ pivots = ma_pivots(node, mt);
min = mas_safe_min(mas, pivots, offset);
if (mas->index < min)
mas->index = min;
mas->last = mas->index + size - 1;
+ mas->end = ma_data_end(node, mt, pivots, mas->max);
return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area);
@@ -5122,6 +5098,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
mas->last = max;
mas->index = mas->last - size + 1;
+ mas->end = mas_data_end(mas);
return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area_rev);
@@ -5503,7 +5480,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
node_size = mas_wr_new_end(&wr_mas);
/* Slot store, does not require additional nodes */
- if (node_size == wr_mas.node_end) {
+ if (node_size == mas->end) {
/* reuse node */
if (!mt_in_rcu(mas->tree))
return 0;
@@ -5518,7 +5495,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
goto ask_now;
}
- /* New root needs a singe node */
+ /* New root needs a single node */
if (unlikely(mte_is_root(mas->node)))
goto ask_now;
@@ -5566,7 +5543,7 @@ void mas_destroy(struct ma_state *mas)
mas_start(mas);
mtree_range_walk(mas);
- end = mas_data_end(mas) + 1;
+ end = mas->end + 1;
if (end < mt_min_slot_count(mas->node) - 1)
mas_destroy_rebalance(mas, end);
@@ -5584,7 +5561,7 @@ void mas_destroy(struct ma_state *mas)
mt_free_bulk(count, (void __rcu **)&node->slot[1]);
total -= count;
}
- kmem_cache_free(maple_node_cache, node);
+ mt_free_one(ma_mnode_ptr(node));
total--;
}
@@ -5654,33 +5631,46 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
}
EXPORT_SYMBOL_GPL(mas_expected_entries);
-static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
+static bool mas_next_setup(struct ma_state *mas, unsigned long max,
void **entry)
{
bool was_none = mas_is_none(mas);
if (unlikely(mas->last >= max)) {
- mas->node = MAS_OVERFLOW;
+ mas->status = ma_overflow;
return true;
}
- if (mas_is_active(mas))
+ switch (mas->status) {
+ case ma_active:
return false;
-
- if (mas_is_none(mas) || mas_is_paused(mas)) {
- mas->node = MAS_START;
- } else if (mas_is_overflow(mas)) {
+ case ma_none:
+ fallthrough;
+ case ma_pause:
+ mas->status = ma_start;
+ fallthrough;
+ case ma_start:
+ mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+ break;
+ case ma_overflow:
/* Overflowed before, but the max changed */
- mas->node = MAS_START;
- } else if (mas_is_underflow(mas)) {
- mas->node = MAS_START;
+ mas->status = ma_active;
+ break;
+ case ma_underflow:
+ /* The user expects the mas to be one before where it is */
+ mas->status = ma_active;
*entry = mas_walk(mas);
if (*entry)
return true;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
- if (mas_is_start(mas))
- *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+ if (likely(mas_is_active(mas))) /* Fast path */
+ return false;
if (mas_is_ptr(mas)) {
*entry = NULL;
@@ -5690,7 +5680,7 @@ static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
}
mas->index = 1;
mas->last = ULONG_MAX;
- mas->node = MAS_NONE;
+ mas->status = ma_none;
return true;
}
@@ -5719,7 +5709,7 @@ void *mas_next(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, false, true);
+ return mas_next_slot(mas, max, false);
}
EXPORT_SYMBOL_GPL(mas_next);
@@ -5742,7 +5732,7 @@ void *mas_next_range(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, true, true);
+ return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_next_range);
@@ -5770,37 +5760,48 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
}
EXPORT_SYMBOL_GPL(mt_next);
-static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
- void **entry)
+static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry)
{
if (unlikely(mas->index <= min)) {
- mas->node = MAS_UNDERFLOW;
+ mas->status = ma_underflow;
return true;
}
- if (mas_is_active(mas))
+ switch (mas->status) {
+ case ma_active:
return false;
-
- if (mas_is_overflow(mas)) {
- mas->node = MAS_START;
+ case ma_start:
+ break;
+ case ma_none:
+ fallthrough;
+ case ma_pause:
+ mas->status = ma_start;
+ break;
+ case ma_underflow:
+ /* underflowed before but the min changed */
+ mas->status = ma_active;
+ break;
+ case ma_overflow:
+ /* User expects mas to be one after where it is */
+ mas->status = ma_active;
*entry = mas_walk(mas);
if (*entry)
return true;
- }
-
- if (mas_is_none(mas) || mas_is_paused(mas)) {
- mas->node = MAS_START;
- } else if (mas_is_underflow(mas)) {
- /* underflowed before but the min changed */
- mas->node = MAS_START;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
if (mas_is_start(mas))
mas_walk(mas);
if (unlikely(mas_is_ptr(mas))) {
- if (!mas->index)
- goto none;
+ if (!mas->index) {
+ mas->status = ma_none;
+ return true;
+ }
mas->index = mas->last = 0;
*entry = mas_root(mas);
return true;
@@ -5810,7 +5811,7 @@ static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
if (mas->index) {
/* Walked to out-of-range pointer? */
mas->index = mas->last = 0;
- mas->node = MAS_ROOT;
+ mas->status = ma_root;
*entry = mas_root(mas);
return true;
}
@@ -5818,10 +5819,6 @@ static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
}
return false;
-
-none:
- mas->node = MAS_NONE;
- return true;
}
/**
@@ -5830,7 +5827,7 @@ none:
* @min: The minimum value to check.
*
* Must hold rcu_read_lock or the write lock.
- * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
+ * Will reset mas to ma_start if the status is ma_none. Will stop on not
* searchable nodes.
*
* Return: the previous value or %NULL.
@@ -5842,7 +5839,7 @@ void *mas_prev(struct ma_state *mas, unsigned long min)
if (mas_prev_setup(mas, min, &entry))
return entry;
- return mas_prev_slot(mas, min, false, true);
+ return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_prev);
@@ -5853,7 +5850,7 @@ EXPORT_SYMBOL_GPL(mas_prev);
*
* Sets @mas->index and @mas->last to the range.
* Must hold rcu_read_lock or the write lock.
- * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
+ * Will reset mas to ma_start if the status is ma_none. Will stop on not
* searchable nodes.
*
* Return: the previous value or %NULL.
@@ -5865,7 +5862,7 @@ void *mas_prev_range(struct ma_state *mas, unsigned long min)
if (mas_prev_setup(mas, min, &entry))
return entry;
- return mas_prev_slot(mas, min, true, true);
+ return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_prev_range);
@@ -5908,7 +5905,8 @@ EXPORT_SYMBOL_GPL(mt_prev);
*/
void mas_pause(struct ma_state *mas)
{
- mas->node = MAS_PAUSE;
+ mas->status = ma_pause;
+ mas->node = NULL;
}
EXPORT_SYMBOL_GPL(mas_pause);
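For orientation, a minimal sketch of the pause/resume pattern that the new ma_pause status backs; the tree, callback and headers here are illustrative and not taken from this patch:

#include <linux/maple_tree.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Walk every entry, dropping the RCU read lock periodically. */
static void walk_all_entries(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX) {
		/* ... inspect entry ... */
		if (need_resched()) {
			mas_pause(&mas);	/* position dropped, status set to ma_pause */
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();	/* the next mas_find() resumes after the last range */
		}
	}
	rcu_read_unlock();
}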
@@ -5920,35 +5918,54 @@ EXPORT_SYMBOL_GPL(mas_pause);
*
* Returns: True if entry is the answer, false otherwise.
*/
-static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
- void **entry)
+static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry)
{
- if (mas_is_active(mas)) {
+ switch (mas->status) {
+ case ma_active:
if (mas->last < max)
return false;
-
return true;
- }
-
- if (mas_is_paused(mas)) {
+ case ma_start:
+ break;
+ case ma_pause:
if (unlikely(mas->last >= max))
return true;
mas->index = ++mas->last;
- mas->node = MAS_START;
- } else if (mas_is_none(mas)) {
+ mas->status = ma_start;
+ break;
+ case ma_none:
if (unlikely(mas->last >= max))
return true;
mas->index = mas->last;
- mas->node = MAS_START;
- } else if (mas_is_overflow(mas) || mas_is_underflow(mas)) {
- if (mas->index > max) {
- mas->node = MAS_OVERFLOW;
+ mas->status = ma_start;
+ break;
+ case ma_underflow:
+ /* mas is pointing at the entry before; it was unable to go lower */
+ if (unlikely(mas->index >= max)) {
+ mas->status = ma_overflow;
return true;
}
- mas->node = MAS_START;
+ mas->status = ma_active;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ break;
+ case ma_overflow:
+ if (unlikely(mas->last >= max))
+ return true;
+
+ mas->status = ma_active;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
if (mas_is_start(mas)) {
@@ -5962,12 +5979,11 @@ static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
}
- if (unlikely(!mas_searchable(mas))) {
- if (unlikely(mas_is_ptr(mas)))
- goto ptr_out_of_range;
+ if (unlikely(mas_is_ptr(mas)))
+ goto ptr_out_of_range;
+ if (unlikely(mas_is_none(mas)))
return true;
- }
if (mas->index == max)
return true;
@@ -5975,7 +5991,7 @@ static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
return false;
ptr_out_of_range:
- mas->node = MAS_NONE;
+ mas->status = ma_none;
mas->index = 1;
mas->last = ULONG_MAX;
return true;
@@ -5989,7 +6005,7 @@ ptr_out_of_range:
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_overflow.
*
* Return: The entry or %NULL.
*/
@@ -6001,7 +6017,10 @@ void *mas_find(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, false, false);
+ entry = mas_next_slot(mas, max, false);
+ /* Ignore overflow */
+ mas->status = ma_active;
+ return entry;
}
EXPORT_SYMBOL_GPL(mas_find);
@@ -6013,7 +6032,7 @@ EXPORT_SYMBOL_GPL(mas_find);
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_overflow.
*
* Return: The entry or %NULL.
*/
@@ -6025,7 +6044,7 @@ void *mas_find_range(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, true, false);
+ return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_find_range);
@@ -6037,36 +6056,48 @@ EXPORT_SYMBOL_GPL(mas_find_range);
*
* Returns: True if entry is the answer, false otherwise.
*/
-static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
+static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
void **entry)
{
- if (mas_is_active(mas)) {
- if (mas->index > min)
- return false;
-
- return true;
- }
- if (mas_is_paused(mas)) {
+ switch (mas->status) {
+ case ma_active:
+ goto active;
+ case ma_start:
+ break;
+ case ma_pause:
if (unlikely(mas->index <= min)) {
- mas->node = MAS_NONE;
+ mas->status = ma_underflow;
return true;
}
- mas->node = MAS_START;
mas->last = --mas->index;
- } else if (mas_is_none(mas)) {
+ mas->status = ma_start;
+ break;
+ case ma_none:
if (mas->index <= min)
goto none;
mas->last = mas->index;
- mas->node = MAS_START;
- } else if (mas_is_underflow(mas) || mas_is_overflow(mas)) {
- if (mas->last <= min) {
- mas->node = MAS_UNDERFLOW;
+ mas->status = ma_start;
+ break;
+ case ma_overflow: /* user expects the mas to be one after where it is */
+ if (unlikely(mas->index <= min)) {
+ mas->status = ma_underflow;
return true;
}
- mas->node = MAS_START;
+ mas->status = ma_active;
+ break;
+ case ma_underflow: /* user expects the mas to be one before where it is */
+ if (unlikely(mas->index <= min))
+ return true;
+
+ mas->status = ma_active;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
if (mas_is_start(mas)) {
@@ -6079,29 +6110,28 @@ static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
return true;
}
- if (unlikely(!mas_searchable(mas))) {
- if (mas_is_ptr(mas))
- goto none;
+ if (unlikely(mas_is_ptr(mas)))
+ goto none;
- if (mas_is_none(mas)) {
- /*
- * Walked to the location, and there was nothing so the
- * previous location is 0.
- */
- mas->last = mas->index = 0;
- mas->node = MAS_ROOT;
- *entry = mas_root(mas);
- return true;
- }
+ if (unlikely(mas_is_none(mas))) {
+ /*
+ * Walked to the location, and there was nothing so the previous
+ * location is 0.
+ */
+ mas->last = mas->index = 0;
+ mas->status = ma_root;
+ *entry = mas_root(mas);
+ return true;
}
+active:
if (mas->index < min)
return true;
return false;
none:
- mas->node = MAS_NONE;
+ mas->status = ma_none;
return true;
}
@@ -6114,7 +6144,7 @@ none:
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_underflow.
*
* Return: The entry or %NULL.
*/
@@ -6126,7 +6156,7 @@ void *mas_find_rev(struct ma_state *mas, unsigned long min)
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
- return mas_prev_slot(mas, min, false, false);
+ return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_find_rev);
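A small hedged sketch of a reverse search built on this helper; the function name and the "hint" parameter are hypothetical:

#include <linux/maple_tree.h>
#include <linux/rcupdate.h>

/* Check whether any entry exists at or below @hint. */
static bool has_entry_at_or_below(struct maple_tree *mt, unsigned long hint)
{
	MA_STATE(mas, mt, hint, hint);
	bool found;

	rcu_read_lock();
	found = mas_find_rev(&mas, 0) != NULL;	/* search downwards to index 0 */
	rcu_read_unlock();

	return found;
}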
@@ -6140,7 +6170,7 @@ EXPORT_SYMBOL_GPL(mas_find_rev);
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_underflow.
*
* Return: The entry or %NULL.
*/
@@ -6152,7 +6182,7 @@ void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
- return mas_prev_slot(mas, min, true, false);
+ return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_find_range_rev);
@@ -6172,8 +6202,8 @@ void *mas_erase(struct ma_state *mas)
void *entry;
MA_WR_STATE(wr_mas, mas, NULL);
- if (mas_is_none(mas) || mas_is_paused(mas))
- mas->node = MAS_START;
+ if (!mas_is_active(mas) || !mas_is_start(mas))
+ mas->status = ma_start;
/* Retry unnecessary when holding the write lock. */
entry = mas_state_walk(mas);
@@ -6218,7 +6248,7 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp)
if (!mas_allocated(mas))
return false;
- mas->node = MAS_START;
+ mas->status = ma_start;
return true;
}
@@ -6476,6 +6506,278 @@ void *mtree_erase(struct maple_tree *mt, unsigned long index)
}
EXPORT_SYMBOL(mtree_erase);
+/*
+ * mas_dup_free() - Free an incomplete duplication of a tree.
+ * @mas: The maple state of an incomplete tree.
+ *
+ * The @mas->node passed in indicates the node on which the allocation failed.
+ * This function frees all nodes starting from @mas->node in the
+ * reverse order of mas_dup_build(). There is no need to hold the source tree
+ * lock at this time.
+ */
+static void mas_dup_free(struct ma_state *mas)
+{
+ struct maple_node *node;
+ enum maple_type type;
+ void __rcu **slots;
+ unsigned char count, i;
+
+ /* Maybe the first node allocation failed. */
+ if (mas_is_none(mas))
+ return;
+
+ while (!mte_is_root(mas->node)) {
+ mas_ascend(mas);
+ if (mas->offset) {
+ mas->offset--;
+ do {
+ mas_descend(mas);
+ mas->offset = mas_data_end(mas);
+ } while (!mte_is_leaf(mas->node));
+
+ mas_ascend(mas);
+ }
+
+ node = mte_to_node(mas->node);
+ type = mte_node_type(mas->node);
+ slots = ma_slots(node, type);
+ count = mas_data_end(mas) + 1;
+ for (i = 0; i < count; i++)
+ ((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
+ mt_free_bulk(count, slots);
+ }
+
+ node = mte_to_node(mas->node);
+ mt_free_one(node);
+}
+
+/*
+ * mas_copy_node() - Copy a maple node and replace the parent.
+ * @mas: The maple state of source tree.
+ * @new_mas: The maple state of new tree.
+ * @parent: The parent of the new node.
+ *
+ * Copy @mas->node to @new_mas->node, and set @parent to be the parent of
+ * @new_mas->node.
+ */
+static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
+ struct maple_pnode *parent)
+{
+ struct maple_node *node = mte_to_node(mas->node);
+ struct maple_node *new_node = mte_to_node(new_mas->node);
+ unsigned long val;
+
+ /* Copy the node completely. */
+ memcpy(new_node, node, sizeof(struct maple_node));
+ /* Update the parent node pointer. */
+ val = (unsigned long)node->parent & MAPLE_NODE_MASK;
+ new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
+}
+
+/*
+ * mas_dup_alloc() - Allocate child nodes for a maple node.
+ * @mas: The maple state of source tree.
+ * @new_mas: The maple state of new tree.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * This function allocates child nodes for @new_mas->node during the duplication
+ * process. If memory allocation fails, @mas is set to -ENOMEM.
+ */
+static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
+ gfp_t gfp)
+{
+ struct maple_node *node = mte_to_node(mas->node);
+ struct maple_node *new_node = mte_to_node(new_mas->node);
+ enum maple_type type;
+ unsigned char request, count, i;
+ void __rcu **slots;
+ void __rcu **new_slots;
+ unsigned long val;
+
+ /* Allocate memory for child nodes. */
+ type = mte_node_type(mas->node);
+ new_slots = ma_slots(new_node, type);
+ request = mas_data_end(mas) + 1;
+ count = mt_alloc_bulk(gfp, request, (void **)new_slots);
+ if (unlikely(count < request)) {
+ memset(new_slots, 0, request * sizeof(void *));
+ mas_set_err(mas, -ENOMEM);
+ return;
+ }
+
+ /* Restore node type information in slots. */
+ slots = ma_slots(node, type);
+ for (i = 0; i < count; i++) {
+ val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
+ val &= MAPLE_NODE_MASK;
+ ((unsigned long *)new_slots)[i] |= val;
+ }
+}
+
+/*
+ * mas_dup_build() - Build a new maple tree from a source tree
+ * @mas: The maple state of the source tree; it must be in the ma_start state.
+ * @new_mas: The maple state of the new tree; it must be in the ma_start state.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * This function builds a new tree in DFS preorder. If the memory allocation
+ * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
+ * last node. mas_dup_free() will then free the incomplete duplication of the tree.
+ *
+ * Note that the attributes of the two trees need to be exactly the same, and the
+ * new tree needs to be empty; otherwise -EINVAL will be set in @mas.
+ */
+static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
+ gfp_t gfp)
+{
+ struct maple_node *node;
+ struct maple_pnode *parent = NULL;
+ struct maple_enode *root;
+ enum maple_type type;
+
+ if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
+ unlikely(!mtree_empty(new_mas->tree))) {
+ mas_set_err(mas, -EINVAL);
+ return;
+ }
+
+ root = mas_start(mas);
+ if (mas_is_ptr(mas) || mas_is_none(mas))
+ goto set_new_tree;
+
+ node = mt_alloc_one(gfp);
+ if (!node) {
+ new_mas->status = ma_none;
+ mas_set_err(mas, -ENOMEM);
+ return;
+ }
+
+ type = mte_node_type(mas->node);
+ root = mt_mk_node(node, type);
+ new_mas->node = root;
+ new_mas->min = 0;
+ new_mas->max = ULONG_MAX;
+ root = mte_mk_root(root);
+ while (1) {
+ mas_copy_node(mas, new_mas, parent);
+ if (!mte_is_leaf(mas->node)) {
+ /* Only allocate child nodes for non-leaf nodes. */
+ mas_dup_alloc(mas, new_mas, gfp);
+ if (unlikely(mas_is_err(mas)))
+ return;
+ } else {
+ /*
+ * This is the last leaf node and duplication is
+ * completed.
+ */
+ if (mas->max == ULONG_MAX)
+ goto done;
+
+ /* This is not the last leaf node and needs to go up. */
+ do {
+ mas_ascend(mas);
+ mas_ascend(new_mas);
+ } while (mas->offset == mas_data_end(mas));
+
+ /* Move to the next subtree. */
+ mas->offset++;
+ new_mas->offset++;
+ }
+
+ mas_descend(mas);
+ parent = ma_parent_ptr(mte_to_node(new_mas->node));
+ mas_descend(new_mas);
+ mas->offset = 0;
+ new_mas->offset = 0;
+ }
+done:
+ /* Specially handle the parent of the root node. */
+ mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
+set_new_tree:
+ /* Make them the same height */
+ new_mas->tree->ma_flags = mas->tree->ma_flags;
+ rcu_assign_pointer(new_mas->tree->ma_root, root);
+}
+
+/**
+ * __mt_dup(): Duplicate an entire maple tree
+ * @mt: The source maple tree
+ * @new: The new maple tree
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
+ * traversal. It uses memcpy() to copy nodes from the source tree and allocates
+ * new child nodes for non-leaf nodes. Each new node is identical to its
+ * source node except for all the addresses stored in it. This is faster than
+ * traversing all elements in the source tree and inserting them one by one into
+ * the new tree.
+ * The user needs to ensure that the attributes of the source tree and the new
+ * tree are the same, and that the new tree is an empty tree; otherwise
+ * -EINVAL will be returned.
+ * Note that the user needs to manually lock the source tree and the new tree.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
+ * the attributes of the two trees are different or the new tree is not an empty
+ * tree.
+ */
+int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
+{
+ int ret = 0;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(new_mas, new, 0, 0);
+
+ mas_dup_build(&mas, &new_mas, gfp);
+ if (unlikely(mas_is_err(&mas))) {
+ ret = xa_err(mas.node);
+ if (ret == -ENOMEM)
+ mas_dup_free(&new_mas);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(__mt_dup);
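A rough usage sketch, assuming both trees use the built-in spinlock and the nested-lock helpers from this series; the names are hypothetical and the caller is responsible for the locking, as noted above:

#include <linux/gfp.h>
#include <linux/maple_tree.h>

/* "src" must not change during the copy; "dst" must be empty with equal flags. */
static int dup_tree_locked(struct maple_tree *src, struct maple_tree *dst)
{
	int err;

	mtree_lock(dst);
	mtree_lock_nested(src, SINGLE_DEPTH_NESTING);
	err = __mt_dup(src, dst, GFP_KERNEL);
	mtree_unlock(src);
	mtree_unlock(dst);

	return err;	/* on -ENOMEM the incomplete copy was already freed */
}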
+
+/**
+ * mtree_dup(): Duplicate an entire maple tree
+ * @mt: The source maple tree
+ * @new: The new maple tree
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
+ * traversal. It uses memcpy() to copy nodes from the source tree and allocates
+ * new child nodes for non-leaf nodes. Each new node is identical to its
+ * source node except for all the addresses stored in it. This is faster than
+ * traversing all elements in the source tree and inserting them one by one into
+ * the new tree.
+ * The user needs to ensure that the attributes of the source tree and the new
+ * tree are the same, and that the new tree is an empty tree; otherwise
+ * -EINVAL will be returned.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
+ * the attributes of the two trees are different or the new tree is not an empty
+ * tree.
+ */
+int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
+{
+ int ret = 0;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(new_mas, new, 0, 0);
+
+ mas_lock(&new_mas);
+ mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
+ mas_dup_build(&mas, &new_mas, gfp);
+ mas_unlock(&mas);
+ if (unlikely(mas_is_err(&mas))) {
+ ret = xa_err(mas.node);
+ if (ret == -ENOMEM)
+ mas_dup_free(&new_mas);
+ }
+
+ mas_unlock(&new_mas);
+ return ret;
+}
+EXPORT_SYMBOL(mtree_dup);
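And the self-locking variant in use, again with hypothetical names; mtree_dup() acquires both tree locks itself:

#include <linux/gfp.h>
#include <linux/maple_tree.h>
#include <linux/printk.h>

static DEFINE_MTREE(src_tree);
static DEFINE_MTREE(dst_tree);	/* must still be empty when the copy is made */

static int snapshot_src_tree(void)
{
	int err;

	err = mtree_dup(&src_tree, &dst_tree, GFP_KERNEL);
	if (err == -EINVAL)
		pr_warn("tree attributes differ or destination is not empty\n");

	return err;	/* on -ENOMEM the incomplete copy was already freed */
}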
+
/**
* __mt_destroy() - Walk and free all nodes of a locked maple tree.
* @mt: The maple tree
@@ -6490,7 +6792,7 @@ void __mt_destroy(struct maple_tree *mt)
if (xa_is_node(root))
mte_destroy_walk(root, mt);
- mt->ma_flags = 0;
+ mt->ma_flags = mt_attr(mt);
}
EXPORT_SYMBOL_GPL(__mt_destroy);
@@ -6549,7 +6851,7 @@ retry:
if (entry)
goto unlock;
- while (mas_searchable(&mas) && (mas.last < max)) {
+ while (mas_is_active(&mas) && (mas.last < max)) {
entry = mas_next_entry(&mas, max);
if (likely(entry && !xa_is_zero(entry)))
break;
@@ -6631,26 +6933,6 @@ unsigned int mt_nr_allocated(void)
return kmem_cache_nr_allocated(maple_node_cache);
}
-/*
- * mas_dead_node() - Check if the maple state is pointing to a dead node.
- * @mas: The maple state
- * @index: The index to restore in @mas.
- *
- * Used in test code.
- * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
- */
-static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
-{
- if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
- return 0;
-
- if (likely(!mte_dead_node(mas->node)))
- return 0;
-
- mas_rewalk(mas, index);
- return 1;
-}
-
void mt_cache_shrink(void)
{
}
@@ -6689,11 +6971,11 @@ static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
{
- struct maple_enode *p = MAS_NONE, *mn = mas->node;
+ struct maple_enode *p, *mn = mas->node;
unsigned long p_min, p_max;
mas_next_node(mas, mas_mn(mas), max);
- if (!mas_is_none(mas))
+ if (!mas_is_overflow(mas))
return;
if (mte_is_root(mn))
@@ -6706,7 +6988,7 @@ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
p_min = mas->min;
p_max = mas->max;
mas_prev_node(mas, 0);
- } while (!mas_is_none(mas));
+ } while (!mas_is_underflow(mas));
mas->node = p;
mas->max = p_max;
@@ -6729,7 +7011,6 @@ static void mt_dump_range(unsigned long min, unsigned long max,
else
pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
break;
- default:
case mt_dump_dec:
if (min == max)
pr_info("%.*s%lu: ", depth * 2, spaces, min);
@@ -6769,7 +7050,6 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry,
case mt_dump_hex:
pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
break;
- default:
case mt_dump_dec:
pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
}
@@ -6799,7 +7079,6 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry,
pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
break;
- default:
case mt_dump_dec:
pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
node, last, max, i);
@@ -6824,7 +7103,6 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
case mt_dump_hex:
pr_cont("%lx ", node->gap[i]);
break;
- default:
case mt_dump_dec:
pr_cont("%lu ", node->gap[i]);
}
@@ -6835,7 +7113,6 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
case mt_dump_hex:
pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
break;
- default:
case mt_dump_dec:
pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
}
@@ -6976,7 +7253,8 @@ static void mas_validate_gaps(struct ma_state *mas)
counted:
if (mt == maple_arange_64) {
- offset = ma_meta_gap(node, mt);
+ MT_BUG_ON(mas->tree, !gaps);
+ offset = ma_meta_gap(node);
if (offset > i) {
pr_err("gap offset %p[%u] is invalid\n", node, offset);
MT_BUG_ON(mas->tree, 1);
@@ -6988,7 +7266,6 @@ counted:
MT_BUG_ON(mas->tree, 1);
}
- MT_BUG_ON(mas->tree, !gaps);
for (i++ ; i < mt_slot_count(mte); i++) {
if (gaps[i] != 0) {
pr_err("gap %p[%u] beyond node limit != 0\n",
@@ -7166,7 +7443,7 @@ static void mt_validate_nulls(struct maple_tree *mt)
MA_STATE(mas, mt, 0, 0);
mas_start(&mas);
- if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
+ if (mas_is_none(&mas) || (mas_is_ptr(&mas)))
return;
while (!mte_is_leaf(mas.node))
@@ -7183,7 +7460,7 @@ static void mt_validate_nulls(struct maple_tree *mt)
last = entry;
if (offset == mas_data_end(&mas)) {
mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
- if (mas_is_none(&mas))
+ if (mas_is_overflow(&mas))
return;
offset = 0;
slots = ma_slots(mte_to_node(mas.node),
@@ -7192,7 +7469,7 @@ static void mt_validate_nulls(struct maple_tree *mt)
offset++;
}
- } while (!mas_is_none(&mas));
+ } while (!mas_is_overflow(&mas));
}
/*
@@ -7207,13 +7484,13 @@ void mt_validate(struct maple_tree *mt)
MA_STATE(mas, mt, 0, 0);
rcu_read_lock();
mas_start(&mas);
- if (!mas_searchable(&mas))
+ if (!mas_is_active(&mas))
goto done;
while (!mte_is_leaf(mas.node))
mas_descend(&mas);
- while (!mas_is_none(&mas)) {
+ while (!mas_is_overflow(&mas)) {
MAS_WARN_ON(&mas, mte_dead_node(mas.node));
end = mas_data_end(&mas);
if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
@@ -7238,16 +7515,35 @@ EXPORT_SYMBOL_GPL(mt_validate);
void mas_dump(const struct ma_state *mas)
{
pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
- if (mas_is_none(mas))
- pr_err("(MAS_NONE) ");
- else if (mas_is_ptr(mas))
- pr_err("(MAS_ROOT) ");
- else if (mas_is_start(mas))
- pr_err("(MAS_START) ");
- else if (mas_is_paused(mas))
- pr_err("(MAS_PAUSED) ");
-
- pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last);
+ switch (mas->status) {
+ case ma_active:
+ pr_err("(ma_active)");
+ break;
+ case ma_none:
+ pr_err("(ma_none)");
+ break;
+ case ma_root:
+ pr_err("(ma_root)");
+ break;
+ case ma_start:
+ pr_err("(ma_start) ");
+ break;
+ case ma_pause:
+ pr_err("(ma_pause) ");
+ break;
+ case ma_overflow:
+ pr_err("(ma_overflow) ");
+ break;
+ case ma_underflow:
+ pr_err("(ma_underflow) ");
+ break;
+ case ma_error:
+ pr_err("(ma_error) ");
+ break;
+ }
+
+ pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
+ mas->index, mas->last);
pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
if (mas->index > mas->last)
@@ -7260,7 +7556,7 @@ void mas_wr_dump(const struct ma_wr_state *wr_mas)
pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
wr_mas->node, wr_mas->r_min, wr_mas->r_max);
pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
- wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
+ wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
wr_mas->end_piv);
}
EXPORT_SYMBOL_GPL(mas_wr_dump);
diff --git a/lib/nlattr.c b/lib/nlattr.c
index dc15e7888fc1..be9c576b6e2d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -30,6 +30,8 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
+ [NLA_BE16] = sizeof(__be16),
+ [NLA_BE32] = sizeof(__be32),
};
static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
@@ -43,6 +45,8 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
+ [NLA_BE16] = sizeof(__be16),
+ [NLA_BE32] = sizeof(__be32),
};
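With these table entries in place, big-endian attributes get the usual minimum-length validation; a hedged illustration with hypothetical attribute names:

#include <net/netlink.h>

enum {
	HYP_ATTR_UNSPEC,
	HYP_ATTR_ADDR4,		/* IPv4 address, network byte order */
	HYP_ATTR_PORT,		/* transport port, network byte order */
	__HYP_ATTR_MAX,
};

static const struct nla_policy hyp_policy[__HYP_ATTR_MAX] = {
	[HYP_ATTR_ADDR4]	= { .type = NLA_BE32 },
	[HYP_ATTR_PORT]		= { .type = NLA_BE16 },
};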
/*
@@ -758,7 +762,7 @@ EXPORT_SYMBOL(nla_find);
* @dstsize: Size of destination buffer.
*
* Copies at most dstsize - 1 bytes into the destination buffer.
- * Unlike strlcpy the destination buffer is always padded out.
+ * Unlike strscpy() the destination buffer is always padded out.
*
* Return:
* * srclen - Returns @nla length (not including the trailing %NUL).
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 34db0b3aa502..c527f6b75789 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -6,6 +6,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <kunit/device.h>
#include <kunit/test.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -618,7 +619,7 @@ static void overflow_allocation_test(struct kunit *test)
} while (0)
/* Create dummy device for devm_kmalloc()-family tests. */
- dev = root_device_register(device_name);
+ dev = kunit_device_register(test, device_name);
KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),
"Cannot register test device\n");
@@ -634,8 +635,6 @@ static void overflow_allocation_test(struct kunit *test)
check_allocation_overflow(devm_kmalloc);
check_allocation_overflow(devm_kzalloc);
- device_unregister(dev);
-
kunit_info(test, "%d allocation overflow tests finished\n", count);
#undef check_allocation_overflow
}
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index b25dfc9c7759..cd0b9e95f499 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -158,7 +158,7 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
static int raid6_s390vx$#_valid(void)
{
- return MACHINE_HAS_VX;
+ return cpu_has_vx();
}
const struct raid6_calls raid6_s390vx$# = {
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index d0a5081dfd12..92c6b1fd8989 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -388,11 +388,6 @@ static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
unsigned int shallow_depth;
/*
- * For each batch, we wake up one queue. We need to make sure that our
- * batch size is small enough that the full depth of the bitmap,
- * potentially limited by a shallow depth, is enough to wake up all of
- * the queues.
- *
* Each full word of the bitmap has bits_per_word bits, and there might
* be a partial word. There are depth / bits_per_word full words and
* depth % bits_per_word bits left over. In bitwise arithmetic:
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 010c730ca7fc..f3f3436d60a9 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -13,16 +13,26 @@
* seq_buf_init() more than once to reset the seq_buf to start
* from scratch.
*/
-#include <linux/uaccess.h>
-#include <linux/seq_file.h>
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/hex.h>
+#include <linux/minmax.h>
+#include <linux/printk.h>
#include <linux/seq_buf.h>
+#include <linux/seq_file.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
/**
* seq_buf_can_fit - can the new data fit in the current buffer?
* @s: the seq_buf descriptor
* @len: The length to see if it can fit in the current buffer
*
- * Returns true if there's enough unused space in the seq_buf buffer
+ * Returns: true if there's enough unused space in the seq_buf buffer
* to fit the amount of new data according to @len.
*/
static bool seq_buf_can_fit(struct seq_buf *s, size_t len)
@@ -35,7 +45,7 @@ static bool seq_buf_can_fit(struct seq_buf *s, size_t len)
* @m: the seq_file descriptor that is the destination
* @s: the seq_buf descriptor that is the source.
*
- * Returns zero on success, non zero otherwise
+ * Returns: zero on success, non-zero otherwise.
*/
int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s)
{
@@ -50,9 +60,9 @@ int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s)
* @fmt: printf format string
* @args: va_list of arguments from a printf() type function
*
- * Writes a vnprintf() format into the sequencce buffer.
+ * Writes a vsnprintf() format into the sequence buffer.
*
- * Returns zero on success, -1 on overflow.
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
{
@@ -78,7 +88,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
*
* Writes a printf() format into the sequence buffer.
*
- * Returns zero on success, -1 on overflow.
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
{
@@ -94,12 +104,12 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
EXPORT_SYMBOL_GPL(seq_buf_printf);
/**
- * seq_buf_do_printk - printk seq_buf line by line
+ * seq_buf_do_printk - printk() seq_buf line by line
* @s: seq_buf descriptor
* @lvl: printk level
*
* printk()-s a multi-line sequential buffer line by line. The function
- * makes sure that the buffer in @s is nul terminated and safe to read
+ * makes sure that the buffer in @s is NUL-terminated and safe to read
* as a string.
*/
void seq_buf_do_printk(struct seq_buf *s, const char *lvl)
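For orientation, a small sketch of the API these kernel-doc fixes describe; the buffer size and messages are arbitrary:

#include <linux/printk.h>
#include <linux/seq_buf.h>

static void report_status(void)
{
	char buf[128];
	struct seq_buf s;

	seq_buf_init(&s, buf, sizeof(buf));
	seq_buf_printf(&s, "state: %s\n", "ok");
	seq_buf_printf(&s, "items: %d\n", 3);

	/* Print the accumulated, NUL-terminated buffer one line at a time. */
	seq_buf_do_printk(&s, KERN_INFO);
}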
@@ -139,7 +149,7 @@ EXPORT_SYMBOL_GPL(seq_buf_do_printk);
* This function will take the format and the binary array and finish
* the conversion into the ASCII string within the buffer.
*
- * Returns zero on success, -1 on overflow.
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
{
@@ -167,7 +177,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
*
* Copy a simple string into the sequence buffer.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_puts(struct seq_buf *s, const char *str)
{
@@ -196,7 +206,7 @@ EXPORT_SYMBOL_GPL(seq_buf_puts);
*
* Copy a single character into the sequence buffer.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_putc(struct seq_buf *s, unsigned char c)
{
@@ -212,7 +222,7 @@ int seq_buf_putc(struct seq_buf *s, unsigned char c)
EXPORT_SYMBOL_GPL(seq_buf_putc);
/**
- * seq_buf_putmem - write raw data into the sequenc buffer
+ * seq_buf_putmem - write raw data into the sequence buffer
* @s: seq_buf descriptor
* @mem: The raw memory to copy into the buffer
* @len: The length of the raw memory to copy (in bytes)
@@ -221,7 +231,7 @@ EXPORT_SYMBOL_GPL(seq_buf_putc);
* buffer and a strcpy() would not work. Using this function allows
* for such cases.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len)
{
@@ -249,7 +259,7 @@ int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len)
* raw memory into the buffer it writes its ASCII representation of it
* in hex characters.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
unsigned int len)
@@ -297,7 +307,7 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
*
* Write a path name into the sequence buffer.
*
- * Returns the number of written bytes on success, -1 on overflow
+ * Returns: the number of written bytes on success, -1 on overflow.
*/
int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
{
@@ -332,6 +342,7 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
* or until it reaches the end of the content in the buffer (@s->len),
* whichever comes first.
*
+ * Returns:
* On success, it returns a positive number of the number of bytes
* it copied.
*
@@ -382,11 +393,11 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, size_t start, int cnt)
* linebuf size is maximal length for one line.
* 32 * 3 - maximum bytes per line, each printed into 2 chars + 1 for
* separating space
- * 2 - spaces separating hex dump and ascii representation
- * 32 - ascii representation
+ * 2 - spaces separating hex dump and ASCII representation
+ * 32 - ASCII representation
* 1 - terminating '\0'
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 2f5aa851834e..4a7055a63d9f 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -14,15 +14,21 @@
#define pr_fmt(fmt) "stackdepot: " fmt
+#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
+#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
-#include <linux/percpu.h>
+#include <linux/poison.h>
#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
@@ -32,13 +38,12 @@
#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
-#define DEPOT_VALID_BITS 1
#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
#define DEPOT_STACK_ALIGN 4
#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
-#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
- DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
+#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
+ STACK_DEPOT_EXTRA_BITS)
#define DEPOT_POOLS_CAP 8192
#define DEPOT_MAX_POOLS \
(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
@@ -50,17 +55,33 @@ union handle_parts {
struct {
u32 pool_index : DEPOT_POOL_INDEX_BITS;
u32 offset : DEPOT_OFFSET_BITS;
- u32 valid : DEPOT_VALID_BITS;
u32 extra : STACK_DEPOT_EXTRA_BITS;
};
};
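As a quick sanity check of the repacked handle (a sketch assuming 4 KiB pages, a 32-bit handle, and the default STACK_DEPOT_EXTRA_BITS of 5; not part of this patch):

/*
 * DEPOT_OFFSET_BITS     = DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN
 *                       = 2 + 12 - 4  = 10
 * DEPOT_POOL_INDEX_BITS = 32 - DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS
 *                       = 32 - 10 - 5 = 17
 *
 * Dropping the valid bit doubles the addressable pool-index space, though
 * DEPOT_MAX_POOLS remains clamped to DEPOT_POOLS_CAP (8192).
 */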
struct stack_record {
- struct stack_record *next; /* Link in the hash table */
- u32 hash; /* Hash in the hash table */
+ struct list_head hash_list; /* Links in the hash table */
+ u32 hash; /* Hash in hash table */
u32 size; /* Number of stored frames */
- union handle_parts handle;
- unsigned long entries[]; /* Variable-sized array of frames */
+ union handle_parts handle; /* Constant after initialization */
+ refcount_t count;
+ union {
+ unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
+ struct {
+ /*
+ * An important invariant of the implementation is to
+ * only place a stack record onto the freelist iff its
+ * refcount is zero. Because stack records with a zero
+ * refcount are never considered as valid, it is safe to
+ * union @entries and freelist management state below.
+ * Conversely, as soon as an entry is off the freelist
+ * and its refcount becomes non-zero, the below must not
+ * be accessed until being placed back on the freelist.
+ */
+ struct list_head free_list; /* Links in the freelist */
+ unsigned long rcu_state; /* RCU cookie */
+ };
+ };
};
static bool stack_depot_disabled;
@@ -75,40 +96,50 @@ static bool __stack_depot_early_init_passed __initdata;
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c
-/* Hash table of pointers to stored stack traces. */
-static struct stack_record **stack_table;
+/* Hash table of stored stack records. */
+static struct list_head *stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;
-/* Array of memory regions that store stack traces. */
+/* Array of memory regions that store stack records. */
static void *stack_pools[DEPOT_MAX_POOLS];
-/* Currently used pool in stack_pools. */
-static int pool_index;
+/* Newly allocated pool that is not yet added to stack_pools. */
+static void *new_pool;
+/* Number of pools in stack_pools. */
+static int pools_num;
/* Offset to the unused space in the currently used pool. */
-static size_t pool_offset;
-/* Lock that protects the variables above. */
+static size_t pool_offset = DEPOT_POOL_SIZE;
+/* Freelist of stack records within stack_pools. */
+static LIST_HEAD(free_stacks);
+/* The lock must be held when performing pool or freelist modifications. */
static DEFINE_RAW_SPINLOCK(pool_lock);
-/*
- * Stack depot tries to keep an extra pool allocated even before it runs out
- * of space in the currently used pool.
- * This flag marks that this next extra pool needs to be allocated and
- * initialized. It has the value 0 when either the next pool is not yet
- * initialized or the limit on the number of pools is reached.
- */
-static int next_pool_required = 1;
+
+/* Statistics counters for debugfs. */
+enum depot_counter_id {
+ DEPOT_COUNTER_REFD_ALLOCS,
+ DEPOT_COUNTER_REFD_FREES,
+ DEPOT_COUNTER_REFD_INUSE,
+ DEPOT_COUNTER_FREELIST_SIZE,
+ DEPOT_COUNTER_PERSIST_COUNT,
+ DEPOT_COUNTER_PERSIST_BYTES,
+ DEPOT_COUNTER_COUNT,
+};
+static long counters[DEPOT_COUNTER_COUNT];
+static const char *const counter_names[] = {
+ [DEPOT_COUNTER_REFD_ALLOCS] = "refcounted_allocations",
+ [DEPOT_COUNTER_REFD_FREES] = "refcounted_frees",
+ [DEPOT_COUNTER_REFD_INUSE] = "refcounted_in_use",
+ [DEPOT_COUNTER_FREELIST_SIZE] = "freelist_size",
+ [DEPOT_COUNTER_PERSIST_COUNT] = "persistent_count",
+ [DEPOT_COUNTER_PERSIST_BYTES] = "persistent_bytes",
+};
+static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
static int __init disable_stack_depot(char *str)
{
- int ret;
-
- ret = kstrtobool(str, &stack_depot_disabled);
- if (!ret && stack_depot_disabled) {
- pr_info("disabled\n");
- stack_table = NULL;
- }
- return 0;
+ return kstrtobool(str, &stack_depot_disabled);
}
early_param("stack_depot_disable", disable_stack_depot);
@@ -120,6 +151,15 @@ void __init stack_depot_request_early_init(void)
__stack_depot_early_init_requested = true;
}
+/* Initialize list_head's within the hash table. */
+static void init_stack_table(unsigned long entries)
+{
+ unsigned long i;
+
+ for (i = 0; i < entries; i++)
+ INIT_LIST_HEAD(&stack_table[i]);
+}
+
/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
@@ -131,6 +171,15 @@ int __init stack_depot_early_init(void)
__stack_depot_early_init_passed = true;
/*
+ * Print disabled message even if early init has not been requested:
+ * stack_depot_init() will not print one.
+ */
+ if (stack_depot_disabled) {
+ pr_info("disabled\n");
+ return 0;
+ }
+
+ /*
* If KASAN is enabled, use the maximum order: KASAN is frequently used
* in fuzzing scenarios, which leads to a large number of different
* stack traces being stored in stack depot.
@@ -138,21 +187,25 @@ int __init stack_depot_early_init(void)
if (kasan_enabled() && !stack_bucket_number_order)
stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
- if (!__stack_depot_early_init_requested || stack_depot_disabled)
+ /*
+ * Check if early init has been requested after setting
+ * stack_bucket_number_order: stack_depot_init() uses its value.
+ */
+ if (!__stack_depot_early_init_requested)
return 0;
/*
* If stack_bucket_number_order is not set, leave entries as 0 to rely
- * on the automatic calculations performed by alloc_large_system_hash.
+ * on the automatic calculations performed by alloc_large_system_hash().
*/
if (stack_bucket_number_order)
entries = 1UL << stack_bucket_number_order;
pr_info("allocating hash table via alloc_large_system_hash\n");
stack_table = alloc_large_system_hash("stackdepot",
- sizeof(struct stack_record *),
+ sizeof(struct list_head),
entries,
STACK_HASH_TABLE_SCALE,
- HASH_EARLY | HASH_ZERO,
+ HASH_EARLY,
NULL,
&stack_hash_mask,
1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
@@ -162,6 +215,14 @@ int __init stack_depot_early_init(void)
stack_depot_disabled = true;
return -ENOMEM;
}
+ if (!entries) {
+ /*
+ * Obtain the number of entries that was calculated by
+ * alloc_large_system_hash().
+ */
+ entries = stack_hash_mask + 1;
+ }
+ init_stack_table(entries);
return 0;
}
@@ -202,7 +263,7 @@ int stack_depot_init(void)
entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
- stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
+ stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;
@@ -210,6 +271,7 @@ int stack_depot_init(void)
goto out_unlock;
}
stack_hash_mask = entries - 1;
+ init_stack_table(entries);
out_unlock:
mutex_unlock(&stack_depot_init_mutex);
@@ -218,104 +280,275 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(stack_depot_init);
-/* Uses preallocated memory to initialize a new stack depot pool. */
-static void depot_init_pool(void **prealloc)
+/*
+ * Initializes new stack pool, and updates the list of pools.
+ */
+static bool depot_init_pool(void **prealloc)
{
+ lockdep_assert_held(&pool_lock);
+
+ if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+ /* Bail out if we reached the pool limit. */
+ WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */
+ WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return false;
+ }
+
+ if (!new_pool && *prealloc) {
+ /* We have preallocated memory, use it. */
+ WRITE_ONCE(new_pool, *prealloc);
+ *prealloc = NULL;
+ }
+
+ if (!new_pool)
+ return false; /* new_pool and *prealloc are NULL */
+
+ /* Save reference to the pool to be used by depot_fetch_stack(). */
+ stack_pools[pools_num] = new_pool;
+
/*
- * If the next pool is already initialized or the maximum number of
+ * Stack depot tries to keep an extra pool allocated even before it runs
+ * out of space in the currently used pool.
+ *
+ * To indicate that a new preallocation is needed, new_pool is reset to
+ * NULL; do not reset to NULL if we have reached the maximum number of
+ * pools.
+ */
+ if (pools_num < DEPOT_MAX_POOLS)
+ WRITE_ONCE(new_pool, NULL);
+ else
+ WRITE_ONCE(new_pool, STACK_DEPOT_POISON);
+
+ /* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */
+ WRITE_ONCE(pools_num, pools_num + 1);
+ ASSERT_EXCLUSIVE_WRITER(pools_num);
+
+ pool_offset = 0;
+
+ return true;
+}
+
+/* Keeps the preallocated memory to be used for a new stack depot pool. */
+static void depot_keep_new_pool(void **prealloc)
+{
+ lockdep_assert_held(&pool_lock);
+
+ /*
+ * If a new pool is already saved or the maximum number of
* pools is reached, do not use the preallocated memory.
- * smp_load_acquire() here pairs with smp_store_release() below and
- * in depot_alloc_stack().
*/
- if (!smp_load_acquire(&next_pool_required))
+ if (new_pool)
return;
- /* Check if the current pool is not yet allocated. */
- if (stack_pools[pool_index] == NULL) {
- /* Use the preallocated memory for the current pool. */
- stack_pools[pool_index] = *prealloc;
- *prealloc = NULL;
- } else {
- /*
- * Otherwise, use the preallocated memory for the next pool
- * as long as we do not exceed the maximum number of pools.
- */
- if (pool_index + 1 < DEPOT_MAX_POOLS) {
- stack_pools[pool_index + 1] = *prealloc;
- *prealloc = NULL;
- }
- /*
- * At this point, either the next pool is initialized or the
- * maximum number of pools is reached. In either case, take
- * note that initializing another pool is not required.
- * This smp_store_release pairs with smp_load_acquire() above
- * and in stack_depot_save().
- */
- smp_store_release(&next_pool_required, 0);
+ WRITE_ONCE(new_pool, *prealloc);
+ *prealloc = NULL;
+}
+
+/*
+ * Try to initialize a new stack record from the current pool, a cached pool, or
+ * the current pre-allocation.
+ */
+static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
+{
+ struct stack_record *stack;
+ void *current_pool;
+ u32 pool_index;
+
+ lockdep_assert_held(&pool_lock);
+
+ if (pool_offset + size > DEPOT_POOL_SIZE) {
+ if (!depot_init_pool(prealloc))
+ return NULL;
}
+
+ if (WARN_ON_ONCE(pools_num < 1))
+ return NULL;
+ pool_index = pools_num - 1;
+ current_pool = stack_pools[pool_index];
+ if (WARN_ON_ONCE(!current_pool))
+ return NULL;
+
+ stack = current_pool + pool_offset;
+
+ /* Pre-initialize handle once. */
+ stack->handle.pool_index = pool_index;
+ stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
+ stack->handle.extra = 0;
+ INIT_LIST_HEAD(&stack->hash_list);
+
+ pool_offset += size;
+
+ return stack;
+}
+
+/* Try to find next free usable entry from the freelist. */
+static struct stack_record *depot_pop_free(void)
+{
+ struct stack_record *stack;
+
+ lockdep_assert_held(&pool_lock);
+
+ if (list_empty(&free_stacks))
+ return NULL;
+
+ /*
+ * We maintain the invariant that the elements in front are least
+ * recently used, and are therefore more likely to be associated with an
+ * RCU grace period in the past. Consequently it is sufficient to only
+ * check the first entry.
+ */
+ stack = list_first_entry(&free_stacks, struct stack_record, free_list);
+ if (!poll_state_synchronize_rcu(stack->rcu_state))
+ return NULL;
+
+ list_del(&stack->free_list);
+ counters[DEPOT_COUNTER_FREELIST_SIZE]--;
+
+ return stack;
+}
+
+static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries)
+{
+ const size_t used = flex_array_size(s, entries, nr_entries);
+ const size_t unused = sizeof(s->entries) - used;
+
+ WARN_ON_ONCE(sizeof(s->entries) < used);
+
+ return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN);
}
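
depot_stack_record_size() trims the unused tail of the fixed-size entries[] array off the record and then rounds the result up to the pool alignment, so short traces stored without STACK_DEPOT_FLAG_GET occupy less pool space. A standalone sketch of the same arithmetic, with illustrative sizes (the real struct stack_record layout and DEPOT_STACK_ALIGN value are not taken from this hunk):

#include <stdio.h>

#define DEPOT_STACK_ALIGN	4	/* illustrative: 16-byte alignment */
#define MAX_FRAMES		64	/* stands in for CONFIG_STACKDEPOT_MAX_FRAMES */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long header = 48;	/* made-up bytes before entries[] */
	unsigned long full = header + MAX_FRAMES * sizeof(unsigned long);
	unsigned int nr_entries = 7;

	unsigned long used = nr_entries * sizeof(unsigned long);
	unsigned long unused = MAX_FRAMES * sizeof(unsigned long) - used;
	unsigned long record = ALIGN_UP(full - unused, 1UL << DEPOT_STACK_ALIGN);

	/* For 8-byte longs this prints 112: 48 + 7 * 8 = 104, aligned up to 16. */
	printf("record size for %u frames: %lu bytes\n", nr_entries, record);
	return 0;
}
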
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
-depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
+depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
{
- struct stack_record *stack;
- size_t required_size = struct_size(stack, entries, size);
+ struct stack_record *stack = NULL;
+ size_t record_size;
- required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);
+ lockdep_assert_held(&pool_lock);
- /* Check if there is not enough space in the current pool. */
- if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
- /* Bail out if we reached the pool limit. */
- if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
- WARN_ONCE(1, "Stack depot reached limit capacity");
- return NULL;
- }
+ /* This should already be checked by public API entry points. */
+ if (WARN_ON_ONCE(!nr_entries))
+ return NULL;
+ /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
+ if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES)
+ nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES;
+
+ if (flags & STACK_DEPOT_FLAG_GET) {
/*
- * Move on to the next pool.
- * WRITE_ONCE pairs with potential concurrent read in
- * stack_depot_fetch().
- */
- WRITE_ONCE(pool_index, pool_index + 1);
- pool_offset = 0;
- /*
- * If the maximum number of pools is not reached, take note
- * that the next pool needs to initialized.
- * smp_store_release() here pairs with smp_load_acquire() in
- * stack_depot_save() and depot_init_pool().
+ * Evictable entries have to allocate the max. size so they may
+ * safely be re-used by differently sized allocations.
*/
- if (pool_index + 1 < DEPOT_MAX_POOLS)
- smp_store_release(&next_pool_required, 1);
+ record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES);
+ stack = depot_pop_free();
+ } else {
+ record_size = depot_stack_record_size(stack, nr_entries);
}
- /* Assign the preallocated memory to a pool if required. */
- if (*prealloc)
- depot_init_pool(prealloc);
-
- /* Check if we have a pool to save the stack trace. */
- if (stack_pools[pool_index] == NULL)
- return NULL;
+ if (!stack) {
+ stack = depot_pop_free_pool(prealloc, record_size);
+ if (!stack)
+ return NULL;
+ }
/* Save the stack trace. */
- stack = stack_pools[pool_index] + pool_offset;
stack->hash = hash;
- stack->size = size;
- stack->handle.pool_index = pool_index;
- stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
- stack->handle.valid = 1;
- stack->handle.extra = 0;
- memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
- pool_offset += required_size;
+ stack->size = nr_entries;
+ /* stack->handle is already filled in by depot_pop_free_pool(). */
+ memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries));
+
+ if (flags & STACK_DEPOT_FLAG_GET) {
+ refcount_set(&stack->count, 1);
+ counters[DEPOT_COUNTER_REFD_ALLOCS]++;
+ counters[DEPOT_COUNTER_REFD_INUSE]++;
+ } else {
+ /* Warn on attempts to switch to refcounting this entry. */
+ refcount_set(&stack->count, REFCOUNT_SATURATED);
+ counters[DEPOT_COUNTER_PERSIST_COUNT]++;
+ counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size;
+ }
+
/*
* Let KMSAN know the stored stack record is initialized. This shall
* prevent false positive reports if instrumented code accesses it.
*/
- kmsan_unpoison_memory(stack, required_size);
+ kmsan_unpoison_memory(stack, record_size);
return stack;
}
+static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+{
+ const int pools_num_cached = READ_ONCE(pools_num);
+ union handle_parts parts = { .handle = handle };
+ void *pool;
+ size_t offset = parts.offset << DEPOT_STACK_ALIGN;
+ struct stack_record *stack;
+
+ lockdep_assert_not_held(&pool_lock);
+
+ if (parts.pool_index > pools_num_cached) {
+ WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
+ parts.pool_index, pools_num_cached, handle);
+ return NULL;
+ }
+
+ pool = stack_pools[parts.pool_index];
+ if (WARN_ON(!pool))
+ return NULL;
+
+ stack = pool + offset;
+ if (WARN_ON(!refcount_read(&stack->count)))
+ return NULL;
+
+ return stack;
+}
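+
The new depot_fetch_stack() needs nothing but the handle to locate a record: the pool index picks an entry in stack_pools[], and the stored offset, shifted back up by DEPOT_STACK_ALIGN, gives the byte offset into that pool. A runnable sketch of the decoding, using made-up bit widths rather than the real handle layout:

#include <stdint.h>
#include <stdio.h>

#define STACK_ALIGN_SHIFT 4	/* stands in for DEPOT_STACK_ALIGN */

/* Illustrative layout only; the real union handle_parts differs. */
union handle_parts {
	uint32_t handle;
	struct {
		uint32_t pool_index : 16;
		uint32_t offset     : 11;
		uint32_t extra      : 5;
	};
};

int main(void)
{
	union handle_parts parts = { .handle = 0 };

	parts.pool_index = 3;
	parts.offset = 112 >> STACK_ALIGN_SHIFT;	/* record starts 112 bytes in */

	size_t byte_offset = (size_t)parts.offset << STACK_ALIGN_SHIFT;
	printf("pool %u, byte offset %zu, packed handle 0x%08x\n",
	       (unsigned int)parts.pool_index, byte_offset, (unsigned int)parts.handle);
	return 0;
}
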
+
+/* Links stack into the freelist. */
+static void depot_free_stack(struct stack_record *stack)
+{
+ unsigned long flags;
+
+ lockdep_assert_not_held(&pool_lock);
+
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ printk_deferred_enter();
+
+ /*
+ * Remove the entry from the hash list. Concurrent list traversal may
+ * still observe the entry, but since the refcount is zero, this entry
+ * will no longer be considered as valid.
+ */
+ list_del_rcu(&stack->hash_list);
+
+ /*
+ * Due to being used from constrained contexts such as the allocators,
+ * NMI, or even RCU itself, stack depot cannot rely on primitives that
+ * would sleep (such as synchronize_rcu()) or recursively call into
+ * stack depot again (such as call_rcu()).
+ *
+ * Instead, get an RCU cookie, so that we can ensure this entry isn't
+ * moved onto another list until the next grace period, and concurrent
+ * RCU list traversal remains safe.
+ */
+ stack->rcu_state = get_state_synchronize_rcu();
+
+ /*
+ * Add the entry to the freelist tail, so that older entries are
+ * considered first - their RCU cookie is more likely to no longer be
+ * associated with the current grace period.
+ */
+ list_add_tail(&stack->free_list, &free_stacks);
+
+ counters[DEPOT_COUNTER_FREELIST_SIZE]++;
+ counters[DEPOT_COUNTER_REFD_FREES]++;
+ counters[DEPOT_COUNTER_REFD_INUSE]--;
+
+ printk_deferred_exit();
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+}
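+
Together with depot_pop_free(), this forms a poll-based deferred-reuse scheme: the freeing side stamps the record with an RCU cookie and queues it at the tail, and the reuse side only takes the oldest record once that cookie shows a grace period has elapsed, so neither side blocks or calls back into RCU. A condensed kernel-context sketch of the pairing (not a standalone program):

/* Free side: stamp the record and queue it at the tail. */
rec->rcu_state = get_state_synchronize_rcu();
list_add_tail(&rec->free_list, &free_stacks);

/* Reuse side: only the oldest record needs checking. */
rec = list_first_entry(&free_stacks, struct stack_record, free_list);
if (!poll_state_synchronize_rcu(rec->rcu_state))
	return NULL;	/* grace period not over yet; leave it queued */
list_del(&rec->free_list);
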
+
/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
@@ -340,32 +573,72 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
}
/* Finds a stack in a bucket of the hash table. */
-static inline struct stack_record *find_stack(struct stack_record *bucket,
- unsigned long *entries, int size,
- u32 hash)
+static inline struct stack_record *find_stack(struct list_head *bucket,
+ unsigned long *entries, int size,
+ u32 hash, depot_flags_t flags)
{
- struct stack_record *found;
+ struct stack_record *stack, *ret = NULL;
+
+ /*
+ * Stack depot may be used from instrumentation that instruments RCU or
+ * tracing itself; use variant that does not call into RCU and cannot be
+ * traced.
+ *
+ * Note: Such use cases must take care when using refcounting to evict
+ * unused entries, because the stack record free-then-reuse code paths
+ * do call into RCU.
+ */
+ rcu_read_lock_sched_notrace();
+
+ list_for_each_entry_rcu(stack, bucket, hash_list) {
+ if (stack->hash != hash || stack->size != size)
+ continue;
+
+ /*
+ * This may race with depot_free_stack() accessing the freelist
+ * management state unioned with @entries. The refcount is zero
+ * in that case and the below refcount_inc_not_zero() will fail.
+ */
+ if (data_race(stackdepot_memcmp(entries, stack->entries, size)))
+ continue;
- for (found = bucket; found; found = found->next) {
- if (found->hash == hash &&
- found->size == size &&
- !stackdepot_memcmp(entries, found->entries, size))
- return found;
+ /*
+ * Try to increment refcount. If this succeeds, the stack record
+ * is valid and has not yet been freed.
+ *
+ * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior
+ * to then call stack_depot_put() later, and we can assume that
+ * a stack record is never placed back on the freelist.
+ */
+ if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count))
+ continue;
+
+ ret = stack;
+ break;
}
- return NULL;
+
+ rcu_read_unlock_sched_notrace();
+
+ return ret;
}
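
The lookup leans on the classic "increment only if still non-zero" idiom: a reader that races with depot_free_stack() simply skips the entry instead of resurrecting it. A standalone illustration of that idiom with C11 atomics (the kernel primitive used above is refcount_inc_not_zero()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for refcount_inc_not_zero(). */
static bool inc_not_zero(atomic_uint *ref)
{
	unsigned int old = atomic_load(ref);

	do {
		if (old == 0)
			return false;	/* already freed; do not revive it */
	} while (!atomic_compare_exchange_weak(ref, &old, old + 1));

	return true;
}

int main(void)
{
	atomic_uint live = 1, dead = 0;

	printf("live entry grabbed: %d\n", inc_not_zero(&live));	/* prints 1 */
	printf("dead entry grabbed: %d\n", inc_not_zero(&dead));	/* prints 0 */
	return 0;
}
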
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- gfp_t alloc_flags, bool can_alloc)
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags,
+ depot_flags_t depot_flags)
{
- struct stack_record *found = NULL, **bucket;
- union handle_parts retval = { .handle = 0 };
+ struct list_head *bucket;
+ struct stack_record *found = NULL;
+ depot_stack_handle_t handle = 0;
struct page *page = NULL;
void *prealloc = NULL;
+ bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
unsigned long flags;
u32 hash;
+ if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK))
+ return 0;
+
/*
* If this stack trace is from an interrupt, including anything before
* interrupt entry usually leads to unbounded stack depot growth.
@@ -377,28 +650,21 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
nr_entries = filter_irq_stacks(entries, nr_entries);
if (unlikely(nr_entries == 0) || stack_depot_disabled)
- goto fast_exit;
+ return 0;
hash = hash_stack(entries, nr_entries);
bucket = &stack_table[hash & stack_hash_mask];
- /*
- * Fast path: look the stack trace up without locking.
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |bucket| below.
- */
- found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
+ /* Fast path: look the stack trace up without locking. */
+ found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
if (found)
goto exit;
/*
- * Check if another stack pool needs to be initialized. If so, allocate
- * the memory now - we won't be able to do that under the lock.
- *
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
+ * Allocate memory for a new pool if required now:
+ * we won't be able to do that under the lock.
*/
- if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
+ if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
@@ -413,29 +679,34 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
}
raw_spin_lock_irqsave(&pool_lock, flags);
+ printk_deferred_enter();
- found = find_stack(*bucket, entries, nr_entries, hash);
+ /* Try to find again, to avoid concurrently inserting duplicates. */
+ found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
if (!found) {
struct stack_record *new =
- depot_alloc_stack(entries, nr_entries, hash, &prealloc);
+ depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc);
if (new) {
- new->next = *bucket;
/*
- * This smp_store_release() pairs with
- * smp_load_acquire() from |bucket| above.
+ * This releases the stack record into the bucket and
+ * makes it visible to readers in find_stack().
*/
- smp_store_release(bucket, new);
+ list_add_rcu(&new->hash_list, bucket);
found = new;
}
- } else if (prealloc) {
+ }
+
+ if (prealloc) {
/*
- * Stack depot already contains this stack trace, but let's
- * keep the preallocated memory for the future.
+ * Either stack depot already contains this stack trace, or
+ * depot_alloc_stack() did not consume the preallocated memory.
+		 * Try to keep the preallocated memory for future use.
*/
- depot_init_pool(&prealloc);
+ depot_keep_new_pool(&prealloc);
}
+ printk_deferred_exit();
raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
if (prealloc) {
@@ -443,31 +714,23 @@ exit:
free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
- retval.handle = found->handle.handle;
-fast_exit:
- return retval.handle;
+ handle = found->handle.handle;
+ return handle;
}
-EXPORT_SYMBOL_GPL(__stack_depot_save);
+EXPORT_SYMBOL_GPL(stack_depot_save_flags);
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
{
- return __stack_depot_save(entries, nr_entries, alloc_flags, true);
+ return stack_depot_save_flags(entries, nr_entries, alloc_flags,
+ STACK_DEPOT_FLAG_CAN_ALLOC);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
- union handle_parts parts = { .handle = handle };
- /*
- * READ_ONCE pairs with potential concurrent write in
- * depot_alloc_stack.
- */
- int pool_index_cached = READ_ONCE(pool_index);
- void *pool;
- size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
*entries = NULL;
@@ -477,24 +740,42 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
*/
kmsan_unpoison_memory(entries, sizeof(*entries));
- if (!handle)
+ if (!handle || stack_depot_disabled)
return 0;
- if (parts.pool_index > pool_index_cached) {
- WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
- parts.pool_index, pool_index_cached, handle);
- return 0;
- }
- pool = stack_pools[parts.pool_index];
- if (!pool)
+ stack = depot_fetch_stack(handle);
+ /*
+ * Should never be NULL, otherwise this is a use-after-put (or just a
+ * corrupt handle).
+ */
+ if (WARN(!stack, "corrupt handle or use after stack_depot_put()"))
return 0;
- stack = pool + offset;
*entries = stack->entries;
return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
+void stack_depot_put(depot_stack_handle_t handle)
+{
+ struct stack_record *stack;
+
+ if (!handle || stack_depot_disabled)
+ return;
+
+ stack = depot_fetch_stack(handle);
+ /*
+ * Should always be able to find the stack record, otherwise this is an
+ * unbalanced put attempt (or corrupt handle).
+ */
+ if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()"))
+ return;
+
+ if (refcount_dec_and_test(&stack->count))
+ depot_free_stack(stack);
+}
+EXPORT_SYMBOL_GPL(stack_depot_put);
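+
Taken together, the new entry points give callers a reference-counted lifecycle: save with STACK_DEPOT_FLAG_GET, then drop the reference with stack_depot_put() once the handle is no longer needed. A condensed sketch of a hypothetical kernel-context caller (not a standalone program; names outside this diff, such as stack_trace_save(), are assumptions):

unsigned long entries[16];
unsigned int nr;
depot_stack_handle_t handle;

nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
handle = stack_depot_save_flags(entries, nr, GFP_NOWAIT,
				STACK_DEPOT_FLAG_CAN_ALLOC |
				STACK_DEPOT_FLAG_GET);

/* ... use the handle, e.g. stack_depot_print(handle) ... */

if (handle)
	stack_depot_put(handle);	/* last put moves the record to the freelist */
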
+
void stack_depot_print(depot_stack_handle_t stack)
{
unsigned long *entries;
@@ -539,3 +820,30 @@ unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);
+
+static int stats_show(struct seq_file *seq, void *v)
+{
+ /*
+ * data race ok: These are just statistics counters, and approximate
+ * statistics are ok for debugging.
+ */
+ seq_printf(seq, "pools: %d\n", data_race(pools_num));
+ for (int i = 0; i < DEPOT_COUNTER_COUNT; i++)
+ seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stats);
+
+static int depot_debugfs_init(void)
+{
+ struct dentry *dir;
+
+ if (stack_depot_disabled)
+ return 0;
+
+ dir = debugfs_create_dir("stackdepot", NULL);
+ debugfs_create_file("stats", 0444, dir, NULL, &stats_fops);
+ return 0;
+}
+late_initcall(depot_debugfs_init);
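
Once that initcall has run, the counters are readable from /sys/kernel/debug/stackdepot/stats (the counter names come from the counter_names[] table, which is outside this hunk). A trivial userspace reader, assuming debugfs is mounted at the usual location:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/stackdepot/stats", "r");
	char line[128];

	if (!f) {
		perror("stackdepot stats");	/* needs debugfs mounted and read access */
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
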
diff --git a/lib/string.c b/lib/string.c
index be26623953d2..6891d15ce991 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -103,21 +103,6 @@ char *strncpy(char *dest, const char *src, size_t count)
EXPORT_SYMBOL(strncpy);
#endif
-#ifndef __HAVE_ARCH_STRLCPY
-size_t strlcpy(char *dest, const char *src, size_t size)
-{
- size_t ret = strlen(src);
-
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
- __builtin_memcpy(dest, src, len);
- dest[len] = '\0';
- }
- return ret;
-}
-EXPORT_SYMBOL(strlcpy);
-#endif
-
#ifndef __HAVE_ARCH_STRSCPY
ssize_t strscpy(char *dest, const char *src, size_t count)
{
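
The strlcpy() removed above always computes strlen(src), so the whole source string is read even when only size - 1 bytes can ever be copied, and callers must compare the return value against size to detect truncation. A standalone comparison against strscpy()-style semantics, using simplified local stand-ins rather than the kernel implementations:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Same logic as the strlcpy() removed above. */
static size_t strlcpy_like(char *dest, const char *src, size_t size)
{
	size_t ret = strlen(src);

	if (size) {
		size_t len = (ret >= size) ? size - 1 : ret;
		memcpy(dest, src, len);
		dest[len] = '\0';
	}
	return ret;
}

/* Simplified stand-in for strscpy(): -1 on truncation (the kernel returns
 * -E2BIG), otherwise the number of characters copied. */
static ssize_t strscpy_like(char *dest, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (!size)
		return -1;
	if (len == size) {			/* source does not fit: truncate */
		len = size - 1;
		memcpy(dest, src, len);
		dest[len] = '\0';
		return -1;
	}
	memcpy(dest, src, len);
	dest[len] = '\0';
	return (ssize_t)len;
}

int main(void)
{
	char buf[8];

	printf("strlcpy-like result: %zu\n", strlcpy_like(buf, "truncated string", sizeof(buf)));
	printf("strscpy-like result: %zd\n", strscpy_like(buf, "truncated string", sizeof(buf)));
	return 0;
}
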
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 7916503e6a6a..569e6d2dc55c 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5144,22 +5144,6 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
- {
- "ALU_MOVSX | BPF_W",
- .u.insns_int = {
- BPF_LD_IMM64(R2, 0x00000000deadbeefLL),
- BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
- BPF_MOVSX32_REG(R1, R3, 32),
- BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
- BPF_MOV32_IMM(R0, 2),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(R0, 1),
- BPF_EXIT_INSN(),
- },
- INTERNAL,
- { },
- { { 0, 0x1 } },
- },
/* MOVSX64 REG */
{
"ALU64_MOVSX | BPF_B",
@@ -6293,7 +6277,7 @@ static struct bpf_test tests[] = {
},
/* BPF_ALU64 | BPF_MOD | BPF_K off=1 (SMOD64) */
{
- "ALU64_SMOD_X: -7 % 2 = -1",
+ "ALU64_SMOD_K: -7 % 2 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, -7),
BPF_ALU64_IMM_OFF(BPF_MOD, R0, 2, 1),
@@ -12215,7 +12199,6 @@ static struct bpf_test tests[] = {
BPF_JMP32_IMM_ZEXT(JLE),
BPF_JMP32_IMM_ZEXT(JSGT),
BPF_JMP32_IMM_ZEXT(JSGE),
- BPF_JMP32_IMM_ZEXT(JSGT),
BPF_JMP32_IMM_ZEXT(JSLT),
BPF_JMP32_IMM_ZEXT(JSLE),
#undef BPF_JMP2_IMM_ZEXT
@@ -12251,7 +12234,6 @@ static struct bpf_test tests[] = {
BPF_JMP32_REG_ZEXT(JLE),
BPF_JMP32_REG_ZEXT(JSGT),
BPF_JMP32_REG_ZEXT(JSGE),
- BPF_JMP32_REG_ZEXT(JSGT),
BPF_JMP32_REG_ZEXT(JSLT),
BPF_JMP32_REG_ZEXT(JSLE),
#undef BPF_JMP2_REG_ZEXT
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index add4699fc6cd..9cfdcd6d21db 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -1132,6 +1132,7 @@ static const char * const fw_upload_err_str[] = {
[FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
[FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
[FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
+ [FW_UPLOAD_ERR_FW_INVALID] = "firmware-invalid",
};
static void upload_err_inject_error(struct test_firmware_upload *tst,
diff --git a/lib/test_fortify/write_overflow-strlcpy-src.c b/lib/test_fortify/write_overflow-strlcpy-src.c
deleted file mode 100644
index 91bf83ebd34a..000000000000
--- a/lib/test_fortify/write_overflow-strlcpy-src.c
+++ /dev/null
@@ -1,5 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#define TEST \
- strlcpy(small, large_src, sizeof(small) + 1)
-
-#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strlcpy.c b/lib/test_fortify/write_overflow-strlcpy.c
deleted file mode 100644
index 1883db7c0cd6..000000000000
--- a/lib/test_fortify/write_overflow-strlcpy.c
+++ /dev/null
@@ -1,5 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#define TEST \
- strlcpy(instance.buf, large_src, sizeof(instance.buf) + 1)
-
-#include "test_fortify.h"
diff --git a/lib/test_ida.c b/lib/test_ida.c
index 55105baa19da..072a49897e71 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -13,7 +13,7 @@ static unsigned int tests_run;
static unsigned int tests_passed;
#ifdef __KERNEL__
-void ida_dump(struct ida *ida) { }
+static void ida_dump(struct ida *ida) { }
#endif
#define IDA_BUG_ON(ida, x) do { \
tests_run++; \
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 464eeb90d5ad..29185ac5c727 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -43,6 +43,7 @@ atomic_t maple_tree_tests_passed;
/* #define BENCH_NODE_STORE */
/* #define BENCH_AWALK */
/* #define BENCH_WALK */
+/* #define BENCH_LOAD */
/* #define BENCH_MT_FOR_EACH */
/* #define BENCH_FORK */
/* #define BENCH_MAS_FOR_EACH */
@@ -54,6 +55,11 @@ atomic_t maple_tree_tests_passed;
#else
#define cond_resched() do {} while (0)
#endif
+
+#define mas_is_none(x) ((x)->status == ma_none)
+#define mas_is_overflow(x) ((x)->status == ma_overflow)
+#define mas_is_underflow(x) ((x)->status == ma_underflow)
+
static int __init mtree_insert_index(struct maple_tree *mt,
unsigned long index, gfp_t gfp)
{
@@ -582,7 +588,7 @@ static noinline void __init check_find(struct maple_tree *mt)
MT_BUG_ON(mt, last != mas.last);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
mas.index = ULONG_MAX;
mas.last = ULONG_MAX;
entry2 = mas_prev(&mas, 0);
@@ -1749,6 +1755,19 @@ static noinline void __init bench_walk(struct maple_tree *mt)
}
#endif
+#if defined(BENCH_LOAD)
+static noinline void __init bench_load(struct maple_tree *mt)
+{
+ int i, max = 2500, count = 550000000;
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++)
+ mtree_load(mt, 1470);
+}
+#endif
+
#if defined(BENCH_MT_FOR_EACH)
static noinline void __init bench_mt_for_each(struct maple_tree *mt)
{
@@ -1834,47 +1853,48 @@ static noinline void __init bench_mas_prev(struct maple_tree *mt)
}
#endif
/* check_forking - simulate the kernel forking sequence with the tree. */
-static noinline void __init check_forking(struct maple_tree *mt)
+static noinline void __init check_forking(void)
{
-
- struct maple_tree newmt;
- int i, nr_entries = 134;
+ struct maple_tree mt, newmt;
+ int i, nr_entries = 134, ret;
void *val;
- MA_STATE(mas, mt, 0, 0);
- MA_STATE(newmas, mt, 0, 0);
- struct rw_semaphore newmt_lock;
+ MA_STATE(mas, &mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore mt_lock, newmt_lock;
+ init_rwsem(&mt_lock);
init_rwsem(&newmt_lock);
- for (i = 0; i <= nr_entries; i++)
- mtree_store_range(mt, i*10, i*10 + 5,
- xa_mk_value(i), GFP_KERNEL);
+ mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&mt, &mt_lock);
- mt_set_non_kernel(99999);
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
mt_set_external_lock(&newmt, &newmt_lock);
- newmas.tree = &newmt;
- mas_reset(&newmas);
- mas_reset(&mas);
- down_write(&newmt_lock);
- mas.index = 0;
- mas.last = 0;
- if (mas_expected_entries(&newmas, nr_entries)) {
+
+ down_write(&mt_lock);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_set_range(&mas, i*10, i*10 + 5);
+ mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+ ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+ if (ret) {
pr_err("OOM!");
BUG_ON(1);
}
- rcu_read_lock();
- mas_for_each(&mas, val, ULONG_MAX) {
- newmas.index = mas.index;
- newmas.last = mas.last;
+
+ mas_set(&newmas, 0);
+ mas_for_each(&newmas, val, ULONG_MAX)
mas_store(&newmas, val);
- }
- rcu_read_unlock();
+
mas_destroy(&newmas);
+ mas_destroy(&mas);
mt_validate(&newmt);
- mt_set_non_kernel(0);
__mt_destroy(&newmt);
+ __mt_destroy(&mt);
up_write(&newmt_lock);
+ up_write(&mt_lock);
}
static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1977,49 +1997,51 @@ static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
}
#if defined(BENCH_FORK)
-static noinline void __init bench_forking(struct maple_tree *mt)
+static noinline void __init bench_forking(void)
{
-
- struct maple_tree newmt;
- int i, nr_entries = 134, nr_fork = 80000;
+ struct maple_tree mt, newmt;
+ int i, nr_entries = 134, nr_fork = 80000, ret;
void *val;
- MA_STATE(mas, mt, 0, 0);
- MA_STATE(newmas, mt, 0, 0);
- struct rw_semaphore newmt_lock;
+ MA_STATE(mas, &mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore mt_lock, newmt_lock;
+ init_rwsem(&mt_lock);
init_rwsem(&newmt_lock);
- mt_set_external_lock(&newmt, &newmt_lock);
- for (i = 0; i <= nr_entries; i++)
- mtree_store_range(mt, i*10, i*10 + 5,
- xa_mk_value(i), GFP_KERNEL);
+ mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&mt, &mt_lock);
+
+ down_write(&mt_lock);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_set_range(&mas, i*10, i*10 + 5);
+ mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+ }
for (i = 0; i < nr_fork; i++) {
- mt_set_non_kernel(99999);
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
- newmas.tree = &newmt;
- mas_reset(&newmas);
- mas_reset(&mas);
- mas.index = 0;
- mas.last = 0;
- rcu_read_lock();
- down_write(&newmt_lock);
- if (mas_expected_entries(&newmas, nr_entries)) {
- printk("OOM!");
+ mt_init_flags(&newmt,
+ MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&newmt, &newmt_lock);
+
+ down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+ ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+ if (ret) {
+ pr_err("OOM!");
BUG_ON(1);
}
- mas_for_each(&mas, val, ULONG_MAX) {
- newmas.index = mas.index;
- newmas.last = mas.last;
+
+ mas_set(&newmas, 0);
+ mas_for_each(&newmas, val, ULONG_MAX)
mas_store(&newmas, val);
- }
+
mas_destroy(&newmas);
- rcu_read_unlock();
mt_validate(&newmt);
- mt_set_non_kernel(0);
__mt_destroy(&newmt);
up_write(&newmt_lock);
}
+ mas_destroy(&mas);
+ __mt_destroy(&mt);
+ up_write(&mt_lock);
}
#endif
@@ -2175,7 +2197,7 @@ static noinline void __init next_prev_test(struct maple_tree *mt)
MT_BUG_ON(mt, val != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 5);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
mas.index = 0;
mas.last = 5;
@@ -3039,10 +3061,6 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
* DNE active active range of NULL
*/
-#define mas_active(x) (((x).node != MAS_ROOT) && \
- ((x).node != MAS_START) && \
- ((x).node != MAS_PAUSE) && \
- ((x).node != MAS_NONE))
static noinline void __init check_state_handling(struct maple_tree *mt)
{
MA_STATE(mas, mt, 0, 0);
@@ -3057,7 +3075,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
/* prev: Start -> underflow*/
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, mas.status != ma_underflow);
/* prev: Start -> root */
mas_set(&mas, 10);
@@ -3065,7 +3083,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* prev: pause -> root */
mas_set(&mas, 10);
@@ -3074,7 +3092,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* next: start -> none */
mas_set(&mas, 0);
@@ -3082,7 +3100,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* next: start -> none*/
mas_set(&mas, 10);
@@ -3090,7 +3108,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find: start -> root */
mas_set(&mas, 0);
@@ -3098,21 +3116,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* find: root -> none */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find: none -> none */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find: start -> none */
mas_set(&mas, 10);
@@ -3120,14 +3138,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find_rev: none -> root */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* find_rev: start -> root */
mas_set(&mas, 0);
@@ -3135,21 +3153,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* find_rev: root -> none */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find_rev: none -> none */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find_rev: start -> root */
mas_set(&mas, 10);
@@ -3157,7 +3175,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: start -> none */
mas_set(&mas, 10);
@@ -3165,7 +3183,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: pause -> none*/
mas_set(&mas, 10);
@@ -3174,7 +3192,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: none -> none */
mas.index = mas.last = 10;
@@ -3182,14 +3200,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: none -> none */
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: start -> root */
mas_set(&mas, 0);
@@ -3197,7 +3215,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: pause -> root */
mas_set(&mas, 0);
@@ -3206,22 +3224,22 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: none -> root */
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: root -> root */
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: root -> none */
mas_set(&mas, 10);
@@ -3229,7 +3247,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: none -> root */
mas.index = mas.last = 0;
@@ -3237,7 +3255,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
mas_unlock(&mas);
@@ -3255,7 +3273,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* next: pause ->active */
mas_set(&mas, 0);
@@ -3264,126 +3282,132 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* next: none ->active */
mas.index = mas.last = 0;
mas.offset = 0;
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* next:active ->active */
- entry = mas_next(&mas, ULONG_MAX);
+ /* next:active ->active (spanning limit) */
+ entry = mas_next(&mas, 0x2100);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* next:active -> active beyond data */
+ /* next:active -> overflow (limit reached) beyond data */
entry = mas_next(&mas, 0x2999);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x2501);
MT_BUG_ON(mt, mas.last != 0x2fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
- /* Continue after last range ends after max */
+ /* next:overflow -> active (limit changed) */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
-
- /* next:active -> active continued */
- entry = mas_next(&mas, ULONG_MAX);
- MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.index != 0x3501);
- MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* next:active -> overflow */
+ /* next:active -> overflow (limit reached) */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
/* next:overflow -> overflow */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
/* prev:overflow -> active */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* next: none -> active, skip value at location */
mas_set(&mas, 0);
entry = mas_next(&mas, ULONG_MAX);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
mas.offset = 0;
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* prev:active ->active */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* prev:active -> active spanning end range */
+ /* prev:active -> underflow (span limit) */
+ mas_next(&mas, ULONG_MAX);
+ entry = mas_prev(&mas, 0x1200);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas)); /* spanning limit */
+ entry = mas_prev(&mas, 0x1200); /* underflow */
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+ /* prev:underflow -> underflow (lower limit) spanning end range */
entry = mas_prev(&mas, 0x0100);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
- /* prev:active -> underflow */
+ /* prev:underflow -> underflow */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* prev:underflow -> underflow */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* next:underflow -> active */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* prev:first value -> underflow */
entry = mas_prev(&mas, 0x1000);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* find:underflow -> first value */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* prev: pause ->active */
mas_set(&mas, 0x3600);
@@ -3394,21 +3418,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* prev:active -> active spanning min */
+ /* prev:active -> underflow spanning min */
entry = mas_prev(&mas, 0x1600);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* prev: active ->active, continue */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find: start ->active */
mas_set(&mas, 0);
@@ -3416,7 +3440,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find: pause ->active */
mas_set(&mas, 0);
@@ -3425,7 +3449,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find: start ->active on value */;
mas_set(&mas, 1200);
@@ -3433,14 +3457,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find:active ->active */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find:active -> active (NULL)*/
@@ -3448,35 +3472,35 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x2501);
MT_BUG_ON(mt, mas.last != 0x2FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MAS_BUG_ON(&mas, !mas_is_active(&mas));
/* find: overflow ->active */
entry = mas_find(&mas, 0x5000);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find:active -> active (NULL) end*/
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, !mas_active(mas));
+ MAS_BUG_ON(&mas, !mas_is_active(&mas));
/* find_rev: active (END) ->active */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find_rev:active ->active */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find_rev: pause ->active */
mas_pause(&mas);
@@ -3484,14 +3508,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* find_rev:active -> active */
+ /* find_rev:active -> underflow */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* find_rev: start ->active */
mas_set(&mas, 0x1200);
@@ -3499,7 +3523,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk start ->active */
mas_set(&mas, 0x1200);
@@ -3507,7 +3531,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk start ->active */
mas_set(&mas, 0x1600);
@@ -3515,7 +3539,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk pause ->active */
mas_set(&mas, 0x1200);
@@ -3524,7 +3548,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk pause -> active */
mas_set(&mas, 0x1600);
@@ -3533,25 +3557,25 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk none -> active */
mas_set(&mas, 0x1200);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk none -> active */
mas_set(&mas, 0x1600);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk active -> active */
mas.index = 0x1200;
@@ -3561,7 +3585,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk active -> active */
mas.index = 0x1600;
@@ -3570,7 +3594,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
mas_unlock(&mas);
}
@@ -3585,10 +3609,6 @@ static int __init maple_tree_seed(void)
pr_info("\nTEST STARTING\n\n");
- mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- check_root_expand(&tree);
- mtree_destroy(&tree);
-
#if defined(BENCH_SLOT_STORE)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
@@ -3617,13 +3637,18 @@ static int __init maple_tree_seed(void)
mtree_destroy(&tree);
goto skip;
#endif
-#if defined(BENCH_FORK)
+#if defined(BENCH_LOAD)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- bench_forking(&tree);
+ bench_load(&tree);
mtree_destroy(&tree);
goto skip;
#endif
+#if defined(BENCH_FORK)
+#define BENCH
+ bench_forking();
+ goto skip;
+#endif
#if defined(BENCH_MT_FOR_EACH)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
@@ -3647,13 +3672,15 @@ static int __init maple_tree_seed(void)
#endif
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- check_iteration(&tree);
+ check_root_expand(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- check_forking(&tree);
+ check_iteration(&tree);
mtree_destroy(&tree);
+ check_forking();
+
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_mas_store_gfp(&tree);
mtree_destroy(&tree);
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 0ae35223d773..0dc173849a54 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
int failures = 0, num_tests = 0;
int i;
- for (i = 0; i <= MAX_ORDER; i++)
+ for (i = 0; i < NR_PAGE_ORDERS; i++)
num_tests += do_alloc_pages_order(i, &failures);
REPORT_FAILURES_IN_FN();
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index c20f6cb4bf55..42b585208249 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -16,6 +16,7 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
+#include <linux/rcupdate_wait.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/sched.h>
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 8036aa91a1cb..9321d850931f 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -35,6 +35,8 @@ static struct {
struct ctl_table_header *test_h_setup_node;
struct ctl_table_header *test_h_mnt;
struct ctl_table_header *test_h_mnterror;
+ struct ctl_table_header *empty_add;
+ struct ctl_table_header *empty;
} sysctl_test_headers;
struct test_sysctl_data {
@@ -130,7 +132,6 @@ static struct ctl_table test_table[] = {
.mode = 0644,
.proc_handler = proc_do_large_bitmap,
},
- { }
};
static void test_sysctl_calc_match_int_ok(void)
@@ -184,7 +185,6 @@ static struct ctl_table test_table_unregister[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
},
- {}
};
static int test_sysctl_run_unregister_nested(void)
@@ -220,6 +220,25 @@ static int test_sysctl_run_register_mount_point(void)
return 0;
}
+static struct ctl_table test_table_empty[] = { };
+
+static int test_sysctl_run_register_empty(void)
+{
+	/* Test that an empty dir can be created */
+ sysctl_test_headers.empty_add
+ = register_sysctl("debug/test_sysctl/empty_add", test_table_empty);
+ if (!sysctl_test_headers.empty_add)
+ return -ENOMEM;
+
+ /* Test that register on top of an empty dir works */
+ sysctl_test_headers.empty
+ = register_sysctl("debug/test_sysctl/empty_add/empty", test_table_empty);
+ if (!sysctl_test_headers.empty)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int __init test_sysctl_init(void)
{
int err;
@@ -233,6 +252,10 @@ static int __init test_sysctl_init(void)
goto out;
err = test_sysctl_run_register_mount_point();
+ if (err)
+ goto out;
+
+ err = test_sysctl_run_register_empty();
out:
return err;
@@ -248,6 +271,10 @@ static void __exit test_sysctl_exit(void)
unregister_sysctl_table(sysctl_test_headers.test_h_mnt);
if (sysctl_test_headers.test_h_mnterror)
unregister_sysctl_table(sysctl_test_headers.test_h_mnterror);
+ if (sysctl_test_headers.empty)
+ unregister_sysctl_table(sysctl_test_headers.empty);
+ if (sysctl_test_headers.empty_add)
+ unregister_sysctl_table(sysctl_test_headers.empty_add);
}
module_exit(test_sysctl_exit);
diff --git a/lib/trace_readwrite.c b/lib/trace_readwrite.c
index 62b4e8b3c733..a94cd56a1e4c 100644
--- a/lib/trace_readwrite.c
+++ b/lib/trace_readwrite.c
@@ -7,7 +7,7 @@
#include <linux/ftrace.h>
#include <linux/module.h>
-#include <asm-generic/io.h>
+#include <linux/io.h>
#define CREATE_TRACE_POINTS
#include <trace/events/rwmmio.h>
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 3f90810f9f42..df4f8d1354bb 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -204,8 +204,8 @@ static void ubsan_prologue(struct source_location *loc, const char *reason)
{
current->in_ubsan++;
- pr_err("========================================"
- "========================================\n");
+ pr_warn(CUT_HERE);
+
pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name,
loc->line & LINE_MASK, loc->column & COLUMN_MASK);
@@ -215,8 +215,7 @@ static void ubsan_prologue(struct source_location *loc, const char *reason)
static void ubsan_epilogue(void)
{
dump_stack();
- pr_err("========================================"
- "========================================\n");
+ pr_warn("---[ end trace ]---\n");
current->in_ubsan--;